/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27
28 #include <sys/cdefs.h>
29 #include "opt_inet.h"
30
31 #include <sys/param.h>
32 #include <sys/eventhandler.h>
33
34 #include "common.h"
35 #include "t4_regs.h"
36 #include "t4_regs_values.h"
37 #include "firmware/t4fw_interface.h"
38
#undef msleep
/*
 * Millisecond sleep that is safe at any point during boot: while the kernel
 * is still "cold" (scheduler not running) we busy-wait with DELAY(),
 * otherwise we yield the CPU with pause(9).  @x is in milliseconds.
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
46
47 /**
48 * t4_wait_op_done_val - wait until an operation is completed
49 * @adapter: the adapter performing the operation
50 * @reg: the register to check for completion
51 * @mask: a single-bit field within @reg that indicates completion
52 * @polarity: the value of the field when the operation is completed
53 * @attempts: number of check iterations
54 * @delay: delay in usecs between iterations
55 * @valp: where to store the value of the register at completion time
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. If @valp is not NULL the value of the register
59 * at the time it indicated completion is stored there. Returns 0 if the
60 * operation completes and -EAGAIN otherwise.
61 */
t4_wait_op_done_val(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay,u32 * valp)62 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
63 int polarity, int attempts, int delay, u32 *valp)
64 {
65 while (1) {
66 u32 val = t4_read_reg(adapter, reg);
67
68 if (!!(val & mask) == polarity) {
69 if (valp)
70 *valp = val;
71 return 0;
72 }
73 if (--attempts == 0)
74 return -EAGAIN;
75 if (delay)
76 udelay(delay);
77 }
78 }
79
t4_wait_op_done(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay)80 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
81 int polarity, int attempts, int delay)
82 {
83 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
84 delay, NULL);
85 }
86
87 /**
88 * t7_wait_sram_done - wait until an operation is completed
89 * @adapter: the adapter performing the operation
90 * @reg: the register to check for completion
91 * @result_reg: register that holds the result value
92 * @attempts: number of check iterations
93 * @delay: delay in usecs between iterations
94 * @valp: where to store the value of the result register at completion time
95 *
96 * Waits until a specific bit in @reg is cleared, checking up to
97 * @attempts times.Once the bit is cleared, reads from @result_reg
98 * and stores the value in @valp if it is not NULL. Returns 0 if the
99 * operation completes successfully and -EAGAIN if it times out.
100 */
t7_wait_sram_done(struct adapter * adap,int reg,int result_reg,int attempts,int delay,u32 * valp)101 static int t7_wait_sram_done(struct adapter *adap, int reg, int result_reg,
102 int attempts, int delay, u32 *valp)
103 {
104 while (1) {
105 u32 val = t4_read_reg(adap, reg);
106
107 /* Check if SramStart (bit 19) is cleared */
108 if (!(val & (1 << 19))) {
109 if (valp)
110 *valp = t4_read_reg(adap, result_reg);
111 return 0;
112 }
113
114 if (--attempts == 0)
115 return -EAGAIN;
116
117 if (delay)
118 udelay(delay);
119 }
120 }
121
122 /**
123 * t4_set_reg_field - set a register field to a value
124 * @adapter: the adapter to program
125 * @addr: the register address
126 * @mask: specifies the portion of the register to modify
127 * @val: the new value for the register field
128 *
129 * Sets a register field specified by the supplied mask to the
130 * given value.
131 */
t4_set_reg_field(struct adapter * adapter,unsigned int addr,u32 mask,u32 val)132 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
133 u32 val)
134 {
135 u32 v = t4_read_reg(adapter, addr) & ~mask;
136
137 t4_write_reg(adapter, addr, v | val);
138 (void) t4_read_reg(adapter, addr); /* flush */
139 }
140
141 /**
142 * t4_read_indirect - read indirectly addressed registers
143 * @adap: the adapter
144 * @addr_reg: register holding the indirect address
145 * @data_reg: register holding the value of the indirect register
146 * @vals: where the read register values are stored
147 * @nregs: how many indirect registers to read
148 * @start_idx: index of first indirect register to read
149 *
150 * Reads registers that are accessed indirectly through an address/data
151 * register pair.
152 */
t4_read_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,u32 * vals,unsigned int nregs,unsigned int start_idx)153 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
154 unsigned int data_reg, u32 *vals,
155 unsigned int nregs, unsigned int start_idx)
156 {
157 while (nregs--) {
158 t4_write_reg(adap, addr_reg, start_idx);
159 *vals++ = t4_read_reg(adap, data_reg);
160 start_idx++;
161 }
162 }
163
164 /**
165 * t4_write_indirect - write indirectly addressed registers
166 * @adap: the adapter
167 * @addr_reg: register holding the indirect addresses
168 * @data_reg: register holding the value for the indirect registers
169 * @vals: values to write
170 * @nregs: how many indirect registers to write
171 * @start_idx: address of first indirect register to write
172 *
173 * Writes a sequential block of registers that are accessed indirectly
174 * through an address/data register pair.
175 */
t4_write_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,const u32 * vals,unsigned int nregs,unsigned int start_idx)176 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
177 unsigned int data_reg, const u32 *vals,
178 unsigned int nregs, unsigned int start_idx)
179 {
180 while (nregs--) {
181 t4_write_reg(adap, addr_reg, start_idx++);
182 t4_write_reg(adap, data_reg, *vals++);
183 }
184 }
185
186 /*
187 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
188 * mechanism. This guarantees that we get the real value even if we're
189 * operating within a Virtual Machine and the Hypervisor is trapping our
190 * Configuration Space accesses.
191 *
192 * N.B. This routine should only be used as a last resort: the firmware uses
193 * the backdoor registers on a regular basis and we can end up
194 * conflicting with it's uses!
195 */
t4_hw_pci_read_cfg4(adapter_t * adap,int reg)196 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
197 {
198 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
199 u32 val;
200
201 if (chip_id(adap) <= CHELSIO_T5)
202 req |= F_ENABLE;
203 else
204 req |= F_T6_ENABLE;
205
206 if (is_t4(adap))
207 req |= F_LOCALCFG;
208
209 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
210 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
211
212 /*
213 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
214 * Configuration Space read. (None of the other fields matter when
215 * F_ENABLE is 0 so a simple register write is easier than a
216 * read-modify-write via t4_set_reg_field().)
217 */
218 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
219
220 return val;
221 }
222
223 /*
224 * t4_report_fw_error - report firmware error
225 * @adap: the adapter
226 *
227 * The adapter firmware can indicate error conditions to the host.
228 * If the firmware has indicated an error, print out the reason for
229 * the firmware error.
230 */
t4_report_fw_error(struct adapter * adap)231 void t4_report_fw_error(struct adapter *adap)
232 {
233 static const char *const reason[] = {
234 "Crash", /* PCIE_FW_EVAL_CRASH */
235 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
236 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
237 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
238 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
239 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
240 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
241 "Reserved", /* reserved */
242 };
243 u32 pcie_fw;
244
245 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
246 if (pcie_fw & F_PCIE_FW_ERR) {
247 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
248 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
249 }
250 }
251
252 /*
253 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
254 */
get_mbox_rpl(struct adapter * adap,__be64 * rpl,int nflit,u32 mbox_addr)255 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
256 u32 mbox_addr)
257 {
258 for ( ; nflit; nflit--, mbox_addr += 8)
259 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
260 }
261
262 /*
263 * Handle a FW assertion reported in a mailbox.
264 */
fw_asrt(struct adapter * adap,struct fw_debug_cmd * asrt)265 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
266 {
267 CH_ALERT(adap,
268 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
269 asrt->u.assert.filename_0_7,
270 be32_to_cpu(asrt->u.assert.line),
271 be32_to_cpu(asrt->u.assert.x),
272 be32_to_cpu(asrt->u.assert.y));
273 }
274
275 struct port_tx_state {
276 uint64_t rx_pause;
277 uint64_t tx_frames;
278 };
279
280 u32
t4_port_reg(struct adapter * adap,u8 port,u32 reg)281 t4_port_reg(struct adapter *adap, u8 port, u32 reg)
282 {
283 if (chip_id(adap) > CHELSIO_T6)
284 return T7_PORT_REG(port, reg);
285 if (chip_id(adap) > CHELSIO_T4)
286 return T5_PORT_REG(port, reg);
287 return PORT_REG(port, reg);
288 }
289
290 static void
read_tx_state_one(struct adapter * sc,int i,struct port_tx_state * tx_state)291 read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
292 {
293 uint32_t rx_pause_reg, tx_frames_reg;
294
295 rx_pause_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
296 tx_frames_reg = t4_port_reg(sc, i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
297
298 tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
299 tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
300 }
301
302 static void
read_tx_state(struct adapter * sc,struct port_tx_state * tx_state)303 read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
304 {
305 int i;
306
307 for (i = 0; i < MAX_NCHAN; i++) {
308 if (sc->chan_map[i] != 0xff)
309 read_tx_state_one(sc, i, &tx_state[i]);
310 }
311 }
312
313 static void
check_tx_state(struct adapter * sc,struct port_tx_state * tx_state)314 check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
315 {
316 uint32_t port_ctl_reg;
317 uint64_t tx_frames, rx_pause;
318 int i;
319
320 for (i = 0; i < MAX_NCHAN; i++) {
321 if (sc->chan_map[i] == 0xff)
322 continue;
323 rx_pause = tx_state[i].rx_pause;
324 tx_frames = tx_state[i].tx_frames;
325 read_tx_state_one(sc, i, &tx_state[i]); /* update */
326
327 port_ctl_reg = t4_port_reg(sc, i, A_MPS_PORT_CTL);
328 if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
329 rx_pause != tx_state[i].rx_pause &&
330 tx_frames == tx_state[i].tx_frames) {
331 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
332 mdelay(1);
333 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
334 }
335 }
336 }
337
338 #define X_CIM_PF_NOACCESS 0xeeeeeeee
339 /**
340 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
341 * @adap: the adapter
342 * @mbox: index of the mailbox to use
343 * @cmd: the command to write
344 * @size: command length in bytes
345 * @rpl: where to optionally store the reply
346 * @sleep_ok: if true we may sleep while awaiting command completion
347 * @timeout: time to wait for command to finish before timing out
348 * (negative implies @sleep_ok=false)
349 *
350 * Sends the given command to FW through the selected mailbox and waits
351 * for the FW to execute the command. If @rpl is not %NULL it is used to
352 * store the FW's reply to the command. The command and its optional
353 * reply are of the same length. Some FW commands like RESET and
354 * INITIALIZE can take a considerable amount of time to execute.
355 * @sleep_ok determines whether we may sleep while awaiting the response.
356 * If sleeping is allowed we use progressive backoff otherwise we spin.
357 * Note that passing in a negative @timeout is an alternate mechanism
358 * for specifying @sleep_ok=false. This is useful when a higher level
359 * interface allows for specification of @timeout but not @sleep_ok ...
360 *
361 * The return value is 0 on success or a negative errno on failure. A
362 * failure can happen either because we are not able to execute the
363 * command or FW executes it but signals an error. In the latter case
364 * the return value is the error code indicated by FW (negated).
365 */
t4_wr_mbox_meat_timeout(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,bool sleep_ok,int timeout)366 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
367 int size, void *rpl, bool sleep_ok, int timeout)
368 {
369 /*
370 * We delay in small increments at first in an effort to maintain
371 * responsiveness for simple, fast executing commands but then back
372 * off to larger delays to a maximum retry delay.
373 */
374 static const int delay[] = {
375 1, 1, 3, 5, 10, 10, 20, 50, 100
376 };
377 u32 v;
378 u64 res;
379 int i, ms, delay_idx, ret, next_tx_check;
380 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
381 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
382 u32 ctl;
383 __be64 cmd_rpl[MBOX_LEN/8];
384 u32 pcie_fw;
385 struct port_tx_state tx_state[MAX_NPORTS];
386
387 if (adap->flags & CHK_MBOX_ACCESS)
388 ASSERT_SYNCHRONIZED_OP(adap);
389
390 if (size <= 0 || (size & 15) || size > MBOX_LEN)
391 return -EINVAL;
392
393 if (adap->flags & IS_VF) {
394 if (chip_id(adap) >= CHELSIO_T6)
395 data_reg = FW_T6VF_MBDATA_BASE_ADDR;
396 else
397 data_reg = FW_T4VF_MBDATA_BASE_ADDR;
398 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
399 }
400
401 /*
402 * If we have a negative timeout, that implies that we can't sleep.
403 */
404 if (timeout < 0) {
405 sleep_ok = false;
406 timeout = -timeout;
407 }
408
409 /*
410 * Attempt to gain access to the mailbox.
411 */
412 pcie_fw = 0;
413 if (!(adap->flags & IS_VF)) {
414 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
415 if (pcie_fw & F_PCIE_FW_ERR)
416 goto failed;
417 }
418 for (i = 0; i < 4; i++) {
419 ctl = t4_read_reg(adap, ctl_reg);
420 v = G_MBOWNER(ctl);
421 if (v != X_MBOWNER_NONE)
422 break;
423 }
424
425 /*
426 * If we were unable to gain access, report the error to our caller.
427 */
428 if (v != X_MBOWNER_PL) {
429 if (!(adap->flags & IS_VF)) {
430 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
431 if (pcie_fw & F_PCIE_FW_ERR)
432 goto failed;
433 }
434 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
435 return ret;
436 }
437
438 /*
439 * If we gain ownership of the mailbox and there's a "valid" message
440 * in it, this is likely an asynchronous error message from the
441 * firmware. So we'll report that and then proceed on with attempting
442 * to issue our own command ... which may well fail if the error
443 * presaged the firmware crashing ...
444 */
445 if (ctl & F_MBMSGVALID) {
446 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
447 }
448
449 /*
450 * Copy in the new mailbox command and send it on its way ...
451 */
452 memset(cmd_rpl, 0, sizeof(cmd_rpl));
453 memcpy(cmd_rpl, cmd, size);
454 CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
455 for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
456 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));
457
458 if (adap->flags & IS_VF) {
459 /*
460 * For the VFs, the Mailbox Data "registers" are
461 * actually backed by T4's "MA" interface rather than
462 * PL Registers (as is the case for the PFs). Because
463 * these are in different coherency domains, the write
464 * to the VF's PL-register-backed Mailbox Control can
465 * race in front of the writes to the MA-backed VF
466 * Mailbox Data "registers". So we need to do a
467 * read-back on at least one byte of the VF Mailbox
468 * Data registers before doing the write to the VF
469 * Mailbox Control register.
470 */
471 t4_read_reg(adap, data_reg);
472 }
473
474 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
475 read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */
476 next_tx_check = 1000;
477 delay_idx = 0;
478 ms = delay[0];
479
480 /*
481 * Loop waiting for the reply; bail out if we time out or the firmware
482 * reports an error.
483 */
484 for (i = 0; i < timeout; i += ms) {
485 if (!(adap->flags & IS_VF)) {
486 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
487 if (pcie_fw & F_PCIE_FW_ERR)
488 break;
489 }
490
491 if (i >= next_tx_check) {
492 check_tx_state(adap, &tx_state[0]);
493 next_tx_check = i + 1000;
494 }
495
496 if (sleep_ok) {
497 ms = delay[delay_idx]; /* last element may repeat */
498 if (delay_idx < ARRAY_SIZE(delay) - 1)
499 delay_idx++;
500 msleep(ms);
501 } else {
502 mdelay(ms);
503 }
504
505 v = t4_read_reg(adap, ctl_reg);
506 if (v == X_CIM_PF_NOACCESS)
507 continue;
508 if (G_MBOWNER(v) == X_MBOWNER_PL) {
509 if (!(v & F_MBMSGVALID)) {
510 t4_write_reg(adap, ctl_reg,
511 V_MBOWNER(X_MBOWNER_NONE));
512 continue;
513 }
514
515 /*
516 * Retrieve the command reply and release the mailbox.
517 */
518 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
519 CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
520 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
521
522 res = be64_to_cpu(cmd_rpl[0]);
523 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
524 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
525 res = V_FW_CMD_RETVAL(EIO);
526 } else if (rpl)
527 memcpy(rpl, cmd_rpl, size);
528 return -G_FW_CMD_RETVAL((int)res);
529 }
530 }
531
532 /*
533 * We timed out waiting for a reply to our mailbox command. Report
534 * the error and also check to see if the firmware reported any
535 * errors ...
536 */
537 CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
538 *(const u8 *)cmd, mbox, pcie_fw);
539 CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
540 CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);
541 failed:
542 adap->flags &= ~FW_OK;
543 ret = pcie_fw & F_PCIE_FW_ERR ? -ENXIO : -ETIMEDOUT;
544 t4_fatal_err(adap, true);
545 return ret;
546 }
547
t4_wr_mbox_meat(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,bool sleep_ok)548 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
549 void *rpl, bool sleep_ok)
550 {
551 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
552 sleep_ok, FW_CMD_MAX_TIMEOUT);
553 }
554
t4_edc_err_read(struct adapter * adap,int idx)555 static int t4_edc_err_read(struct adapter *adap, int idx)
556 {
557 u32 edc_ecc_err_addr_reg;
558 u32 edc_bist_status_rdata_reg;
559
560 if (is_t4(adap)) {
561 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
562 return 0;
563 }
564 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
565 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
566 return 0;
567 }
568
569 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
570 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
571
572 CH_WARN(adap,
573 "edc%d err addr 0x%x: 0x%x.\n",
574 idx, edc_ecc_err_addr_reg,
575 t4_read_reg(adap, edc_ecc_err_addr_reg));
576 CH_WARN(adap,
577 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
578 edc_bist_status_rdata_reg,
579 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
580 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
581 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
582 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
583 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
584 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
585 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
586 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
587 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
588
589 return 0;
590 }
591
592 /**
593 * t4_mc_read - read from MC through backdoor accesses
594 * @adap: the adapter
595 * @idx: which MC to access
596 * @addr: address of first byte requested
597 * @data: 64 bytes of data containing the requested address
598 * @ecc: where to store the corresponding 64-bit ECC word
599 *
600 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
601 * that covers the requested address @addr. If @parity is not %NULL it
602 * is assigned the 64-bit ECC word for the read data.
603 */
t4_mc_read(struct adapter * adap,int idx,u32 addr,__be32 * data,u64 * ecc)604 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
605 {
606 int i;
607 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
608 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
609
610 if (is_t4(adap)) {
611 mc_bist_cmd_reg = A_MC_BIST_CMD;
612 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
613 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
614 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
615 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
616 } else if (chip_id(adap) < CHELSIO_T7) {
617 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
618 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
619 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
620 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, idx);
621 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, idx);
622 } else {
623 /* Need to figure out split mode and the rest. */
624 return (-ENOTSUP);
625 }
626
627 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
628 return -EBUSY;
629 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
630 t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
631 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
632 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
633 F_START_BIST | V_BIST_CMD_GAP(1));
634 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
635 if (i)
636 return i;
637
638 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
639
640 for (i = 15; i >= 0; i--)
641 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
642 if (ecc)
643 *ecc = t4_read_reg64(adap, MC_DATA(16));
644 #undef MC_DATA
645 return 0;
646 }
647
648 /**
649 * t4_edc_read - read from EDC through backdoor accesses
650 * @adap: the adapter
651 * @idx: which EDC to access
652 * @addr: address of first byte requested
653 * @data: 64 bytes of data containing the requested address
654 * @ecc: where to store the corresponding 64-bit ECC word
655 *
656 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
657 * that covers the requested address @addr. If @parity is not %NULL it
658 * is assigned the 64-bit ECC word for the read data.
659 */
t4_edc_read(struct adapter * adap,int idx,u32 addr,__be32 * data,u64 * ecc)660 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
661 {
662 int i;
663 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
664 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
665
666 if (is_t4(adap)) {
667 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
668 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
669 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
670 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
671 idx);
672 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
673 idx);
674 } else {
675 edc_bist_cmd_reg = EDC_T5_REG(A_EDC_H_BIST_CMD, idx);
676 edc_bist_cmd_addr_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_ADDR, idx);
677 edc_bist_cmd_len_reg = EDC_T5_REG(A_EDC_H_BIST_CMD_LEN, idx);
678 edc_bist_cmd_data_pattern = EDC_T5_REG(A_EDC_H_BIST_DATA_PATTERN,
679 idx);
680 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA,
681 idx);
682 }
683
684 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
685 return -EBUSY;
686 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
687 t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
688 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
689 t4_write_reg(adap, edc_bist_cmd_reg,
690 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
691 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
692 if (i)
693 return i;
694
695 #define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
696
697 for (i = 15; i >= 0; i--)
698 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
699 if (ecc)
700 *ecc = t4_read_reg64(adap, EDC_DATA(16));
701 #undef EDC_DATA
702 return 0;
703 }
704
705 /**
706 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
707 * @adap: the adapter
708 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
709 * @addr: address within indicated memory type
710 * @len: amount of memory to read
711 * @buf: host memory buffer
712 *
713 * Reads an [almost] arbitrary memory region in the firmware: the
714 * firmware memory address, length and host buffer must be aligned on
715 * 32-bit boudaries. The memory is returned as a raw byte sequence from
716 * the firmware's memory. If this memory contains data structures which
717 * contain multi-byte integers, it's the callers responsibility to
718 * perform appropriate byte order conversions.
719 */
t4_mem_read(struct adapter * adap,int mtype,u32 addr,u32 len,__be32 * buf)720 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
721 __be32 *buf)
722 {
723 u32 pos, start, end, offset;
724 int ret;
725
726 /*
727 * Argument sanity checks ...
728 */
729 if ((addr & 0x3) || (len & 0x3))
730 return -EINVAL;
731
732 /*
733 * The underlaying EDC/MC read routines read 64 bytes at a time so we
734 * need to round down the start and round up the end. We'll start
735 * copying out of the first line at (addr - start) a word at a time.
736 */
737 start = rounddown2(addr, 64);
738 end = roundup2(addr + len, 64);
739 offset = (addr - start)/sizeof(__be32);
740
741 for (pos = start; pos < end; pos += 64, offset = 0) {
742 __be32 data[16];
743
744 /*
745 * Read the chip's memory block and bail if there's an error.
746 */
747 if ((mtype == MEM_MC) || (mtype == MEM_MC1))
748 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
749 else
750 ret = t4_edc_read(adap, mtype, pos, data, NULL);
751 if (ret)
752 return ret;
753
754 /*
755 * Copy the data into the caller's memory buffer.
756 */
757 while (offset < 16 && len > 0) {
758 *buf++ = data[offset++];
759 len -= sizeof(__be32);
760 }
761 }
762
763 return 0;
764 }
765
766 /*
767 * Return the specified PCI-E Configuration Space register from our Physical
768 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
769 * since we prefer to let the firmware own all of these registers, but if that
770 * fails we go for it directly ourselves.
771 */
t4_read_pcie_cfg4(struct adapter * adap,int reg,int drv_fw_attach)772 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
773 {
774
775 /*
776 * If fw_attach != 0, construct and send the Firmware LDST Command to
777 * retrieve the specified PCI-E Configuration Space register.
778 */
779 if (drv_fw_attach != 0) {
780 struct fw_ldst_cmd ldst_cmd;
781 int ret;
782
783 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
784 ldst_cmd.op_to_addrspace =
785 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
786 F_FW_CMD_REQUEST |
787 F_FW_CMD_READ |
788 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
789 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
790 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
791 ldst_cmd.u.pcie.ctrl_to_fn =
792 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
793 ldst_cmd.u.pcie.r = reg;
794
795 /*
796 * If the LDST Command succeeds, return the result, otherwise
797 * fall through to reading it directly ourselves ...
798 */
799 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
800 &ldst_cmd);
801 if (ret == 0)
802 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
803
804 CH_WARN(adap, "Firmware failed to return "
805 "Configuration Space register %d, err = %d\n",
806 reg, -ret);
807 }
808
809 /*
810 * Read the desired Configuration Space register via the PCI-E
811 * Backdoor mechanism.
812 */
813 return t4_hw_pci_read_cfg4(adap, reg);
814 }
815
816 /**
817 * t4_get_regs_len - return the size of the chips register set
818 * @adapter: the adapter
819 *
820 * Returns the size of the chip's BAR0 register space.
821 */
t4_get_regs_len(struct adapter * adapter)822 unsigned int t4_get_regs_len(struct adapter *adapter)
823 {
824 unsigned int chip_version = chip_id(adapter);
825
826 switch (chip_version) {
827 case CHELSIO_T4:
828 if (adapter->flags & IS_VF)
829 return FW_T4VF_REGMAP_SIZE;
830 return T4_REGMAP_SIZE;
831
832 case CHELSIO_T5:
833 case CHELSIO_T6:
834 case CHELSIO_T7:
835 if (adapter->flags & IS_VF)
836 return FW_T4VF_REGMAP_SIZE;
837 return T5_REGMAP_SIZE;
838 }
839
840 CH_ERR(adapter,
841 "Unsupported chip version %d\n", chip_version);
842 return 0;
843 }
844
845 /**
846 * t4_get_regs - read chip registers into provided buffer
847 * @adap: the adapter
848 * @buf: register buffer
849 * @buf_size: size (in bytes) of register buffer
850 *
851 * If the provided register buffer isn't large enough for the chip's
852 * full register range, the register dump will be truncated to the
853 * register buffer's size.
854 */
t4_get_regs(struct adapter * adap,u8 * buf,size_t buf_size)855 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
856 {
857 static const unsigned int t4_reg_ranges[] = {
858 0x1008, 0x1108,
859 0x1180, 0x1184,
860 0x1190, 0x1194,
861 0x11a0, 0x11a4,
862 0x11b0, 0x11b4,
863 0x11fc, 0x123c,
864 0x1300, 0x173c,
865 0x1800, 0x18fc,
866 0x3000, 0x30d8,
867 0x30e0, 0x30e4,
868 0x30ec, 0x5910,
869 0x5920, 0x5924,
870 0x5960, 0x5960,
871 0x5968, 0x5968,
872 0x5970, 0x5970,
873 0x5978, 0x5978,
874 0x5980, 0x5980,
875 0x5988, 0x5988,
876 0x5990, 0x5990,
877 0x5998, 0x5998,
878 0x59a0, 0x59d4,
879 0x5a00, 0x5ae0,
880 0x5ae8, 0x5ae8,
881 0x5af0, 0x5af0,
882 0x5af8, 0x5af8,
883 0x6000, 0x6098,
884 0x6100, 0x6150,
885 0x6200, 0x6208,
886 0x6240, 0x6248,
887 0x6280, 0x62b0,
888 0x62c0, 0x6338,
889 0x6370, 0x638c,
890 0x6400, 0x643c,
891 0x6500, 0x6524,
892 0x6a00, 0x6a04,
893 0x6a14, 0x6a38,
894 0x6a60, 0x6a70,
895 0x6a78, 0x6a78,
896 0x6b00, 0x6b0c,
897 0x6b1c, 0x6b84,
898 0x6bf0, 0x6bf8,
899 0x6c00, 0x6c0c,
900 0x6c1c, 0x6c84,
901 0x6cf0, 0x6cf8,
902 0x6d00, 0x6d0c,
903 0x6d1c, 0x6d84,
904 0x6df0, 0x6df8,
905 0x6e00, 0x6e0c,
906 0x6e1c, 0x6e84,
907 0x6ef0, 0x6ef8,
908 0x6f00, 0x6f0c,
909 0x6f1c, 0x6f84,
910 0x6ff0, 0x6ff8,
911 0x7000, 0x700c,
912 0x701c, 0x7084,
913 0x70f0, 0x70f8,
914 0x7100, 0x710c,
915 0x711c, 0x7184,
916 0x71f0, 0x71f8,
917 0x7200, 0x720c,
918 0x721c, 0x7284,
919 0x72f0, 0x72f8,
920 0x7300, 0x730c,
921 0x731c, 0x7384,
922 0x73f0, 0x73f8,
923 0x7400, 0x7450,
924 0x7500, 0x7530,
925 0x7600, 0x760c,
926 0x7614, 0x761c,
927 0x7680, 0x76cc,
928 0x7700, 0x7798,
929 0x77c0, 0x77fc,
930 0x7900, 0x79fc,
931 0x7b00, 0x7b58,
932 0x7b60, 0x7b84,
933 0x7b8c, 0x7c38,
934 0x7d00, 0x7d38,
935 0x7d40, 0x7d80,
936 0x7d8c, 0x7ddc,
937 0x7de4, 0x7e04,
938 0x7e10, 0x7e1c,
939 0x7e24, 0x7e38,
940 0x7e40, 0x7e44,
941 0x7e4c, 0x7e78,
942 0x7e80, 0x7ea4,
943 0x7eac, 0x7edc,
944 0x7ee8, 0x7efc,
945 0x8dc0, 0x8e04,
946 0x8e10, 0x8e1c,
947 0x8e30, 0x8e78,
948 0x8ea0, 0x8eb8,
949 0x8ec0, 0x8f6c,
950 0x8fc0, 0x9008,
951 0x9010, 0x9058,
952 0x9060, 0x9060,
953 0x9068, 0x9074,
954 0x90fc, 0x90fc,
955 0x9400, 0x9408,
956 0x9410, 0x9458,
957 0x9600, 0x9600,
958 0x9608, 0x9638,
959 0x9640, 0x96bc,
960 0x9800, 0x9808,
961 0x9820, 0x983c,
962 0x9850, 0x9864,
963 0x9c00, 0x9c6c,
964 0x9c80, 0x9cec,
965 0x9d00, 0x9d6c,
966 0x9d80, 0x9dec,
967 0x9e00, 0x9e6c,
968 0x9e80, 0x9eec,
969 0x9f00, 0x9f6c,
970 0x9f80, 0x9fec,
971 0xd004, 0xd004,
972 0xd010, 0xd03c,
973 0xdfc0, 0xdfe0,
974 0xe000, 0xea7c,
975 0xf000, 0x11110,
976 0x11118, 0x11190,
977 0x19040, 0x1906c,
978 0x19078, 0x19080,
979 0x1908c, 0x190e4,
980 0x190f0, 0x190f8,
981 0x19100, 0x19110,
982 0x19120, 0x19124,
983 0x19150, 0x19194,
984 0x1919c, 0x191b0,
985 0x191d0, 0x191e8,
986 0x19238, 0x1924c,
987 0x193f8, 0x1943c,
988 0x1944c, 0x19474,
989 0x19490, 0x194e0,
990 0x194f0, 0x194f8,
991 0x19800, 0x19c08,
992 0x19c10, 0x19c90,
993 0x19ca0, 0x19ce4,
994 0x19cf0, 0x19d40,
995 0x19d50, 0x19d94,
996 0x19da0, 0x19de8,
997 0x19df0, 0x19e40,
998 0x19e50, 0x19e90,
999 0x19ea0, 0x19f4c,
1000 0x1a000, 0x1a004,
1001 0x1a010, 0x1a06c,
1002 0x1a0b0, 0x1a0e4,
1003 0x1a0ec, 0x1a0f4,
1004 0x1a100, 0x1a108,
1005 0x1a114, 0x1a120,
1006 0x1a128, 0x1a130,
1007 0x1a138, 0x1a138,
1008 0x1a190, 0x1a1c4,
1009 0x1a1fc, 0x1a1fc,
1010 0x1e040, 0x1e04c,
1011 0x1e284, 0x1e28c,
1012 0x1e2c0, 0x1e2c0,
1013 0x1e2e0, 0x1e2e0,
1014 0x1e300, 0x1e384,
1015 0x1e3c0, 0x1e3c8,
1016 0x1e440, 0x1e44c,
1017 0x1e684, 0x1e68c,
1018 0x1e6c0, 0x1e6c0,
1019 0x1e6e0, 0x1e6e0,
1020 0x1e700, 0x1e784,
1021 0x1e7c0, 0x1e7c8,
1022 0x1e840, 0x1e84c,
1023 0x1ea84, 0x1ea8c,
1024 0x1eac0, 0x1eac0,
1025 0x1eae0, 0x1eae0,
1026 0x1eb00, 0x1eb84,
1027 0x1ebc0, 0x1ebc8,
1028 0x1ec40, 0x1ec4c,
1029 0x1ee84, 0x1ee8c,
1030 0x1eec0, 0x1eec0,
1031 0x1eee0, 0x1eee0,
1032 0x1ef00, 0x1ef84,
1033 0x1efc0, 0x1efc8,
1034 0x1f040, 0x1f04c,
1035 0x1f284, 0x1f28c,
1036 0x1f2c0, 0x1f2c0,
1037 0x1f2e0, 0x1f2e0,
1038 0x1f300, 0x1f384,
1039 0x1f3c0, 0x1f3c8,
1040 0x1f440, 0x1f44c,
1041 0x1f684, 0x1f68c,
1042 0x1f6c0, 0x1f6c0,
1043 0x1f6e0, 0x1f6e0,
1044 0x1f700, 0x1f784,
1045 0x1f7c0, 0x1f7c8,
1046 0x1f840, 0x1f84c,
1047 0x1fa84, 0x1fa8c,
1048 0x1fac0, 0x1fac0,
1049 0x1fae0, 0x1fae0,
1050 0x1fb00, 0x1fb84,
1051 0x1fbc0, 0x1fbc8,
1052 0x1fc40, 0x1fc4c,
1053 0x1fe84, 0x1fe8c,
1054 0x1fec0, 0x1fec0,
1055 0x1fee0, 0x1fee0,
1056 0x1ff00, 0x1ff84,
1057 0x1ffc0, 0x1ffc8,
1058 0x20000, 0x2002c,
1059 0x20100, 0x2013c,
1060 0x20190, 0x201a0,
1061 0x201a8, 0x201b8,
1062 0x201c4, 0x201c8,
1063 0x20200, 0x20318,
1064 0x20400, 0x204b4,
1065 0x204c0, 0x20528,
1066 0x20540, 0x20614,
1067 0x21000, 0x21040,
1068 0x2104c, 0x21060,
1069 0x210c0, 0x210ec,
1070 0x21200, 0x21268,
1071 0x21270, 0x21284,
1072 0x212fc, 0x21388,
1073 0x21400, 0x21404,
1074 0x21500, 0x21500,
1075 0x21510, 0x21518,
1076 0x2152c, 0x21530,
1077 0x2153c, 0x2153c,
1078 0x21550, 0x21554,
1079 0x21600, 0x21600,
1080 0x21608, 0x2161c,
1081 0x21624, 0x21628,
1082 0x21630, 0x21634,
1083 0x2163c, 0x2163c,
1084 0x21700, 0x2171c,
1085 0x21780, 0x2178c,
1086 0x21800, 0x21818,
1087 0x21820, 0x21828,
1088 0x21830, 0x21848,
1089 0x21850, 0x21854,
1090 0x21860, 0x21868,
1091 0x21870, 0x21870,
1092 0x21878, 0x21898,
1093 0x218a0, 0x218a8,
1094 0x218b0, 0x218c8,
1095 0x218d0, 0x218d4,
1096 0x218e0, 0x218e8,
1097 0x218f0, 0x218f0,
1098 0x218f8, 0x21a18,
1099 0x21a20, 0x21a28,
1100 0x21a30, 0x21a48,
1101 0x21a50, 0x21a54,
1102 0x21a60, 0x21a68,
1103 0x21a70, 0x21a70,
1104 0x21a78, 0x21a98,
1105 0x21aa0, 0x21aa8,
1106 0x21ab0, 0x21ac8,
1107 0x21ad0, 0x21ad4,
1108 0x21ae0, 0x21ae8,
1109 0x21af0, 0x21af0,
1110 0x21af8, 0x21c18,
1111 0x21c20, 0x21c20,
1112 0x21c28, 0x21c30,
1113 0x21c38, 0x21c38,
1114 0x21c80, 0x21c98,
1115 0x21ca0, 0x21ca8,
1116 0x21cb0, 0x21cc8,
1117 0x21cd0, 0x21cd4,
1118 0x21ce0, 0x21ce8,
1119 0x21cf0, 0x21cf0,
1120 0x21cf8, 0x21d7c,
1121 0x21e00, 0x21e04,
1122 0x22000, 0x2202c,
1123 0x22100, 0x2213c,
1124 0x22190, 0x221a0,
1125 0x221a8, 0x221b8,
1126 0x221c4, 0x221c8,
1127 0x22200, 0x22318,
1128 0x22400, 0x224b4,
1129 0x224c0, 0x22528,
1130 0x22540, 0x22614,
1131 0x23000, 0x23040,
1132 0x2304c, 0x23060,
1133 0x230c0, 0x230ec,
1134 0x23200, 0x23268,
1135 0x23270, 0x23284,
1136 0x232fc, 0x23388,
1137 0x23400, 0x23404,
1138 0x23500, 0x23500,
1139 0x23510, 0x23518,
1140 0x2352c, 0x23530,
1141 0x2353c, 0x2353c,
1142 0x23550, 0x23554,
1143 0x23600, 0x23600,
1144 0x23608, 0x2361c,
1145 0x23624, 0x23628,
1146 0x23630, 0x23634,
1147 0x2363c, 0x2363c,
1148 0x23700, 0x2371c,
1149 0x23780, 0x2378c,
1150 0x23800, 0x23818,
1151 0x23820, 0x23828,
1152 0x23830, 0x23848,
1153 0x23850, 0x23854,
1154 0x23860, 0x23868,
1155 0x23870, 0x23870,
1156 0x23878, 0x23898,
1157 0x238a0, 0x238a8,
1158 0x238b0, 0x238c8,
1159 0x238d0, 0x238d4,
1160 0x238e0, 0x238e8,
1161 0x238f0, 0x238f0,
1162 0x238f8, 0x23a18,
1163 0x23a20, 0x23a28,
1164 0x23a30, 0x23a48,
1165 0x23a50, 0x23a54,
1166 0x23a60, 0x23a68,
1167 0x23a70, 0x23a70,
1168 0x23a78, 0x23a98,
1169 0x23aa0, 0x23aa8,
1170 0x23ab0, 0x23ac8,
1171 0x23ad0, 0x23ad4,
1172 0x23ae0, 0x23ae8,
1173 0x23af0, 0x23af0,
1174 0x23af8, 0x23c18,
1175 0x23c20, 0x23c20,
1176 0x23c28, 0x23c30,
1177 0x23c38, 0x23c38,
1178 0x23c80, 0x23c98,
1179 0x23ca0, 0x23ca8,
1180 0x23cb0, 0x23cc8,
1181 0x23cd0, 0x23cd4,
1182 0x23ce0, 0x23ce8,
1183 0x23cf0, 0x23cf0,
1184 0x23cf8, 0x23d7c,
1185 0x23e00, 0x23e04,
1186 0x24000, 0x2402c,
1187 0x24100, 0x2413c,
1188 0x24190, 0x241a0,
1189 0x241a8, 0x241b8,
1190 0x241c4, 0x241c8,
1191 0x24200, 0x24318,
1192 0x24400, 0x244b4,
1193 0x244c0, 0x24528,
1194 0x24540, 0x24614,
1195 0x25000, 0x25040,
1196 0x2504c, 0x25060,
1197 0x250c0, 0x250ec,
1198 0x25200, 0x25268,
1199 0x25270, 0x25284,
1200 0x252fc, 0x25388,
1201 0x25400, 0x25404,
1202 0x25500, 0x25500,
1203 0x25510, 0x25518,
1204 0x2552c, 0x25530,
1205 0x2553c, 0x2553c,
1206 0x25550, 0x25554,
1207 0x25600, 0x25600,
1208 0x25608, 0x2561c,
1209 0x25624, 0x25628,
1210 0x25630, 0x25634,
1211 0x2563c, 0x2563c,
1212 0x25700, 0x2571c,
1213 0x25780, 0x2578c,
1214 0x25800, 0x25818,
1215 0x25820, 0x25828,
1216 0x25830, 0x25848,
1217 0x25850, 0x25854,
1218 0x25860, 0x25868,
1219 0x25870, 0x25870,
1220 0x25878, 0x25898,
1221 0x258a0, 0x258a8,
1222 0x258b0, 0x258c8,
1223 0x258d0, 0x258d4,
1224 0x258e0, 0x258e8,
1225 0x258f0, 0x258f0,
1226 0x258f8, 0x25a18,
1227 0x25a20, 0x25a28,
1228 0x25a30, 0x25a48,
1229 0x25a50, 0x25a54,
1230 0x25a60, 0x25a68,
1231 0x25a70, 0x25a70,
1232 0x25a78, 0x25a98,
1233 0x25aa0, 0x25aa8,
1234 0x25ab0, 0x25ac8,
1235 0x25ad0, 0x25ad4,
1236 0x25ae0, 0x25ae8,
1237 0x25af0, 0x25af0,
1238 0x25af8, 0x25c18,
1239 0x25c20, 0x25c20,
1240 0x25c28, 0x25c30,
1241 0x25c38, 0x25c38,
1242 0x25c80, 0x25c98,
1243 0x25ca0, 0x25ca8,
1244 0x25cb0, 0x25cc8,
1245 0x25cd0, 0x25cd4,
1246 0x25ce0, 0x25ce8,
1247 0x25cf0, 0x25cf0,
1248 0x25cf8, 0x25d7c,
1249 0x25e00, 0x25e04,
1250 0x26000, 0x2602c,
1251 0x26100, 0x2613c,
1252 0x26190, 0x261a0,
1253 0x261a8, 0x261b8,
1254 0x261c4, 0x261c8,
1255 0x26200, 0x26318,
1256 0x26400, 0x264b4,
1257 0x264c0, 0x26528,
1258 0x26540, 0x26614,
1259 0x27000, 0x27040,
1260 0x2704c, 0x27060,
1261 0x270c0, 0x270ec,
1262 0x27200, 0x27268,
1263 0x27270, 0x27284,
1264 0x272fc, 0x27388,
1265 0x27400, 0x27404,
1266 0x27500, 0x27500,
1267 0x27510, 0x27518,
1268 0x2752c, 0x27530,
1269 0x2753c, 0x2753c,
1270 0x27550, 0x27554,
1271 0x27600, 0x27600,
1272 0x27608, 0x2761c,
1273 0x27624, 0x27628,
1274 0x27630, 0x27634,
1275 0x2763c, 0x2763c,
1276 0x27700, 0x2771c,
1277 0x27780, 0x2778c,
1278 0x27800, 0x27818,
1279 0x27820, 0x27828,
1280 0x27830, 0x27848,
1281 0x27850, 0x27854,
1282 0x27860, 0x27868,
1283 0x27870, 0x27870,
1284 0x27878, 0x27898,
1285 0x278a0, 0x278a8,
1286 0x278b0, 0x278c8,
1287 0x278d0, 0x278d4,
1288 0x278e0, 0x278e8,
1289 0x278f0, 0x278f0,
1290 0x278f8, 0x27a18,
1291 0x27a20, 0x27a28,
1292 0x27a30, 0x27a48,
1293 0x27a50, 0x27a54,
1294 0x27a60, 0x27a68,
1295 0x27a70, 0x27a70,
1296 0x27a78, 0x27a98,
1297 0x27aa0, 0x27aa8,
1298 0x27ab0, 0x27ac8,
1299 0x27ad0, 0x27ad4,
1300 0x27ae0, 0x27ae8,
1301 0x27af0, 0x27af0,
1302 0x27af8, 0x27c18,
1303 0x27c20, 0x27c20,
1304 0x27c28, 0x27c30,
1305 0x27c38, 0x27c38,
1306 0x27c80, 0x27c98,
1307 0x27ca0, 0x27ca8,
1308 0x27cb0, 0x27cc8,
1309 0x27cd0, 0x27cd4,
1310 0x27ce0, 0x27ce8,
1311 0x27cf0, 0x27cf0,
1312 0x27cf8, 0x27d7c,
1313 0x27e00, 0x27e04,
1314 };
1315
1316 static const unsigned int t4vf_reg_ranges[] = {
1317 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
1318 VF_MPS_REG(A_MPS_VF_CTL),
1319 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
1320 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
1321 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
1322 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
1323 FW_T4VF_MBDATA_BASE_ADDR,
1324 FW_T4VF_MBDATA_BASE_ADDR +
1325 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
1326 };
1327
1328 static const unsigned int t5_reg_ranges[] = {
1329 0x1008, 0x10c0,
1330 0x10cc, 0x10f8,
1331 0x1100, 0x1100,
1332 0x110c, 0x1148,
1333 0x1180, 0x1184,
1334 0x1190, 0x1194,
1335 0x11a0, 0x11a4,
1336 0x11b0, 0x11b4,
1337 0x11fc, 0x123c,
1338 0x1280, 0x173c,
1339 0x1800, 0x18fc,
1340 0x3000, 0x3028,
1341 0x3060, 0x30b0,
1342 0x30b8, 0x30d8,
1343 0x30e0, 0x30fc,
1344 0x3140, 0x357c,
1345 0x35a8, 0x35cc,
1346 0x35ec, 0x35ec,
1347 0x3600, 0x5624,
1348 0x56cc, 0x56ec,
1349 0x56f4, 0x5720,
1350 0x5728, 0x575c,
1351 0x580c, 0x5814,
1352 0x5890, 0x589c,
1353 0x58a4, 0x58ac,
1354 0x58b8, 0x58bc,
1355 0x5940, 0x59c8,
1356 0x59d0, 0x59dc,
1357 0x59fc, 0x5a18,
1358 0x5a60, 0x5a70,
1359 0x5a80, 0x5a9c,
1360 0x5b94, 0x5bfc,
1361 0x6000, 0x6020,
1362 0x6028, 0x6040,
1363 0x6058, 0x609c,
1364 0x60a8, 0x614c,
1365 0x7700, 0x7798,
1366 0x77c0, 0x78fc,
1367 0x7b00, 0x7b58,
1368 0x7b60, 0x7b84,
1369 0x7b8c, 0x7c54,
1370 0x7d00, 0x7d38,
1371 0x7d40, 0x7d80,
1372 0x7d8c, 0x7ddc,
1373 0x7de4, 0x7e04,
1374 0x7e10, 0x7e1c,
1375 0x7e24, 0x7e38,
1376 0x7e40, 0x7e44,
1377 0x7e4c, 0x7e78,
1378 0x7e80, 0x7edc,
1379 0x7ee8, 0x7efc,
1380 0x8dc0, 0x8de0,
1381 0x8df8, 0x8e04,
1382 0x8e10, 0x8e84,
1383 0x8ea0, 0x8f84,
1384 0x8fc0, 0x9058,
1385 0x9060, 0x9060,
1386 0x9068, 0x90f8,
1387 0x9400, 0x9408,
1388 0x9410, 0x9470,
1389 0x9600, 0x9600,
1390 0x9608, 0x9638,
1391 0x9640, 0x96f4,
1392 0x9800, 0x9808,
1393 0x9810, 0x9864,
1394 0x9c00, 0x9c6c,
1395 0x9c80, 0x9cec,
1396 0x9d00, 0x9d6c,
1397 0x9d80, 0x9dec,
1398 0x9e00, 0x9e6c,
1399 0x9e80, 0x9eec,
1400 0x9f00, 0x9f6c,
1401 0x9f80, 0xa020,
1402 0xd000, 0xd004,
1403 0xd010, 0xd03c,
1404 0xdfc0, 0xdfe0,
1405 0xe000, 0x1106c,
1406 0x11074, 0x11088,
1407 0x1109c, 0x11110,
1408 0x11118, 0x1117c,
1409 0x11190, 0x11204,
1410 0x19040, 0x1906c,
1411 0x19078, 0x19080,
1412 0x1908c, 0x190e8,
1413 0x190f0, 0x190f8,
1414 0x19100, 0x19110,
1415 0x19120, 0x19124,
1416 0x19150, 0x19194,
1417 0x1919c, 0x191b0,
1418 0x191d0, 0x191e8,
1419 0x19238, 0x19290,
1420 0x193f8, 0x19428,
1421 0x19430, 0x19444,
1422 0x1944c, 0x1946c,
1423 0x19474, 0x19474,
1424 0x19490, 0x194cc,
1425 0x194f0, 0x194f8,
1426 0x19c00, 0x19c08,
1427 0x19c10, 0x19c60,
1428 0x19c94, 0x19ce4,
1429 0x19cf0, 0x19d40,
1430 0x19d50, 0x19d94,
1431 0x19da0, 0x19de8,
1432 0x19df0, 0x19e10,
1433 0x19e50, 0x19e90,
1434 0x19ea0, 0x19f24,
1435 0x19f34, 0x19f34,
1436 0x19f40, 0x19f50,
1437 0x19f90, 0x19fb4,
1438 0x19fc4, 0x19fe4,
1439 0x1a000, 0x1a004,
1440 0x1a010, 0x1a06c,
1441 0x1a0b0, 0x1a0e4,
1442 0x1a0ec, 0x1a0f8,
1443 0x1a100, 0x1a108,
1444 0x1a114, 0x1a130,
1445 0x1a138, 0x1a1c4,
1446 0x1a1fc, 0x1a1fc,
1447 0x1e008, 0x1e00c,
1448 0x1e040, 0x1e044,
1449 0x1e04c, 0x1e04c,
1450 0x1e284, 0x1e290,
1451 0x1e2c0, 0x1e2c0,
1452 0x1e2e0, 0x1e2e0,
1453 0x1e300, 0x1e384,
1454 0x1e3c0, 0x1e3c8,
1455 0x1e408, 0x1e40c,
1456 0x1e440, 0x1e444,
1457 0x1e44c, 0x1e44c,
1458 0x1e684, 0x1e690,
1459 0x1e6c0, 0x1e6c0,
1460 0x1e6e0, 0x1e6e0,
1461 0x1e700, 0x1e784,
1462 0x1e7c0, 0x1e7c8,
1463 0x1e808, 0x1e80c,
1464 0x1e840, 0x1e844,
1465 0x1e84c, 0x1e84c,
1466 0x1ea84, 0x1ea90,
1467 0x1eac0, 0x1eac0,
1468 0x1eae0, 0x1eae0,
1469 0x1eb00, 0x1eb84,
1470 0x1ebc0, 0x1ebc8,
1471 0x1ec08, 0x1ec0c,
1472 0x1ec40, 0x1ec44,
1473 0x1ec4c, 0x1ec4c,
1474 0x1ee84, 0x1ee90,
1475 0x1eec0, 0x1eec0,
1476 0x1eee0, 0x1eee0,
1477 0x1ef00, 0x1ef84,
1478 0x1efc0, 0x1efc8,
1479 0x1f008, 0x1f00c,
1480 0x1f040, 0x1f044,
1481 0x1f04c, 0x1f04c,
1482 0x1f284, 0x1f290,
1483 0x1f2c0, 0x1f2c0,
1484 0x1f2e0, 0x1f2e0,
1485 0x1f300, 0x1f384,
1486 0x1f3c0, 0x1f3c8,
1487 0x1f408, 0x1f40c,
1488 0x1f440, 0x1f444,
1489 0x1f44c, 0x1f44c,
1490 0x1f684, 0x1f690,
1491 0x1f6c0, 0x1f6c0,
1492 0x1f6e0, 0x1f6e0,
1493 0x1f700, 0x1f784,
1494 0x1f7c0, 0x1f7c8,
1495 0x1f808, 0x1f80c,
1496 0x1f840, 0x1f844,
1497 0x1f84c, 0x1f84c,
1498 0x1fa84, 0x1fa90,
1499 0x1fac0, 0x1fac0,
1500 0x1fae0, 0x1fae0,
1501 0x1fb00, 0x1fb84,
1502 0x1fbc0, 0x1fbc8,
1503 0x1fc08, 0x1fc0c,
1504 0x1fc40, 0x1fc44,
1505 0x1fc4c, 0x1fc4c,
1506 0x1fe84, 0x1fe90,
1507 0x1fec0, 0x1fec0,
1508 0x1fee0, 0x1fee0,
1509 0x1ff00, 0x1ff84,
1510 0x1ffc0, 0x1ffc8,
1511 0x30000, 0x30030,
1512 0x30100, 0x30144,
1513 0x30190, 0x301a0,
1514 0x301a8, 0x301b8,
1515 0x301c4, 0x301c8,
1516 0x301d0, 0x301d0,
1517 0x30200, 0x30318,
1518 0x30400, 0x304b4,
1519 0x304c0, 0x3052c,
1520 0x30540, 0x3061c,
1521 0x30800, 0x30828,
1522 0x30834, 0x30834,
1523 0x308c0, 0x30908,
1524 0x30910, 0x309ac,
1525 0x30a00, 0x30a14,
1526 0x30a1c, 0x30a2c,
1527 0x30a44, 0x30a50,
1528 0x30a74, 0x30a74,
1529 0x30a7c, 0x30afc,
1530 0x30b08, 0x30c24,
1531 0x30d00, 0x30d00,
1532 0x30d08, 0x30d14,
1533 0x30d1c, 0x30d20,
1534 0x30d3c, 0x30d3c,
1535 0x30d48, 0x30d50,
1536 0x31200, 0x3120c,
1537 0x31220, 0x31220,
1538 0x31240, 0x31240,
1539 0x31600, 0x3160c,
1540 0x31a00, 0x31a1c,
1541 0x31e00, 0x31e20,
1542 0x31e38, 0x31e3c,
1543 0x31e80, 0x31e80,
1544 0x31e88, 0x31ea8,
1545 0x31eb0, 0x31eb4,
1546 0x31ec8, 0x31ed4,
1547 0x31fb8, 0x32004,
1548 0x32200, 0x32200,
1549 0x32208, 0x32240,
1550 0x32248, 0x32280,
1551 0x32288, 0x322c0,
1552 0x322c8, 0x322fc,
1553 0x32600, 0x32630,
1554 0x32a00, 0x32abc,
1555 0x32b00, 0x32b10,
1556 0x32b20, 0x32b30,
1557 0x32b40, 0x32b50,
1558 0x32b60, 0x32b70,
1559 0x33000, 0x33028,
1560 0x33030, 0x33048,
1561 0x33060, 0x33068,
1562 0x33070, 0x3309c,
1563 0x330f0, 0x33128,
1564 0x33130, 0x33148,
1565 0x33160, 0x33168,
1566 0x33170, 0x3319c,
1567 0x331f0, 0x33238,
1568 0x33240, 0x33240,
1569 0x33248, 0x33250,
1570 0x3325c, 0x33264,
1571 0x33270, 0x332b8,
1572 0x332c0, 0x332e4,
1573 0x332f8, 0x33338,
1574 0x33340, 0x33340,
1575 0x33348, 0x33350,
1576 0x3335c, 0x33364,
1577 0x33370, 0x333b8,
1578 0x333c0, 0x333e4,
1579 0x333f8, 0x33428,
1580 0x33430, 0x33448,
1581 0x33460, 0x33468,
1582 0x33470, 0x3349c,
1583 0x334f0, 0x33528,
1584 0x33530, 0x33548,
1585 0x33560, 0x33568,
1586 0x33570, 0x3359c,
1587 0x335f0, 0x33638,
1588 0x33640, 0x33640,
1589 0x33648, 0x33650,
1590 0x3365c, 0x33664,
1591 0x33670, 0x336b8,
1592 0x336c0, 0x336e4,
1593 0x336f8, 0x33738,
1594 0x33740, 0x33740,
1595 0x33748, 0x33750,
1596 0x3375c, 0x33764,
1597 0x33770, 0x337b8,
1598 0x337c0, 0x337e4,
1599 0x337f8, 0x337fc,
1600 0x33814, 0x33814,
1601 0x3382c, 0x3382c,
1602 0x33880, 0x3388c,
1603 0x338e8, 0x338ec,
1604 0x33900, 0x33928,
1605 0x33930, 0x33948,
1606 0x33960, 0x33968,
1607 0x33970, 0x3399c,
1608 0x339f0, 0x33a38,
1609 0x33a40, 0x33a40,
1610 0x33a48, 0x33a50,
1611 0x33a5c, 0x33a64,
1612 0x33a70, 0x33ab8,
1613 0x33ac0, 0x33ae4,
1614 0x33af8, 0x33b10,
1615 0x33b28, 0x33b28,
1616 0x33b3c, 0x33b50,
1617 0x33bf0, 0x33c10,
1618 0x33c28, 0x33c28,
1619 0x33c3c, 0x33c50,
1620 0x33cf0, 0x33cfc,
1621 0x34000, 0x34030,
1622 0x34100, 0x34144,
1623 0x34190, 0x341a0,
1624 0x341a8, 0x341b8,
1625 0x341c4, 0x341c8,
1626 0x341d0, 0x341d0,
1627 0x34200, 0x34318,
1628 0x34400, 0x344b4,
1629 0x344c0, 0x3452c,
1630 0x34540, 0x3461c,
1631 0x34800, 0x34828,
1632 0x34834, 0x34834,
1633 0x348c0, 0x34908,
1634 0x34910, 0x349ac,
1635 0x34a00, 0x34a14,
1636 0x34a1c, 0x34a2c,
1637 0x34a44, 0x34a50,
1638 0x34a74, 0x34a74,
1639 0x34a7c, 0x34afc,
1640 0x34b08, 0x34c24,
1641 0x34d00, 0x34d00,
1642 0x34d08, 0x34d14,
1643 0x34d1c, 0x34d20,
1644 0x34d3c, 0x34d3c,
1645 0x34d48, 0x34d50,
1646 0x35200, 0x3520c,
1647 0x35220, 0x35220,
1648 0x35240, 0x35240,
1649 0x35600, 0x3560c,
1650 0x35a00, 0x35a1c,
1651 0x35e00, 0x35e20,
1652 0x35e38, 0x35e3c,
1653 0x35e80, 0x35e80,
1654 0x35e88, 0x35ea8,
1655 0x35eb0, 0x35eb4,
1656 0x35ec8, 0x35ed4,
1657 0x35fb8, 0x36004,
1658 0x36200, 0x36200,
1659 0x36208, 0x36240,
1660 0x36248, 0x36280,
1661 0x36288, 0x362c0,
1662 0x362c8, 0x362fc,
1663 0x36600, 0x36630,
1664 0x36a00, 0x36abc,
1665 0x36b00, 0x36b10,
1666 0x36b20, 0x36b30,
1667 0x36b40, 0x36b50,
1668 0x36b60, 0x36b70,
1669 0x37000, 0x37028,
1670 0x37030, 0x37048,
1671 0x37060, 0x37068,
1672 0x37070, 0x3709c,
1673 0x370f0, 0x37128,
1674 0x37130, 0x37148,
1675 0x37160, 0x37168,
1676 0x37170, 0x3719c,
1677 0x371f0, 0x37238,
1678 0x37240, 0x37240,
1679 0x37248, 0x37250,
1680 0x3725c, 0x37264,
1681 0x37270, 0x372b8,
1682 0x372c0, 0x372e4,
1683 0x372f8, 0x37338,
1684 0x37340, 0x37340,
1685 0x37348, 0x37350,
1686 0x3735c, 0x37364,
1687 0x37370, 0x373b8,
1688 0x373c0, 0x373e4,
1689 0x373f8, 0x37428,
1690 0x37430, 0x37448,
1691 0x37460, 0x37468,
1692 0x37470, 0x3749c,
1693 0x374f0, 0x37528,
1694 0x37530, 0x37548,
1695 0x37560, 0x37568,
1696 0x37570, 0x3759c,
1697 0x375f0, 0x37638,
1698 0x37640, 0x37640,
1699 0x37648, 0x37650,
1700 0x3765c, 0x37664,
1701 0x37670, 0x376b8,
1702 0x376c0, 0x376e4,
1703 0x376f8, 0x37738,
1704 0x37740, 0x37740,
1705 0x37748, 0x37750,
1706 0x3775c, 0x37764,
1707 0x37770, 0x377b8,
1708 0x377c0, 0x377e4,
1709 0x377f8, 0x377fc,
1710 0x37814, 0x37814,
1711 0x3782c, 0x3782c,
1712 0x37880, 0x3788c,
1713 0x378e8, 0x378ec,
1714 0x37900, 0x37928,
1715 0x37930, 0x37948,
1716 0x37960, 0x37968,
1717 0x37970, 0x3799c,
1718 0x379f0, 0x37a38,
1719 0x37a40, 0x37a40,
1720 0x37a48, 0x37a50,
1721 0x37a5c, 0x37a64,
1722 0x37a70, 0x37ab8,
1723 0x37ac0, 0x37ae4,
1724 0x37af8, 0x37b10,
1725 0x37b28, 0x37b28,
1726 0x37b3c, 0x37b50,
1727 0x37bf0, 0x37c10,
1728 0x37c28, 0x37c28,
1729 0x37c3c, 0x37c50,
1730 0x37cf0, 0x37cfc,
1731 0x38000, 0x38030,
1732 0x38100, 0x38144,
1733 0x38190, 0x381a0,
1734 0x381a8, 0x381b8,
1735 0x381c4, 0x381c8,
1736 0x381d0, 0x381d0,
1737 0x38200, 0x38318,
1738 0x38400, 0x384b4,
1739 0x384c0, 0x3852c,
1740 0x38540, 0x3861c,
1741 0x38800, 0x38828,
1742 0x38834, 0x38834,
1743 0x388c0, 0x38908,
1744 0x38910, 0x389ac,
1745 0x38a00, 0x38a14,
1746 0x38a1c, 0x38a2c,
1747 0x38a44, 0x38a50,
1748 0x38a74, 0x38a74,
1749 0x38a7c, 0x38afc,
1750 0x38b08, 0x38c24,
1751 0x38d00, 0x38d00,
1752 0x38d08, 0x38d14,
1753 0x38d1c, 0x38d20,
1754 0x38d3c, 0x38d3c,
1755 0x38d48, 0x38d50,
1756 0x39200, 0x3920c,
1757 0x39220, 0x39220,
1758 0x39240, 0x39240,
1759 0x39600, 0x3960c,
1760 0x39a00, 0x39a1c,
1761 0x39e00, 0x39e20,
1762 0x39e38, 0x39e3c,
1763 0x39e80, 0x39e80,
1764 0x39e88, 0x39ea8,
1765 0x39eb0, 0x39eb4,
1766 0x39ec8, 0x39ed4,
1767 0x39fb8, 0x3a004,
1768 0x3a200, 0x3a200,
1769 0x3a208, 0x3a240,
1770 0x3a248, 0x3a280,
1771 0x3a288, 0x3a2c0,
1772 0x3a2c8, 0x3a2fc,
1773 0x3a600, 0x3a630,
1774 0x3aa00, 0x3aabc,
1775 0x3ab00, 0x3ab10,
1776 0x3ab20, 0x3ab30,
1777 0x3ab40, 0x3ab50,
1778 0x3ab60, 0x3ab70,
1779 0x3b000, 0x3b028,
1780 0x3b030, 0x3b048,
1781 0x3b060, 0x3b068,
1782 0x3b070, 0x3b09c,
1783 0x3b0f0, 0x3b128,
1784 0x3b130, 0x3b148,
1785 0x3b160, 0x3b168,
1786 0x3b170, 0x3b19c,
1787 0x3b1f0, 0x3b238,
1788 0x3b240, 0x3b240,
1789 0x3b248, 0x3b250,
1790 0x3b25c, 0x3b264,
1791 0x3b270, 0x3b2b8,
1792 0x3b2c0, 0x3b2e4,
1793 0x3b2f8, 0x3b338,
1794 0x3b340, 0x3b340,
1795 0x3b348, 0x3b350,
1796 0x3b35c, 0x3b364,
1797 0x3b370, 0x3b3b8,
1798 0x3b3c0, 0x3b3e4,
1799 0x3b3f8, 0x3b428,
1800 0x3b430, 0x3b448,
1801 0x3b460, 0x3b468,
1802 0x3b470, 0x3b49c,
1803 0x3b4f0, 0x3b528,
1804 0x3b530, 0x3b548,
1805 0x3b560, 0x3b568,
1806 0x3b570, 0x3b59c,
1807 0x3b5f0, 0x3b638,
1808 0x3b640, 0x3b640,
1809 0x3b648, 0x3b650,
1810 0x3b65c, 0x3b664,
1811 0x3b670, 0x3b6b8,
1812 0x3b6c0, 0x3b6e4,
1813 0x3b6f8, 0x3b738,
1814 0x3b740, 0x3b740,
1815 0x3b748, 0x3b750,
1816 0x3b75c, 0x3b764,
1817 0x3b770, 0x3b7b8,
1818 0x3b7c0, 0x3b7e4,
1819 0x3b7f8, 0x3b7fc,
1820 0x3b814, 0x3b814,
1821 0x3b82c, 0x3b82c,
1822 0x3b880, 0x3b88c,
1823 0x3b8e8, 0x3b8ec,
1824 0x3b900, 0x3b928,
1825 0x3b930, 0x3b948,
1826 0x3b960, 0x3b968,
1827 0x3b970, 0x3b99c,
1828 0x3b9f0, 0x3ba38,
1829 0x3ba40, 0x3ba40,
1830 0x3ba48, 0x3ba50,
1831 0x3ba5c, 0x3ba64,
1832 0x3ba70, 0x3bab8,
1833 0x3bac0, 0x3bae4,
1834 0x3baf8, 0x3bb10,
1835 0x3bb28, 0x3bb28,
1836 0x3bb3c, 0x3bb50,
1837 0x3bbf0, 0x3bc10,
1838 0x3bc28, 0x3bc28,
1839 0x3bc3c, 0x3bc50,
1840 0x3bcf0, 0x3bcfc,
1841 0x3c000, 0x3c030,
1842 0x3c100, 0x3c144,
1843 0x3c190, 0x3c1a0,
1844 0x3c1a8, 0x3c1b8,
1845 0x3c1c4, 0x3c1c8,
1846 0x3c1d0, 0x3c1d0,
1847 0x3c200, 0x3c318,
1848 0x3c400, 0x3c4b4,
1849 0x3c4c0, 0x3c52c,
1850 0x3c540, 0x3c61c,
1851 0x3c800, 0x3c828,
1852 0x3c834, 0x3c834,
1853 0x3c8c0, 0x3c908,
1854 0x3c910, 0x3c9ac,
1855 0x3ca00, 0x3ca14,
1856 0x3ca1c, 0x3ca2c,
1857 0x3ca44, 0x3ca50,
1858 0x3ca74, 0x3ca74,
1859 0x3ca7c, 0x3cafc,
1860 0x3cb08, 0x3cc24,
1861 0x3cd00, 0x3cd00,
1862 0x3cd08, 0x3cd14,
1863 0x3cd1c, 0x3cd20,
1864 0x3cd3c, 0x3cd3c,
1865 0x3cd48, 0x3cd50,
1866 0x3d200, 0x3d20c,
1867 0x3d220, 0x3d220,
1868 0x3d240, 0x3d240,
1869 0x3d600, 0x3d60c,
1870 0x3da00, 0x3da1c,
1871 0x3de00, 0x3de20,
1872 0x3de38, 0x3de3c,
1873 0x3de80, 0x3de80,
1874 0x3de88, 0x3dea8,
1875 0x3deb0, 0x3deb4,
1876 0x3dec8, 0x3ded4,
1877 0x3dfb8, 0x3e004,
1878 0x3e200, 0x3e200,
1879 0x3e208, 0x3e240,
1880 0x3e248, 0x3e280,
1881 0x3e288, 0x3e2c0,
1882 0x3e2c8, 0x3e2fc,
1883 0x3e600, 0x3e630,
1884 0x3ea00, 0x3eabc,
1885 0x3eb00, 0x3eb10,
1886 0x3eb20, 0x3eb30,
1887 0x3eb40, 0x3eb50,
1888 0x3eb60, 0x3eb70,
1889 0x3f000, 0x3f028,
1890 0x3f030, 0x3f048,
1891 0x3f060, 0x3f068,
1892 0x3f070, 0x3f09c,
1893 0x3f0f0, 0x3f128,
1894 0x3f130, 0x3f148,
1895 0x3f160, 0x3f168,
1896 0x3f170, 0x3f19c,
1897 0x3f1f0, 0x3f238,
1898 0x3f240, 0x3f240,
1899 0x3f248, 0x3f250,
1900 0x3f25c, 0x3f264,
1901 0x3f270, 0x3f2b8,
1902 0x3f2c0, 0x3f2e4,
1903 0x3f2f8, 0x3f338,
1904 0x3f340, 0x3f340,
1905 0x3f348, 0x3f350,
1906 0x3f35c, 0x3f364,
1907 0x3f370, 0x3f3b8,
1908 0x3f3c0, 0x3f3e4,
1909 0x3f3f8, 0x3f428,
1910 0x3f430, 0x3f448,
1911 0x3f460, 0x3f468,
1912 0x3f470, 0x3f49c,
1913 0x3f4f0, 0x3f528,
1914 0x3f530, 0x3f548,
1915 0x3f560, 0x3f568,
1916 0x3f570, 0x3f59c,
1917 0x3f5f0, 0x3f638,
1918 0x3f640, 0x3f640,
1919 0x3f648, 0x3f650,
1920 0x3f65c, 0x3f664,
1921 0x3f670, 0x3f6b8,
1922 0x3f6c0, 0x3f6e4,
1923 0x3f6f8, 0x3f738,
1924 0x3f740, 0x3f740,
1925 0x3f748, 0x3f750,
1926 0x3f75c, 0x3f764,
1927 0x3f770, 0x3f7b8,
1928 0x3f7c0, 0x3f7e4,
1929 0x3f7f8, 0x3f7fc,
1930 0x3f814, 0x3f814,
1931 0x3f82c, 0x3f82c,
1932 0x3f880, 0x3f88c,
1933 0x3f8e8, 0x3f8ec,
1934 0x3f900, 0x3f928,
1935 0x3f930, 0x3f948,
1936 0x3f960, 0x3f968,
1937 0x3f970, 0x3f99c,
1938 0x3f9f0, 0x3fa38,
1939 0x3fa40, 0x3fa40,
1940 0x3fa48, 0x3fa50,
1941 0x3fa5c, 0x3fa64,
1942 0x3fa70, 0x3fab8,
1943 0x3fac0, 0x3fae4,
1944 0x3faf8, 0x3fb10,
1945 0x3fb28, 0x3fb28,
1946 0x3fb3c, 0x3fb50,
1947 0x3fbf0, 0x3fc10,
1948 0x3fc28, 0x3fc28,
1949 0x3fc3c, 0x3fc50,
1950 0x3fcf0, 0x3fcfc,
1951 0x40000, 0x4000c,
1952 0x40040, 0x40050,
1953 0x40060, 0x40068,
1954 0x4007c, 0x4008c,
1955 0x40094, 0x400b0,
1956 0x400c0, 0x40144,
1957 0x40180, 0x4018c,
1958 0x40200, 0x40254,
1959 0x40260, 0x40264,
1960 0x40270, 0x40288,
1961 0x40290, 0x40298,
1962 0x402ac, 0x402c8,
1963 0x402d0, 0x402e0,
1964 0x402f0, 0x402f0,
1965 0x40300, 0x4033c,
1966 0x403f8, 0x403fc,
1967 0x41304, 0x413c4,
1968 0x41400, 0x4140c,
1969 0x41414, 0x4141c,
1970 0x41480, 0x414d0,
1971 0x44000, 0x44054,
1972 0x4405c, 0x44078,
1973 0x440c0, 0x44174,
1974 0x44180, 0x441ac,
1975 0x441b4, 0x441b8,
1976 0x441c0, 0x44254,
1977 0x4425c, 0x44278,
1978 0x442c0, 0x44374,
1979 0x44380, 0x443ac,
1980 0x443b4, 0x443b8,
1981 0x443c0, 0x44454,
1982 0x4445c, 0x44478,
1983 0x444c0, 0x44574,
1984 0x44580, 0x445ac,
1985 0x445b4, 0x445b8,
1986 0x445c0, 0x44654,
1987 0x4465c, 0x44678,
1988 0x446c0, 0x44774,
1989 0x44780, 0x447ac,
1990 0x447b4, 0x447b8,
1991 0x447c0, 0x44854,
1992 0x4485c, 0x44878,
1993 0x448c0, 0x44974,
1994 0x44980, 0x449ac,
1995 0x449b4, 0x449b8,
1996 0x449c0, 0x449fc,
1997 0x45000, 0x45004,
1998 0x45010, 0x45030,
1999 0x45040, 0x45060,
2000 0x45068, 0x45068,
2001 0x45080, 0x45084,
2002 0x450a0, 0x450b0,
2003 0x45200, 0x45204,
2004 0x45210, 0x45230,
2005 0x45240, 0x45260,
2006 0x45268, 0x45268,
2007 0x45280, 0x45284,
2008 0x452a0, 0x452b0,
2009 0x460c0, 0x460e4,
2010 0x47000, 0x4703c,
2011 0x47044, 0x4708c,
2012 0x47200, 0x47250,
2013 0x47400, 0x47408,
2014 0x47414, 0x47420,
2015 0x47600, 0x47618,
2016 0x47800, 0x47814,
2017 0x48000, 0x4800c,
2018 0x48040, 0x48050,
2019 0x48060, 0x48068,
2020 0x4807c, 0x4808c,
2021 0x48094, 0x480b0,
2022 0x480c0, 0x48144,
2023 0x48180, 0x4818c,
2024 0x48200, 0x48254,
2025 0x48260, 0x48264,
2026 0x48270, 0x48288,
2027 0x48290, 0x48298,
2028 0x482ac, 0x482c8,
2029 0x482d0, 0x482e0,
2030 0x482f0, 0x482f0,
2031 0x48300, 0x4833c,
2032 0x483f8, 0x483fc,
2033 0x49304, 0x493c4,
2034 0x49400, 0x4940c,
2035 0x49414, 0x4941c,
2036 0x49480, 0x494d0,
2037 0x4c000, 0x4c054,
2038 0x4c05c, 0x4c078,
2039 0x4c0c0, 0x4c174,
2040 0x4c180, 0x4c1ac,
2041 0x4c1b4, 0x4c1b8,
2042 0x4c1c0, 0x4c254,
2043 0x4c25c, 0x4c278,
2044 0x4c2c0, 0x4c374,
2045 0x4c380, 0x4c3ac,
2046 0x4c3b4, 0x4c3b8,
2047 0x4c3c0, 0x4c454,
2048 0x4c45c, 0x4c478,
2049 0x4c4c0, 0x4c574,
2050 0x4c580, 0x4c5ac,
2051 0x4c5b4, 0x4c5b8,
2052 0x4c5c0, 0x4c654,
2053 0x4c65c, 0x4c678,
2054 0x4c6c0, 0x4c774,
2055 0x4c780, 0x4c7ac,
2056 0x4c7b4, 0x4c7b8,
2057 0x4c7c0, 0x4c854,
2058 0x4c85c, 0x4c878,
2059 0x4c8c0, 0x4c974,
2060 0x4c980, 0x4c9ac,
2061 0x4c9b4, 0x4c9b8,
2062 0x4c9c0, 0x4c9fc,
2063 0x4d000, 0x4d004,
2064 0x4d010, 0x4d030,
2065 0x4d040, 0x4d060,
2066 0x4d068, 0x4d068,
2067 0x4d080, 0x4d084,
2068 0x4d0a0, 0x4d0b0,
2069 0x4d200, 0x4d204,
2070 0x4d210, 0x4d230,
2071 0x4d240, 0x4d260,
2072 0x4d268, 0x4d268,
2073 0x4d280, 0x4d284,
2074 0x4d2a0, 0x4d2b0,
2075 0x4e0c0, 0x4e0e4,
2076 0x4f000, 0x4f03c,
2077 0x4f044, 0x4f08c,
2078 0x4f200, 0x4f250,
2079 0x4f400, 0x4f408,
2080 0x4f414, 0x4f420,
2081 0x4f600, 0x4f618,
2082 0x4f800, 0x4f814,
2083 0x50000, 0x50084,
2084 0x50090, 0x500cc,
2085 0x50400, 0x50400,
2086 0x50800, 0x50884,
2087 0x50890, 0x508cc,
2088 0x50c00, 0x50c00,
2089 0x51000, 0x5101c,
2090 0x51300, 0x51308,
2091 };
2092
2093 static const unsigned int t5vf_reg_ranges[] = {
2094 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2095 VF_MPS_REG(A_MPS_VF_CTL),
2096 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2097 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2098 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2099 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2100 FW_T4VF_MBDATA_BASE_ADDR,
2101 FW_T4VF_MBDATA_BASE_ADDR +
2102 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2103 };
2104
2105 static const unsigned int t6_reg_ranges[] = {
2106 0x1008, 0x101c,
2107 0x1024, 0x10a8,
2108 0x10b4, 0x10f8,
2109 0x1100, 0x1114,
2110 0x111c, 0x112c,
2111 0x1138, 0x113c,
2112 0x1144, 0x114c,
2113 0x1180, 0x1184,
2114 0x1190, 0x1194,
2115 0x11a0, 0x11a4,
2116 0x11b0, 0x11c4,
2117 0x11fc, 0x123c,
2118 0x1254, 0x1274,
2119 0x1280, 0x133c,
2120 0x1800, 0x18fc,
2121 0x3000, 0x302c,
2122 0x3060, 0x30b0,
2123 0x30b8, 0x30d8,
2124 0x30e0, 0x30fc,
2125 0x3140, 0x357c,
2126 0x35a8, 0x35cc,
2127 0x35ec, 0x35ec,
2128 0x3600, 0x5624,
2129 0x56cc, 0x56ec,
2130 0x56f4, 0x5720,
2131 0x5728, 0x575c,
2132 0x580c, 0x5814,
2133 0x5890, 0x589c,
2134 0x58a4, 0x58ac,
2135 0x58b8, 0x58bc,
2136 0x5940, 0x595c,
2137 0x5980, 0x598c,
2138 0x59b0, 0x59c8,
2139 0x59d0, 0x59dc,
2140 0x59fc, 0x5a18,
2141 0x5a60, 0x5a6c,
2142 0x5a80, 0x5a8c,
2143 0x5a94, 0x5a9c,
2144 0x5b94, 0x5bfc,
2145 0x5c10, 0x5e48,
2146 0x5e50, 0x5e94,
2147 0x5ea0, 0x5eb0,
2148 0x5ec0, 0x5ec0,
2149 0x5ec8, 0x5ed0,
2150 0x5ee0, 0x5ee0,
2151 0x5ef0, 0x5ef0,
2152 0x5f00, 0x5f00,
2153 0x6000, 0x6020,
2154 0x6028, 0x6040,
2155 0x6058, 0x609c,
2156 0x60a8, 0x619c,
2157 0x7700, 0x7798,
2158 0x77c0, 0x7880,
2159 0x78cc, 0x78fc,
2160 0x7b00, 0x7b58,
2161 0x7b60, 0x7b84,
2162 0x7b8c, 0x7c54,
2163 0x7d00, 0x7d38,
2164 0x7d40, 0x7d84,
2165 0x7d8c, 0x7ddc,
2166 0x7de4, 0x7e04,
2167 0x7e10, 0x7e1c,
2168 0x7e24, 0x7e38,
2169 0x7e40, 0x7e44,
2170 0x7e4c, 0x7e78,
2171 0x7e80, 0x7edc,
2172 0x7ee8, 0x7efc,
2173 0x8dc0, 0x8de0,
2174 0x8df8, 0x8e04,
2175 0x8e10, 0x8e84,
2176 0x8ea0, 0x8f88,
2177 0x8fb8, 0x9058,
2178 0x9060, 0x9060,
2179 0x9068, 0x90f8,
2180 0x9100, 0x9124,
2181 0x9400, 0x9470,
2182 0x9600, 0x9600,
2183 0x9608, 0x9638,
2184 0x9640, 0x9704,
2185 0x9710, 0x971c,
2186 0x9800, 0x9808,
2187 0x9810, 0x9864,
2188 0x9c00, 0x9c6c,
2189 0x9c80, 0x9cec,
2190 0x9d00, 0x9d6c,
2191 0x9d80, 0x9dec,
2192 0x9e00, 0x9e6c,
2193 0x9e80, 0x9eec,
2194 0x9f00, 0x9f6c,
2195 0x9f80, 0xa020,
2196 0xd000, 0xd03c,
2197 0xd100, 0xd118,
2198 0xd200, 0xd214,
2199 0xd220, 0xd234,
2200 0xd240, 0xd254,
2201 0xd260, 0xd274,
2202 0xd280, 0xd294,
2203 0xd2a0, 0xd2b4,
2204 0xd2c0, 0xd2d4,
2205 0xd2e0, 0xd2f4,
2206 0xd300, 0xd31c,
2207 0xdfc0, 0xdfe0,
2208 0xe000, 0xf008,
2209 0xf010, 0xf018,
2210 0xf020, 0xf028,
2211 0x11000, 0x11014,
2212 0x11048, 0x1106c,
2213 0x11074, 0x11088,
2214 0x11098, 0x11120,
2215 0x1112c, 0x1117c,
2216 0x11190, 0x112e0,
2217 0x11300, 0x1130c,
2218 0x12000, 0x1206c,
2219 0x19040, 0x1906c,
2220 0x19078, 0x19080,
2221 0x1908c, 0x190e8,
2222 0x190f0, 0x190f8,
2223 0x19100, 0x19110,
2224 0x19120, 0x19124,
2225 0x19150, 0x19194,
2226 0x1919c, 0x191b0,
2227 0x191d0, 0x191e8,
2228 0x19238, 0x19290,
2229 0x192a4, 0x192b0,
2230 0x19348, 0x1934c,
2231 0x193f8, 0x19418,
2232 0x19420, 0x19428,
2233 0x19430, 0x19444,
2234 0x1944c, 0x1946c,
2235 0x19474, 0x19474,
2236 0x19490, 0x194cc,
2237 0x194f0, 0x194f8,
2238 0x19c00, 0x19c48,
2239 0x19c50, 0x19c80,
2240 0x19c94, 0x19c98,
2241 0x19ca0, 0x19cbc,
2242 0x19ce4, 0x19ce4,
2243 0x19cf0, 0x19cf8,
2244 0x19d00, 0x19d28,
2245 0x19d50, 0x19d78,
2246 0x19d94, 0x19d98,
2247 0x19da0, 0x19de0,
2248 0x19df0, 0x19e10,
2249 0x19e50, 0x19e6c,
2250 0x19ea0, 0x19ebc,
2251 0x19ec4, 0x19ef4,
2252 0x19f04, 0x19f2c,
2253 0x19f34, 0x19f34,
2254 0x19f40, 0x19f50,
2255 0x19f90, 0x19fac,
2256 0x19fc4, 0x19fc8,
2257 0x19fd0, 0x19fe4,
2258 0x1a000, 0x1a004,
2259 0x1a010, 0x1a06c,
2260 0x1a0b0, 0x1a0e4,
2261 0x1a0ec, 0x1a0f8,
2262 0x1a100, 0x1a108,
2263 0x1a114, 0x1a130,
2264 0x1a138, 0x1a1c4,
2265 0x1a1fc, 0x1a1fc,
2266 0x1e008, 0x1e00c,
2267 0x1e040, 0x1e044,
2268 0x1e04c, 0x1e04c,
2269 0x1e284, 0x1e290,
2270 0x1e2c0, 0x1e2c0,
2271 0x1e2e0, 0x1e2e0,
2272 0x1e300, 0x1e384,
2273 0x1e3c0, 0x1e3c8,
2274 0x1e408, 0x1e40c,
2275 0x1e440, 0x1e444,
2276 0x1e44c, 0x1e44c,
2277 0x1e684, 0x1e690,
2278 0x1e6c0, 0x1e6c0,
2279 0x1e6e0, 0x1e6e0,
2280 0x1e700, 0x1e784,
2281 0x1e7c0, 0x1e7c8,
2282 0x1e808, 0x1e80c,
2283 0x1e840, 0x1e844,
2284 0x1e84c, 0x1e84c,
2285 0x1ea84, 0x1ea90,
2286 0x1eac0, 0x1eac0,
2287 0x1eae0, 0x1eae0,
2288 0x1eb00, 0x1eb84,
2289 0x1ebc0, 0x1ebc8,
2290 0x1ec08, 0x1ec0c,
2291 0x1ec40, 0x1ec44,
2292 0x1ec4c, 0x1ec4c,
2293 0x1ee84, 0x1ee90,
2294 0x1eec0, 0x1eec0,
2295 0x1eee0, 0x1eee0,
2296 0x1ef00, 0x1ef84,
2297 0x1efc0, 0x1efc8,
2298 0x1f008, 0x1f00c,
2299 0x1f040, 0x1f044,
2300 0x1f04c, 0x1f04c,
2301 0x1f284, 0x1f290,
2302 0x1f2c0, 0x1f2c0,
2303 0x1f2e0, 0x1f2e0,
2304 0x1f300, 0x1f384,
2305 0x1f3c0, 0x1f3c8,
2306 0x1f408, 0x1f40c,
2307 0x1f440, 0x1f444,
2308 0x1f44c, 0x1f44c,
2309 0x1f684, 0x1f690,
2310 0x1f6c0, 0x1f6c0,
2311 0x1f6e0, 0x1f6e0,
2312 0x1f700, 0x1f784,
2313 0x1f7c0, 0x1f7c8,
2314 0x1f808, 0x1f80c,
2315 0x1f840, 0x1f844,
2316 0x1f84c, 0x1f84c,
2317 0x1fa84, 0x1fa90,
2318 0x1fac0, 0x1fac0,
2319 0x1fae0, 0x1fae0,
2320 0x1fb00, 0x1fb84,
2321 0x1fbc0, 0x1fbc8,
2322 0x1fc08, 0x1fc0c,
2323 0x1fc40, 0x1fc44,
2324 0x1fc4c, 0x1fc4c,
2325 0x1fe84, 0x1fe90,
2326 0x1fec0, 0x1fec0,
2327 0x1fee0, 0x1fee0,
2328 0x1ff00, 0x1ff84,
2329 0x1ffc0, 0x1ffc8,
2330 0x30000, 0x30030,
2331 0x30100, 0x30168,
2332 0x30190, 0x301a0,
2333 0x301a8, 0x301b8,
2334 0x301c4, 0x301c8,
2335 0x301d0, 0x301d0,
2336 0x30200, 0x30320,
2337 0x30400, 0x304b4,
2338 0x304c0, 0x3052c,
2339 0x30540, 0x3061c,
2340 0x30800, 0x308a0,
2341 0x308c0, 0x30908,
2342 0x30910, 0x309b8,
2343 0x30a00, 0x30a04,
2344 0x30a0c, 0x30a14,
2345 0x30a1c, 0x30a2c,
2346 0x30a44, 0x30a50,
2347 0x30a74, 0x30a74,
2348 0x30a7c, 0x30afc,
2349 0x30b08, 0x30c24,
2350 0x30d00, 0x30d14,
2351 0x30d1c, 0x30d3c,
2352 0x30d44, 0x30d4c,
2353 0x30d54, 0x30d74,
2354 0x30d7c, 0x30d7c,
2355 0x30de0, 0x30de0,
2356 0x30e00, 0x30ed4,
2357 0x30f00, 0x30fa4,
2358 0x30fc0, 0x30fc4,
2359 0x31000, 0x31004,
2360 0x31080, 0x310fc,
2361 0x31208, 0x31220,
2362 0x3123c, 0x31254,
2363 0x31300, 0x31300,
2364 0x31308, 0x3131c,
2365 0x31338, 0x3133c,
2366 0x31380, 0x31380,
2367 0x31388, 0x313a8,
2368 0x313b4, 0x313b4,
2369 0x31400, 0x31420,
2370 0x31438, 0x3143c,
2371 0x31480, 0x31480,
2372 0x314a8, 0x314a8,
2373 0x314b0, 0x314b4,
2374 0x314c8, 0x314d4,
2375 0x31a40, 0x31a4c,
2376 0x31af0, 0x31b20,
2377 0x31b38, 0x31b3c,
2378 0x31b80, 0x31b80,
2379 0x31ba8, 0x31ba8,
2380 0x31bb0, 0x31bb4,
2381 0x31bc8, 0x31bd4,
2382 0x32140, 0x3218c,
2383 0x321f0, 0x321f4,
2384 0x32200, 0x32200,
2385 0x32218, 0x32218,
2386 0x32400, 0x32400,
2387 0x32408, 0x3241c,
2388 0x32618, 0x32620,
2389 0x32664, 0x32664,
2390 0x326a8, 0x326a8,
2391 0x326ec, 0x326ec,
2392 0x32a00, 0x32abc,
2393 0x32b00, 0x32b18,
2394 0x32b20, 0x32b38,
2395 0x32b40, 0x32b58,
2396 0x32b60, 0x32b78,
2397 0x32c00, 0x32c00,
2398 0x32c08, 0x32c3c,
2399 0x33000, 0x3302c,
2400 0x33034, 0x33050,
2401 0x33058, 0x33058,
2402 0x33060, 0x3308c,
2403 0x3309c, 0x330ac,
2404 0x330c0, 0x330c0,
2405 0x330c8, 0x330d0,
2406 0x330d8, 0x330e0,
2407 0x330ec, 0x3312c,
2408 0x33134, 0x33150,
2409 0x33158, 0x33158,
2410 0x33160, 0x3318c,
2411 0x3319c, 0x331ac,
2412 0x331c0, 0x331c0,
2413 0x331c8, 0x331d0,
2414 0x331d8, 0x331e0,
2415 0x331ec, 0x33290,
2416 0x33298, 0x332c4,
2417 0x332e4, 0x33390,
2418 0x33398, 0x333c4,
2419 0x333e4, 0x3342c,
2420 0x33434, 0x33450,
2421 0x33458, 0x33458,
2422 0x33460, 0x3348c,
2423 0x3349c, 0x334ac,
2424 0x334c0, 0x334c0,
2425 0x334c8, 0x334d0,
2426 0x334d8, 0x334e0,
2427 0x334ec, 0x3352c,
2428 0x33534, 0x33550,
2429 0x33558, 0x33558,
2430 0x33560, 0x3358c,
2431 0x3359c, 0x335ac,
2432 0x335c0, 0x335c0,
2433 0x335c8, 0x335d0,
2434 0x335d8, 0x335e0,
2435 0x335ec, 0x33690,
2436 0x33698, 0x336c4,
2437 0x336e4, 0x33790,
2438 0x33798, 0x337c4,
2439 0x337e4, 0x337fc,
2440 0x33814, 0x33814,
2441 0x33854, 0x33868,
2442 0x33880, 0x3388c,
2443 0x338c0, 0x338d0,
2444 0x338e8, 0x338ec,
2445 0x33900, 0x3392c,
2446 0x33934, 0x33950,
2447 0x33958, 0x33958,
2448 0x33960, 0x3398c,
2449 0x3399c, 0x339ac,
2450 0x339c0, 0x339c0,
2451 0x339c8, 0x339d0,
2452 0x339d8, 0x339e0,
2453 0x339ec, 0x33a90,
2454 0x33a98, 0x33ac4,
2455 0x33ae4, 0x33b10,
2456 0x33b24, 0x33b28,
2457 0x33b38, 0x33b50,
2458 0x33bf0, 0x33c10,
2459 0x33c24, 0x33c28,
2460 0x33c38, 0x33c50,
2461 0x33cf0, 0x33cfc,
2462 0x34000, 0x34030,
2463 0x34100, 0x34168,
2464 0x34190, 0x341a0,
2465 0x341a8, 0x341b8,
2466 0x341c4, 0x341c8,
2467 0x341d0, 0x341d0,
2468 0x34200, 0x34320,
2469 0x34400, 0x344b4,
2470 0x344c0, 0x3452c,
2471 0x34540, 0x3461c,
2472 0x34800, 0x348a0,
2473 0x348c0, 0x34908,
2474 0x34910, 0x349b8,
2475 0x34a00, 0x34a04,
2476 0x34a0c, 0x34a14,
2477 0x34a1c, 0x34a2c,
2478 0x34a44, 0x34a50,
2479 0x34a74, 0x34a74,
2480 0x34a7c, 0x34afc,
2481 0x34b08, 0x34c24,
2482 0x34d00, 0x34d14,
2483 0x34d1c, 0x34d3c,
2484 0x34d44, 0x34d4c,
2485 0x34d54, 0x34d74,
2486 0x34d7c, 0x34d7c,
2487 0x34de0, 0x34de0,
2488 0x34e00, 0x34ed4,
2489 0x34f00, 0x34fa4,
2490 0x34fc0, 0x34fc4,
2491 0x35000, 0x35004,
2492 0x35080, 0x350fc,
2493 0x35208, 0x35220,
2494 0x3523c, 0x35254,
2495 0x35300, 0x35300,
2496 0x35308, 0x3531c,
2497 0x35338, 0x3533c,
2498 0x35380, 0x35380,
2499 0x35388, 0x353a8,
2500 0x353b4, 0x353b4,
2501 0x35400, 0x35420,
2502 0x35438, 0x3543c,
2503 0x35480, 0x35480,
2504 0x354a8, 0x354a8,
2505 0x354b0, 0x354b4,
2506 0x354c8, 0x354d4,
2507 0x35a40, 0x35a4c,
2508 0x35af0, 0x35b20,
2509 0x35b38, 0x35b3c,
2510 0x35b80, 0x35b80,
2511 0x35ba8, 0x35ba8,
2512 0x35bb0, 0x35bb4,
2513 0x35bc8, 0x35bd4,
2514 0x36140, 0x3618c,
2515 0x361f0, 0x361f4,
2516 0x36200, 0x36200,
2517 0x36218, 0x36218,
2518 0x36400, 0x36400,
2519 0x36408, 0x3641c,
2520 0x36618, 0x36620,
2521 0x36664, 0x36664,
2522 0x366a8, 0x366a8,
2523 0x366ec, 0x366ec,
2524 0x36a00, 0x36abc,
2525 0x36b00, 0x36b18,
2526 0x36b20, 0x36b38,
2527 0x36b40, 0x36b58,
2528 0x36b60, 0x36b78,
2529 0x36c00, 0x36c00,
2530 0x36c08, 0x36c3c,
2531 0x37000, 0x3702c,
2532 0x37034, 0x37050,
2533 0x37058, 0x37058,
2534 0x37060, 0x3708c,
2535 0x3709c, 0x370ac,
2536 0x370c0, 0x370c0,
2537 0x370c8, 0x370d0,
2538 0x370d8, 0x370e0,
2539 0x370ec, 0x3712c,
2540 0x37134, 0x37150,
2541 0x37158, 0x37158,
2542 0x37160, 0x3718c,
2543 0x3719c, 0x371ac,
2544 0x371c0, 0x371c0,
2545 0x371c8, 0x371d0,
2546 0x371d8, 0x371e0,
2547 0x371ec, 0x37290,
2548 0x37298, 0x372c4,
2549 0x372e4, 0x37390,
2550 0x37398, 0x373c4,
2551 0x373e4, 0x3742c,
2552 0x37434, 0x37450,
2553 0x37458, 0x37458,
2554 0x37460, 0x3748c,
2555 0x3749c, 0x374ac,
2556 0x374c0, 0x374c0,
2557 0x374c8, 0x374d0,
2558 0x374d8, 0x374e0,
2559 0x374ec, 0x3752c,
2560 0x37534, 0x37550,
2561 0x37558, 0x37558,
2562 0x37560, 0x3758c,
2563 0x3759c, 0x375ac,
2564 0x375c0, 0x375c0,
2565 0x375c8, 0x375d0,
2566 0x375d8, 0x375e0,
2567 0x375ec, 0x37690,
2568 0x37698, 0x376c4,
2569 0x376e4, 0x37790,
2570 0x37798, 0x377c4,
2571 0x377e4, 0x377fc,
2572 0x37814, 0x37814,
2573 0x37854, 0x37868,
2574 0x37880, 0x3788c,
2575 0x378c0, 0x378d0,
2576 0x378e8, 0x378ec,
2577 0x37900, 0x3792c,
2578 0x37934, 0x37950,
2579 0x37958, 0x37958,
2580 0x37960, 0x3798c,
2581 0x3799c, 0x379ac,
2582 0x379c0, 0x379c0,
2583 0x379c8, 0x379d0,
2584 0x379d8, 0x379e0,
2585 0x379ec, 0x37a90,
2586 0x37a98, 0x37ac4,
2587 0x37ae4, 0x37b10,
2588 0x37b24, 0x37b28,
2589 0x37b38, 0x37b50,
2590 0x37bf0, 0x37c10,
2591 0x37c24, 0x37c28,
2592 0x37c38, 0x37c50,
2593 0x37cf0, 0x37cfc,
2594 0x40040, 0x40040,
2595 0x40080, 0x40084,
2596 0x40100, 0x40100,
2597 0x40140, 0x401bc,
2598 0x40200, 0x40214,
2599 0x40228, 0x40228,
2600 0x40240, 0x40258,
2601 0x40280, 0x40280,
2602 0x40304, 0x40304,
2603 0x40330, 0x4033c,
2604 0x41304, 0x413c8,
2605 0x413d0, 0x413dc,
2606 0x413f0, 0x413f0,
2607 0x41400, 0x4140c,
2608 0x41414, 0x4141c,
2609 0x41480, 0x414d0,
2610 0x44000, 0x4407c,
2611 0x440c0, 0x441ac,
2612 0x441b4, 0x4427c,
2613 0x442c0, 0x443ac,
2614 0x443b4, 0x4447c,
2615 0x444c0, 0x445ac,
2616 0x445b4, 0x4467c,
2617 0x446c0, 0x447ac,
2618 0x447b4, 0x4487c,
2619 0x448c0, 0x449ac,
2620 0x449b4, 0x44a7c,
2621 0x44ac0, 0x44bac,
2622 0x44bb4, 0x44c7c,
2623 0x44cc0, 0x44dac,
2624 0x44db4, 0x44e7c,
2625 0x44ec0, 0x44fac,
2626 0x44fb4, 0x4507c,
2627 0x450c0, 0x451ac,
2628 0x451b4, 0x451fc,
2629 0x45800, 0x45804,
2630 0x45810, 0x45830,
2631 0x45840, 0x45860,
2632 0x45868, 0x45868,
2633 0x45880, 0x45884,
2634 0x458a0, 0x458b0,
2635 0x45a00, 0x45a04,
2636 0x45a10, 0x45a30,
2637 0x45a40, 0x45a60,
2638 0x45a68, 0x45a68,
2639 0x45a80, 0x45a84,
2640 0x45aa0, 0x45ab0,
2641 0x460c0, 0x460e4,
2642 0x47000, 0x4703c,
2643 0x47044, 0x4708c,
2644 0x47200, 0x47250,
2645 0x47400, 0x47408,
2646 0x47414, 0x47420,
2647 0x47600, 0x47618,
2648 0x47800, 0x47814,
2649 0x47820, 0x4782c,
2650 0x50000, 0x50084,
2651 0x50090, 0x500cc,
2652 0x50300, 0x50384,
2653 0x50400, 0x50400,
2654 0x50800, 0x50884,
2655 0x50890, 0x508cc,
2656 0x50b00, 0x50b84,
2657 0x50c00, 0x50c00,
2658 0x51000, 0x51020,
2659 0x51028, 0x510b0,
2660 0x51300, 0x51324,
2661 };
2662
2663 static const unsigned int t6vf_reg_ranges[] = {
2664 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2665 VF_MPS_REG(A_MPS_VF_CTL),
2666 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2667 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2668 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2669 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2670 FW_T6VF_MBDATA_BASE_ADDR,
2671 FW_T6VF_MBDATA_BASE_ADDR +
2672 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2673 };
2674
2675 static const unsigned int t7_reg_ranges[] = {
2676 0x1008, 0x101c,
2677 0x1024, 0x10a8,
2678 0x10b4, 0x10f8,
2679 0x1100, 0x1114,
2680 0x111c, 0x112c,
2681 0x1138, 0x113c,
2682 0x1144, 0x115c,
2683 0x1180, 0x1184,
2684 0x1190, 0x1194,
2685 0x11a0, 0x11a4,
2686 0x11b0, 0x11d0,
2687 0x11fc, 0x1278,
2688 0x1280, 0x1368,
2689 0x1700, 0x172c,
2690 0x173c, 0x1760,
2691 0x1800, 0x18fc,
2692 0x3000, 0x3044,
2693 0x30a4, 0x30b0,
2694 0x30b8, 0x30d8,
2695 0x30e0, 0x30e8,
2696 0x3140, 0x357c,
2697 0x35a8, 0x35cc,
2698 0x35e0, 0x35ec,
2699 0x3600, 0x37fc,
2700 0x3804, 0x3818,
2701 0x3880, 0x388c,
2702 0x3900, 0x3904,
2703 0x3910, 0x3978,
2704 0x3980, 0x399c,
2705 0x4700, 0x4720,
2706 0x4728, 0x475c,
2707 0x480c, 0x4814,
2708 0x4890, 0x489c,
2709 0x48a4, 0x48ac,
2710 0x48b8, 0x48bc,
2711 0x4900, 0x4924,
2712 0x4ffc, 0x4ffc,
2713 0x5500, 0x5624,
2714 0x56c4, 0x56ec,
2715 0x56f4, 0x5720,
2716 0x5728, 0x575c,
2717 0x580c, 0x5814,
2718 0x5890, 0x589c,
2719 0x58a4, 0x58ac,
2720 0x58b8, 0x58bc,
2721 0x5940, 0x598c,
2722 0x59b0, 0x59c8,
2723 0x59d0, 0x59dc,
2724 0x59fc, 0x5a18,
2725 0x5a60, 0x5a6c,
2726 0x5a80, 0x5a8c,
2727 0x5a94, 0x5a9c,
2728 0x5b94, 0x5bec,
2729 0x5bf8, 0x5bfc,
2730 0x5c10, 0x5c40,
2731 0x5c4c, 0x5e48,
2732 0x5e50, 0x5e94,
2733 0x5ea0, 0x5eb0,
2734 0x5ec0, 0x5ec0,
2735 0x5ec8, 0x5ed0,
2736 0x5ee0, 0x5ee0,
2737 0x5ef0, 0x5ef0,
2738 0x5f00, 0x5f04,
2739 0x5f0c, 0x5f10,
2740 0x5f20, 0x5f78,
2741 0x5f84, 0x5f88,
2742 0x5f90, 0x5fd8,
2743 0x6000, 0x6020,
2744 0x6028, 0x6030,
2745 0x6044, 0x609c,
2746 0x60a8, 0x60ac,
2747 0x60b8, 0x60ec,
2748 0x6100, 0x6104,
2749 0x6118, 0x611c,
2750 0x6150, 0x6150,
2751 0x6180, 0x61b8,
2752 0x7700, 0x77a8,
2753 0x77b0, 0x7888,
2754 0x78cc, 0x7970,
2755 0x7b00, 0x7b00,
2756 0x7b08, 0x7b0c,
2757 0x7b24, 0x7b84,
2758 0x7b8c, 0x7c2c,
2759 0x7c34, 0x7c40,
2760 0x7c48, 0x7c68,
2761 0x7c70, 0x7c7c,
2762 0x7d00, 0x7ddc,
2763 0x7de4, 0x7e38,
2764 0x7e40, 0x7e44,
2765 0x7e4c, 0x7e74,
2766 0x7e80, 0x7ee0,
2767 0x7ee8, 0x7f0c,
2768 0x7f20, 0x7f5c,
2769 0x8dc0, 0x8de8,
2770 0x8df8, 0x8e04,
2771 0x8e10, 0x8e30,
2772 0x8e7c, 0x8ee8,
2773 0x8f88, 0x8f88,
2774 0x8f90, 0x8fb0,
2775 0x8fb8, 0x9058,
2776 0x9074, 0x90f8,
2777 0x9100, 0x912c,
2778 0x9138, 0x9188,
2779 0x9400, 0x9414,
2780 0x9430, 0x9440,
2781 0x9454, 0x9454,
2782 0x945c, 0x947c,
2783 0x9498, 0x94b8,
2784 0x9600, 0x9600,
2785 0x9608, 0x9638,
2786 0x9640, 0x9704,
2787 0x9710, 0x971c,
2788 0x9800, 0x9804,
2789 0x9854, 0x9854,
2790 0x9c00, 0x9c6c,
2791 0x9c80, 0x9cec,
2792 0x9d00, 0x9d6c,
2793 0x9d80, 0x9dec,
2794 0x9e00, 0x9e6c,
2795 0x9e80, 0x9eec,
2796 0x9f00, 0x9f6c,
2797 0x9f80, 0x9fec,
2798 0xa000, 0xa06c,
2799 0xa080, 0xa0ec,
2800 0xa100, 0xa16c,
2801 0xa180, 0xa1ec,
2802 0xa200, 0xa26c,
2803 0xa280, 0xa2ec,
2804 0xa300, 0xa36c,
2805 0xa380, 0xa458,
2806 0xa460, 0xa4f8,
2807 0xd000, 0xd03c,
2808 0xd100, 0xd134,
2809 0xd200, 0xd214,
2810 0xd220, 0xd234,
2811 0xd240, 0xd254,
2812 0xd260, 0xd274,
2813 0xd280, 0xd294,
2814 0xd2a0, 0xd2b4,
2815 0xd2c0, 0xd2d4,
2816 0xd2e0, 0xd2f4,
2817 0xd300, 0xd31c,
2818 0xdfc0, 0xdfe0,
2819 0xe000, 0xe00c,
2820 0xf000, 0xf008,
2821 0xf010, 0xf06c,
2822 0x11000, 0x11014,
2823 0x11048, 0x11120,
2824 0x11130, 0x11144,
2825 0x11174, 0x11178,
2826 0x11190, 0x111a0,
2827 0x111e4, 0x112f0,
2828 0x11300, 0x1133c,
2829 0x11408, 0x1146c,
2830 0x12000, 0x12004,
2831 0x12060, 0x122c4,
2832 0x19040, 0x1906c,
2833 0x19078, 0x19080,
2834 0x1908c, 0x190e8,
2835 0x190f0, 0x190f8,
2836 0x19100, 0x19110,
2837 0x19120, 0x19124,
2838 0x19150, 0x19194,
2839 0x1919c, 0x191a0,
2840 0x191ac, 0x191c8,
2841 0x191d0, 0x191e4,
2842 0x19250, 0x19250,
2843 0x19258, 0x19268,
2844 0x19278, 0x19278,
2845 0x19280, 0x192b0,
2846 0x192bc, 0x192f0,
2847 0x19300, 0x19308,
2848 0x19310, 0x19318,
2849 0x19320, 0x19328,
2850 0x19330, 0x19330,
2851 0x19348, 0x1934c,
2852 0x193f8, 0x19428,
2853 0x19430, 0x19444,
2854 0x1944c, 0x1946c,
2855 0x19474, 0x1947c,
2856 0x19488, 0x194cc,
2857 0x194f0, 0x194f8,
2858 0x19c00, 0x19c48,
2859 0x19c50, 0x19c80,
2860 0x19c94, 0x19c98,
2861 0x19ca0, 0x19cdc,
2862 0x19ce4, 0x19cf8,
2863 0x19d00, 0x19d30,
2864 0x19d50, 0x19d80,
2865 0x19d94, 0x19d98,
2866 0x19da0, 0x19de0,
2867 0x19df0, 0x19e10,
2868 0x19e50, 0x19e6c,
2869 0x19ea0, 0x19ebc,
2870 0x19ec4, 0x19ef4,
2871 0x19f04, 0x19f2c,
2872 0x19f34, 0x19f34,
2873 0x19f40, 0x19f50,
2874 0x19f90, 0x19fb4,
2875 0x19fbc, 0x19fbc,
2876 0x19fc4, 0x19fc8,
2877 0x19fd0, 0x19fe4,
2878 0x1a000, 0x1a004,
2879 0x1a010, 0x1a06c,
2880 0x1a0b0, 0x1a0e4,
2881 0x1a0ec, 0x1a108,
2882 0x1a114, 0x1a130,
2883 0x1a138, 0x1a1c4,
2884 0x1a1fc, 0x1a29c,
2885 0x1a2a8, 0x1a2b8,
2886 0x1a2c0, 0x1a388,
2887 0x1a398, 0x1a3ac,
2888 0x1e008, 0x1e00c,
2889 0x1e040, 0x1e044,
2890 0x1e04c, 0x1e04c,
2891 0x1e284, 0x1e290,
2892 0x1e2c0, 0x1e2c0,
2893 0x1e2e0, 0x1e2e4,
2894 0x1e300, 0x1e384,
2895 0x1e3c0, 0x1e3c8,
2896 0x1e408, 0x1e40c,
2897 0x1e440, 0x1e444,
2898 0x1e44c, 0x1e44c,
2899 0x1e684, 0x1e690,
2900 0x1e6c0, 0x1e6c0,
2901 0x1e6e0, 0x1e6e4,
2902 0x1e700, 0x1e784,
2903 0x1e7c0, 0x1e7c8,
2904 0x1e808, 0x1e80c,
2905 0x1e840, 0x1e844,
2906 0x1e84c, 0x1e84c,
2907 0x1ea84, 0x1ea90,
2908 0x1eac0, 0x1eac0,
2909 0x1eae0, 0x1eae4,
2910 0x1eb00, 0x1eb84,
2911 0x1ebc0, 0x1ebc8,
2912 0x1ec08, 0x1ec0c,
2913 0x1ec40, 0x1ec44,
2914 0x1ec4c, 0x1ec4c,
2915 0x1ee84, 0x1ee90,
2916 0x1eec0, 0x1eec0,
2917 0x1eee0, 0x1eee4,
2918 0x1ef00, 0x1ef84,
2919 0x1efc0, 0x1efc8,
2920 0x1f008, 0x1f00c,
2921 0x1f040, 0x1f044,
2922 0x1f04c, 0x1f04c,
2923 0x1f284, 0x1f290,
2924 0x1f2c0, 0x1f2c0,
2925 0x1f2e0, 0x1f2e4,
2926 0x1f300, 0x1f384,
2927 0x1f3c0, 0x1f3c8,
2928 0x1f408, 0x1f40c,
2929 0x1f440, 0x1f444,
2930 0x1f44c, 0x1f44c,
2931 0x1f684, 0x1f690,
2932 0x1f6c0, 0x1f6c0,
2933 0x1f6e0, 0x1f6e4,
2934 0x1f700, 0x1f784,
2935 0x1f7c0, 0x1f7c8,
2936 0x1f808, 0x1f80c,
2937 0x1f840, 0x1f844,
2938 0x1f84c, 0x1f84c,
2939 0x1fa84, 0x1fa90,
2940 0x1fac0, 0x1fac0,
2941 0x1fae0, 0x1fae4,
2942 0x1fb00, 0x1fb84,
2943 0x1fbc0, 0x1fbc8,
2944 0x1fc08, 0x1fc0c,
2945 0x1fc40, 0x1fc44,
2946 0x1fc4c, 0x1fc4c,
2947 0x1fe84, 0x1fe90,
2948 0x1fec0, 0x1fec0,
2949 0x1fee0, 0x1fee4,
2950 0x1ff00, 0x1ff84,
2951 0x1ffc0, 0x1ffc8,
2952 0x30000, 0x30038,
2953 0x30100, 0x3017c,
2954 0x30190, 0x301a0,
2955 0x301a8, 0x301b8,
2956 0x301c4, 0x301c8,
2957 0x301d0, 0x301e0,
2958 0x30200, 0x30344,
2959 0x30400, 0x304b4,
2960 0x304c0, 0x3052c,
2961 0x30540, 0x3065c,
2962 0x30800, 0x30848,
2963 0x30850, 0x308a8,
2964 0x308b8, 0x308c0,
2965 0x308cc, 0x308dc,
2966 0x30900, 0x30904,
2967 0x3090c, 0x30914,
2968 0x3091c, 0x30928,
2969 0x30930, 0x3093c,
2970 0x30944, 0x30948,
2971 0x30954, 0x30974,
2972 0x3097c, 0x30980,
2973 0x30a00, 0x30a20,
2974 0x30a38, 0x30a3c,
2975 0x30a50, 0x30a50,
2976 0x30a80, 0x30a80,
2977 0x30a88, 0x30aa8,
2978 0x30ab0, 0x30ab4,
2979 0x30ac8, 0x30ad4,
2980 0x30b28, 0x30b84,
2981 0x30b98, 0x30bb8,
2982 0x30c98, 0x30d14,
2983 0x31000, 0x31020,
2984 0x31038, 0x3103c,
2985 0x31050, 0x31050,
2986 0x31080, 0x31080,
2987 0x31088, 0x310a8,
2988 0x310b0, 0x310b4,
2989 0x310c8, 0x310d4,
2990 0x31128, 0x31184,
2991 0x31198, 0x311b8,
2992 0x32000, 0x32038,
2993 0x32100, 0x3217c,
2994 0x32190, 0x321a0,
2995 0x321a8, 0x321b8,
2996 0x321c4, 0x321c8,
2997 0x321d0, 0x321e0,
2998 0x32200, 0x32344,
2999 0x32400, 0x324b4,
3000 0x324c0, 0x3252c,
3001 0x32540, 0x3265c,
3002 0x32800, 0x32848,
3003 0x32850, 0x328a8,
3004 0x328b8, 0x328c0,
3005 0x328cc, 0x328dc,
3006 0x32900, 0x32904,
3007 0x3290c, 0x32914,
3008 0x3291c, 0x32928,
3009 0x32930, 0x3293c,
3010 0x32944, 0x32948,
3011 0x32954, 0x32974,
3012 0x3297c, 0x32980,
3013 0x32a00, 0x32a20,
3014 0x32a38, 0x32a3c,
3015 0x32a50, 0x32a50,
3016 0x32a80, 0x32a80,
3017 0x32a88, 0x32aa8,
3018 0x32ab0, 0x32ab4,
3019 0x32ac8, 0x32ad4,
3020 0x32b28, 0x32b84,
3021 0x32b98, 0x32bb8,
3022 0x32c98, 0x32d14,
3023 0x33000, 0x33020,
3024 0x33038, 0x3303c,
3025 0x33050, 0x33050,
3026 0x33080, 0x33080,
3027 0x33088, 0x330a8,
3028 0x330b0, 0x330b4,
3029 0x330c8, 0x330d4,
3030 0x33128, 0x33184,
3031 0x33198, 0x331b8,
3032 0x34000, 0x34038,
3033 0x34100, 0x3417c,
3034 0x34190, 0x341a0,
3035 0x341a8, 0x341b8,
3036 0x341c4, 0x341c8,
3037 0x341d0, 0x341e0,
3038 0x34200, 0x34344,
3039 0x34400, 0x344b4,
3040 0x344c0, 0x3452c,
3041 0x34540, 0x3465c,
3042 0x34800, 0x34848,
3043 0x34850, 0x348a8,
3044 0x348b8, 0x348c0,
3045 0x348cc, 0x348dc,
3046 0x34900, 0x34904,
3047 0x3490c, 0x34914,
3048 0x3491c, 0x34928,
3049 0x34930, 0x3493c,
3050 0x34944, 0x34948,
3051 0x34954, 0x34974,
3052 0x3497c, 0x34980,
3053 0x34a00, 0x34a20,
3054 0x34a38, 0x34a3c,
3055 0x34a50, 0x34a50,
3056 0x34a80, 0x34a80,
3057 0x34a88, 0x34aa8,
3058 0x34ab0, 0x34ab4,
3059 0x34ac8, 0x34ad4,
3060 0x34b28, 0x34b84,
3061 0x34b98, 0x34bb8,
3062 0x34c98, 0x34d14,
3063 0x35000, 0x35020,
3064 0x35038, 0x3503c,
3065 0x35050, 0x35050,
3066 0x35080, 0x35080,
3067 0x35088, 0x350a8,
3068 0x350b0, 0x350b4,
3069 0x350c8, 0x350d4,
3070 0x35128, 0x35184,
3071 0x35198, 0x351b8,
3072 0x36000, 0x36038,
3073 0x36100, 0x3617c,
3074 0x36190, 0x361a0,
3075 0x361a8, 0x361b8,
3076 0x361c4, 0x361c8,
3077 0x361d0, 0x361e0,
3078 0x36200, 0x36344,
3079 0x36400, 0x364b4,
3080 0x364c0, 0x3652c,
3081 0x36540, 0x3665c,
3082 0x36800, 0x36848,
3083 0x36850, 0x368a8,
3084 0x368b8, 0x368c0,
3085 0x368cc, 0x368dc,
3086 0x36900, 0x36904,
3087 0x3690c, 0x36914,
3088 0x3691c, 0x36928,
3089 0x36930, 0x3693c,
3090 0x36944, 0x36948,
3091 0x36954, 0x36974,
3092 0x3697c, 0x36980,
3093 0x36a00, 0x36a20,
3094 0x36a38, 0x36a3c,
3095 0x36a50, 0x36a50,
3096 0x36a80, 0x36a80,
3097 0x36a88, 0x36aa8,
3098 0x36ab0, 0x36ab4,
3099 0x36ac8, 0x36ad4,
3100 0x36b28, 0x36b84,
3101 0x36b98, 0x36bb8,
3102 0x36c98, 0x36d14,
3103 0x37000, 0x37020,
3104 0x37038, 0x3703c,
3105 0x37050, 0x37050,
3106 0x37080, 0x37080,
3107 0x37088, 0x370a8,
3108 0x370b0, 0x370b4,
3109 0x370c8, 0x370d4,
3110 0x37128, 0x37184,
3111 0x37198, 0x371b8,
3112 0x38000, 0x380b0,
3113 0x380b8, 0x38130,
3114 0x38140, 0x38140,
3115 0x38150, 0x38154,
3116 0x38160, 0x381c4,
3117 0x381d0, 0x38204,
3118 0x3820c, 0x38214,
3119 0x3821c, 0x3822c,
3120 0x38244, 0x38244,
3121 0x38254, 0x38274,
3122 0x3827c, 0x38280,
3123 0x38300, 0x38304,
3124 0x3830c, 0x38314,
3125 0x3831c, 0x3832c,
3126 0x38344, 0x38344,
3127 0x38354, 0x38374,
3128 0x3837c, 0x38380,
3129 0x38400, 0x38424,
3130 0x38438, 0x3843c,
3131 0x38480, 0x38480,
3132 0x384a8, 0x384a8,
3133 0x384b0, 0x384b4,
3134 0x384c8, 0x38514,
3135 0x38600, 0x3860c,
3136 0x3861c, 0x38624,
3137 0x38900, 0x38924,
3138 0x38938, 0x3893c,
3139 0x38980, 0x38980,
3140 0x389a8, 0x389a8,
3141 0x389b0, 0x389b4,
3142 0x389c8, 0x38a14,
3143 0x38b00, 0x38b0c,
3144 0x38b1c, 0x38b24,
3145 0x38e00, 0x38e00,
3146 0x38e18, 0x38e20,
3147 0x38e38, 0x38e40,
3148 0x38e58, 0x38e60,
3149 0x38e78, 0x38e80,
3150 0x38e98, 0x38ea0,
3151 0x38eb8, 0x38ec0,
3152 0x38ed8, 0x38ee0,
3153 0x38ef8, 0x38f08,
3154 0x38f10, 0x38f2c,
3155 0x38f80, 0x38ffc,
3156 0x39080, 0x39080,
3157 0x39088, 0x39090,
3158 0x39100, 0x39108,
3159 0x39120, 0x39128,
3160 0x39140, 0x39148,
3161 0x39160, 0x39168,
3162 0x39180, 0x39188,
3163 0x391a0, 0x391a8,
3164 0x391c0, 0x391c8,
3165 0x391e0, 0x391e8,
3166 0x39200, 0x39200,
3167 0x39208, 0x39240,
3168 0x39300, 0x39300,
3169 0x39308, 0x39340,
3170 0x39400, 0x39400,
3171 0x39408, 0x39440,
3172 0x39500, 0x39500,
3173 0x39508, 0x39540,
3174 0x39600, 0x39600,
3175 0x39608, 0x39640,
3176 0x39700, 0x39700,
3177 0x39708, 0x39740,
3178 0x39800, 0x39800,
3179 0x39808, 0x39840,
3180 0x39900, 0x39900,
3181 0x39908, 0x39940,
3182 0x39a00, 0x39a04,
3183 0x39a10, 0x39a14,
3184 0x39a1c, 0x39aa8,
3185 0x39b00, 0x39ecc,
3186 0x3a000, 0x3a004,
3187 0x3a050, 0x3a084,
3188 0x3a090, 0x3a09c,
3189 0x3a93c, 0x3a93c,
3190 0x3b93c, 0x3b93c,
3191 0x3c93c, 0x3c93c,
3192 0x3d93c, 0x3d93c,
3193 0x3e000, 0x3e020,
3194 0x3e03c, 0x3e05c,
3195 0x3e100, 0x3e120,
3196 0x3e13c, 0x3e15c,
3197 0x3e200, 0x3e220,
3198 0x3e23c, 0x3e25c,
3199 0x3e300, 0x3e320,
3200 0x3e33c, 0x3e35c,
3201 0x3f000, 0x3f034,
3202 0x3f100, 0x3f130,
3203 0x3f200, 0x3f218,
3204 0x44000, 0x44014,
3205 0x44020, 0x44028,
3206 0x44030, 0x44030,
3207 0x44100, 0x44114,
3208 0x44120, 0x44128,
3209 0x44130, 0x44130,
3210 0x44200, 0x44214,
3211 0x44220, 0x44228,
3212 0x44230, 0x44230,
3213 0x44300, 0x44314,
3214 0x44320, 0x44328,
3215 0x44330, 0x44330,
3216 0x44400, 0x44414,
3217 0x44420, 0x44428,
3218 0x44430, 0x44430,
3219 0x44500, 0x44514,
3220 0x44520, 0x44528,
3221 0x44530, 0x44530,
3222 0x44714, 0x44718,
3223 0x44730, 0x44730,
3224 0x447c0, 0x447c0,
3225 0x447f0, 0x447f0,
3226 0x447f8, 0x447fc,
3227 0x45000, 0x45014,
3228 0x45020, 0x45028,
3229 0x45030, 0x45030,
3230 0x45100, 0x45114,
3231 0x45120, 0x45128,
3232 0x45130, 0x45130,
3233 0x45200, 0x45214,
3234 0x45220, 0x45228,
3235 0x45230, 0x45230,
3236 0x45300, 0x45314,
3237 0x45320, 0x45328,
3238 0x45330, 0x45330,
3239 0x45400, 0x45414,
3240 0x45420, 0x45428,
3241 0x45430, 0x45430,
3242 0x45500, 0x45514,
3243 0x45520, 0x45528,
3244 0x45530, 0x45530,
3245 0x45714, 0x45718,
3246 0x45730, 0x45730,
3247 0x457c0, 0x457c0,
3248 0x457f0, 0x457f0,
3249 0x457f8, 0x457fc,
3250 0x46000, 0x46010,
3251 0x46020, 0x46034,
3252 0x46040, 0x46050,
3253 0x46060, 0x46088,
3254 0x47000, 0x4709c,
3255 0x470c0, 0x470d4,
3256 0x47100, 0x471a8,
3257 0x471b0, 0x471e8,
3258 0x47200, 0x47210,
3259 0x4721c, 0x47230,
3260 0x47238, 0x47238,
3261 0x47240, 0x472ac,
3262 0x472d0, 0x472f4,
3263 0x47300, 0x47310,
3264 0x47318, 0x47348,
3265 0x47350, 0x47354,
3266 0x47380, 0x47388,
3267 0x47390, 0x47394,
3268 0x47400, 0x47448,
3269 0x47450, 0x47458,
3270 0x47500, 0x4751c,
3271 0x47530, 0x4754c,
3272 0x47560, 0x4757c,
3273 0x47590, 0x475ac,
3274 0x47600, 0x47630,
3275 0x47640, 0x47644,
3276 0x47660, 0x4769c,
3277 0x47700, 0x47710,
3278 0x47740, 0x47750,
3279 0x4775c, 0x4779c,
3280 0x477b0, 0x477bc,
3281 0x477c4, 0x477c8,
3282 0x477d4, 0x477fc,
3283 0x48000, 0x48004,
3284 0x48018, 0x4801c,
3285 0x49304, 0x493f0,
3286 0x49400, 0x49410,
3287 0x49460, 0x494f4,
3288 0x50000, 0x50084,
3289 0x50090, 0x500cc,
3290 0x50300, 0x50384,
3291 0x50400, 0x50404,
3292 0x50800, 0x50884,
3293 0x50890, 0x508cc,
3294 0x50b00, 0x50b84,
3295 0x50c00, 0x50c04,
3296 0x51000, 0x51020,
3297 0x51028, 0x510c4,
3298 0x51104, 0x51108,
3299 0x51200, 0x51274,
3300 0x51300, 0x51324,
3301 0x51400, 0x51548,
3302 0x51550, 0x51554,
3303 0x5155c, 0x51584,
3304 0x5158c, 0x515c8,
3305 0x515f0, 0x515f4,
3306 0x58000, 0x58004,
3307 0x58018, 0x5801c,
3308 0x59304, 0x593f0,
3309 0x59400, 0x59410,
3310 0x59460, 0x594f4,
3311 };
3312
3313 u32 *buf_end = (u32 *)(buf + buf_size);
3314 const unsigned int *reg_ranges;
3315 int reg_ranges_size, range;
3316 unsigned int chip_version = chip_id(adap);
3317
3318 /*
3319 * Select the right set of register ranges to dump depending on the
3320 * adapter chip type.
3321 */
3322 switch (chip_version) {
3323 case CHELSIO_T4:
3324 if (adap->flags & IS_VF) {
3325 reg_ranges = t4vf_reg_ranges;
3326 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
3327 } else {
3328 reg_ranges = t4_reg_ranges;
3329 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
3330 }
3331 break;
3332
3333 case CHELSIO_T5:
3334 if (adap->flags & IS_VF) {
3335 reg_ranges = t5vf_reg_ranges;
3336 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
3337 } else {
3338 reg_ranges = t5_reg_ranges;
3339 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
3340 }
3341 break;
3342
3343 case CHELSIO_T6:
3344 if (adap->flags & IS_VF) {
3345 reg_ranges = t6vf_reg_ranges;
3346 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
3347 } else {
3348 reg_ranges = t6_reg_ranges;
3349 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
3350 }
3351 break;
3352
3353 case CHELSIO_T7:
3354 if (adap->flags & IS_VF) {
3355 reg_ranges = t6vf_reg_ranges;
3356 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
3357 } else {
3358 reg_ranges = t7_reg_ranges;
3359 reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
3360 }
3361 break;
3362
3363 default:
3364 CH_ERR(adap,
3365 "Unsupported chip version %d\n", chip_version);
3366 return;
3367 }
3368
3369 /*
3370 * Clear the register buffer and insert the appropriate register
3371 * values selected by the above register ranges.
3372 */
3373 memset(buf, 0, buf_size);
3374 for (range = 0; range < reg_ranges_size; range += 2) {
3375 unsigned int reg = reg_ranges[range];
3376 unsigned int last_reg = reg_ranges[range + 1];
3377 u32 *bufp = (u32 *)(buf + reg);
3378
3379 /*
3380 * Iterate across the register range filling in the register
3381 * buffer but don't write past the end of the register buffer.
3382 */
3383 while (reg <= last_reg && bufp < buf_end) {
3384 *bufp++ = t4_read_reg(adap, reg);
3385 reg += sizeof(u32);
3386 }
3387 }
3388 }
3389
3390 /*
3391 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID
3392 * header followed by one or more VPD-R sections, each with its own header.
3393 */
/*
 * Header of the VPD ID-string resource that begins the EEPROM VPD area.
 * Only the leading fields are described; the keyword data that follows is
 * walked byte-by-byte by get_vpd_keyword_val().
 */
struct t4_vpd_hdr {
	u8 id_tag;		/* resource tag byte of the ID string */
	u8 id_len[2];		/* resource length; presumably little-endian
				 * like vpdr_len below -- TODO confirm */
	u8 id_data[ID_LEN];	/* product identification string */
};
3399
/*
 * Header of a VPD-R (read-only) section following the ID string.  The
 * length bytes are combined little-endian (vpdr_len[0] + vpdr_len[1] << 8)
 * by get_vpd_keyword_val().
 */
struct t4_vpdr_hdr {
	u8 vpdr_tag;		/* resource tag of this section */
	u8 vpdr_len[2];		/* section length, little-endian */
};
3404
3405 /*
3406 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
3407 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc		/* EEPROM status word; t4_seeprom_wp()
						 * writes it, writes poll bit 0 here */
#define VPD_SIZE		0x800		/* size of the VPD area */
#define VPD_BASE		0x400		/* presumably the physical offset of
						 * the VPD in EEPROM -- TODO confirm */
#define VPD_BASE_OLD		0		/* NOTE(review): looks like a fallback
						 * offset for older cards -- confirm */
#define VPD_LEN			1024		/* bytes of VPD parsed/bounds-checked */
#define VPD_INFO_FLD_HDR_SIZE	3		/* 2-byte keyword + 1-byte length */
#define CHELSIO_VPD_UNIQUE_ID	0x82		/* expected ID-string resource tag */
3418
3419 /*
3420 * Small utility function to wait till any outstanding VPD Access is complete.
3421 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
3422 * VPD Access in flight. This allows us to handle the problem of having a
3423 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
3425 */
t4_seeprom_wait(struct adapter * adapter)3426 static int t4_seeprom_wait(struct adapter *adapter)
3427 {
3428 unsigned int base = adapter->params.pci.vpd_cap_addr;
3429 int max_poll;
3430
3431 /*
3432 * If no VPD Access is in flight, we can just return success right
3433 * away.
3434 */
3435 if (!adapter->vpd_busy)
3436 return 0;
3437
3438 /*
3439 * Poll the VPD Capability Address/Flag register waiting for it
3440 * to indicate that the operation is complete.
3441 */
3442 max_poll = EEPROM_MAX_POLL;
3443 do {
3444 u16 val;
3445
3446 udelay(EEPROM_DELAY);
3447 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
3448
3449 /*
3450 * If the operation is complete, mark the VPD as no longer
3451 * busy and return success.
3452 */
3453 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
3454 adapter->vpd_busy = 0;
3455 return 0;
3456 }
3457 } while (--max_poll);
3458
3459 /*
3460 * Failure! Note that we leave the VPD Busy status set in order to
3461 * avoid pushing a new VPD Access request into the VPD Capability till
3462 * the current operation eventually succeeds. It's a bug to issue a
3463 * new request when an existing request is in flight and will result
3464 * in corrupt hardware state.
3465 */
3466 return -ETIMEDOUT;
3467 }
3468
3469 /**
3470 * t4_seeprom_read - read a serial EEPROM location
3471 * @adapter: adapter to read
3472 * @addr: EEPROM virtual address
3473 * @data: where to store the read data
3474 *
3475 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
3476 * VPD capability. Note that this function must be called with a virtual
3477 * address.
3478 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 *
	 * Writing the address with PCI_VPD_ADDR_F clear starts a read; the
	 * hardware sets the flag when the read data is available, which is
	 * what vpd_flag = PCI_VPD_ADDR_F tells t4_seeprom_wait() to poll for.
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.  VPD data is little-endian in config space.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
3523
3524 /**
3525 * t4_seeprom_write - write a serial EEPROM location
3526 * @adapter: adapter to write
3527 * @addr: virtual EEPROM address
3528 * @data: value to write
3529 *
3530 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
3531 * VPD capability. Note that this function must be called with a virtual
3532 * address.
3533 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 *
	 * For a write, the data register is loaded first and the address is
	 * written with PCI_VPD_ADDR_F set; the hardware clears the flag on
	 * completion, hence vpd_flag = 0 for t4_seeprom_wait() to poll for.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 * Bit 0 of the EEPROM status word stays set while the device is
	 * still committing the write internally.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
3591
3592 /**
3593 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
3594 * @phys_addr: the physical EEPROM address
3595 * @fn: the PCI function number
3596 * @sz: size of function-specific area
3597 *
3598 * Translate a physical EEPROM address to virtual. The first 1K is
3599 * accessed through virtual addresses starting at 31K, the rest is
3600 * accessed through virtual addresses starting at 0.
3601 *
3602 * The mapping is as follows:
3603 * [0..1K) -> [31K..32K)
3604 * [1K..1K+A) -> [ES-A..ES)
3605 * [1K+A..ES) -> [0..ES-A-1K)
3606 *
3607 * where A = @fn * @sz, and ES = EEPROM size.
3608 */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)3609 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3610 {
3611 fn *= sz;
3612 if (phys_addr < 1024)
3613 return phys_addr + (31 << 10);
3614 if (phys_addr < 1024 + fn)
3615 return EEPROMSIZE - fn + phys_addr - 1024;
3616 if (phys_addr < EEPROMSIZE)
3617 return phys_addr - 1024 - fn;
3618 return -EINVAL;
3619 }
3620
3621 /**
3622 * t4_seeprom_wp - enable/disable EEPROM write protection
3623 * @adapter: the adapter
3624 * @enable: whether to enable or disable write protection
3625 *
3626 * Enables or disables write protection on the serial EEPROM.
3627 */
t4_seeprom_wp(struct adapter * adapter,int enable)3628 int t4_seeprom_wp(struct adapter *adapter, int enable)
3629 {
3630 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3631 }
3632
3633 /**
3634 * get_vpd_keyword_val - Locates an information field keyword in the VPD
3635 * @vpd: Pointer to buffered vpd data structure
3636 * @kw: The keyword to search for
3637 * @region: VPD region to search (starting from 0)
3638 *
3639 * Returns the value of the information field keyword or
3640 * -ENOENT otherwise.
3641 */
get_vpd_keyword_val(const u8 * vpd,const char * kw,int region)3642 static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
3643 {
3644 int i, tag;
3645 unsigned int offset, len;
3646 const struct t4_vpdr_hdr *vpdr;
3647
3648 offset = sizeof(struct t4_vpd_hdr);
3649 vpdr = (const void *)(vpd + offset);
3650 tag = vpdr->vpdr_tag;
3651 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
3652 while (region--) {
3653 offset += sizeof(struct t4_vpdr_hdr) + len;
3654 vpdr = (const void *)(vpd + offset);
3655 if (++tag != vpdr->vpdr_tag)
3656 return -ENOENT;
3657 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
3658 }
3659 offset += sizeof(struct t4_vpdr_hdr);
3660
3661 if (offset + len > VPD_LEN) {
3662 return -ENOENT;
3663 }
3664
3665 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3666 if (memcmp(vpd + i , kw , 2) == 0){
3667 i += VPD_INFO_FLD_HDR_SIZE;
3668 return i;
3669 }
3670
3671 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
3672 }
3673
3674 return -ENOENT;
3675 }
3676
3677
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@device_id: PCI device id of the adapter (bit 7 set marks a custom card)
 *	@buf: caller provided temporary space (at least VPD_LEN bytes) used to
 *	      read and parse the VPD
 *
 *	Reads card parameters stored in VPD EEPROM.  Returns 0 on success or
 *	a negative errno on read failure, missing keyword, or bad checksum.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
    uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;		/* offsets of keyword values in vpd */
	u8 csum;
	const u8 *vpd = (const u8 *)buf;	/* byte-wise view of @buf */

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the whole VPD area into @buf, one 32-bit word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

	/* Locate keyword @name and leave its value offset in @var. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/* "RV" holds the checksum byte: vpd[0..RV] must sum to zero. */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
		    "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out and tidy it with strstrip(). */
	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];	/* field length byte */
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0; /* Custom card */

	/* "VF" lives in the second VPD-R region (region index 1). */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}
3769
/*
 * Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters.
 * Indexed by enum t4_flash_loc; FLASH_LOC_END appears to serve as a
 * terminator recording the total sector count (64).
 */
static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
	[FLASH_LOC_EXP_ROM] = { 0, 6 },
	[FLASH_LOC_IBFT] = { 6, 1 },
	[FLASH_LOC_BOOTCFG] = { 7, 1 },
	[FLASH_LOC_FW] = { 8, 16 },
	[FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
	[FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 30, 1 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_CUDBG] = { 32, 32 },
	[FLASH_LOC_BOOT_AREA] = { 0, 8 },	/* Spans complete Boot Area */
	[FLASH_LOC_END] = { 64, 0 },
};
3784
/*
 * Flash Layout {start sector, # of sectors} for T7 adapters.
 * Indexed by enum t4_flash_loc; note T7 adds backup copies of VPD,
 * bootstrap, FW and config, plus DPU regions.  FLASH_LOC_END appears to
 * serve as a terminator recording the total sector count (256).
 */
static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
	[FLASH_LOC_VPD] = { 0, 1 },
	[FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
	[FLASH_LOC_FW] = { 2, 29 },
	[FLASH_LOC_CFG] = { 31, 1 },
	[FLASH_LOC_EXP_ROM] = { 32, 15 },
	[FLASH_LOC_IBFT] = { 47, 1 },
	[FLASH_LOC_BOOTCFG] = { 48, 1 },
	[FLASH_LOC_DPU_BOOT] = { 49, 13 },
	[FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
	[FLASH_LOC_FCOE_CRASH] = { 63, 1 },
	[FLASH_LOC_VPD_BACKUP] = { 64, 1 },
	[FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
	[FLASH_LOC_FW_BACKUP] = { 66, 29 },
	[FLASH_LOC_CFG_BACK] = { 95, 1 },
	[FLASH_LOC_CUDBG] = { 96, 48 },
	[FLASH_LOC_CHIP_DUMP] = { 144, 48 },
	[FLASH_LOC_DPU_AREA] = { 192, 64 },
	[FLASH_LOC_BOOT_AREA] = { 32, 17 },	/* Spans complete UEFI/PXE Boot Area */
	[FLASH_LOC_END] = { 256, 0 },
};
3807
3808 int
t4_flash_loc_start(struct adapter * adap,enum t4_flash_loc loc,unsigned int * lenp)3809 t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
3810 unsigned int *lenp)
3811 {
3812 const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
3813 &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
3814
3815 if (lenp != NULL)
3816 *lenp = FLASH_MAX_SIZE(l->nsecs);
3817 return (FLASH_START(l->start_sec));
3818 }
3819
/*
 * Serial flash and firmware constants and flash config file constants.
 * The opcodes below match the common SPI NOR flash command set.
 */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program 256B page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3833
3834 /**
3835 * sf1_read - read data from the serial flash
3836 * @adapter: the adapter
3837 * @byte_cnt: number of bytes to read
3838 * @cont: whether another operation will be chained
3839 * @lock: whether to lock SF for PL access only
3840 * @valp: where to store the read data
3841 *
3842 * Reads up to 4 bytes of data from the serial flash. The location of
3843 * the read needs to be specified prior to calling this by issuing the
3844 * appropriate commands to the serial flash.
3845 */
sf1_read(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 * valp)3846 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3847 int lock, u32 *valp)
3848 {
3849 int ret;
3850 uint32_t op;
3851
3852 if (!byte_cnt || byte_cnt > 4)
3853 return -EINVAL;
3854 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3855 return -EBUSY;
3856 op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
3857 if (chip_id(adapter) >= CHELSIO_T7)
3858 op |= F_QUADREADDISABLE;
3859 t4_write_reg(adapter, A_SF_OP, op);
3860 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3861 if (!ret)
3862 *valp = t4_read_reg(adapter, A_SF_DATA);
3863 return ret;
3864 }
3865
3866 /**
3867 * sf1_write - write data to the serial flash
3868 * @adapter: the adapter
3869 * @byte_cnt: number of bytes to write
3870 * @cont: whether another operation will be chained
3871 * @lock: whether to lock SF for PL access only
3872 * @val: value to write
3873 *
3874 * Writes up to 4 bytes of data to the serial flash. The location of
3875 * the write needs to be specified prior to calling this by issuing the
3876 * appropriate commands to the serial flash.
3877 */
sf1_write(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 val)3878 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3879 int lock, u32 val)
3880 {
3881 if (!byte_cnt || byte_cnt > 4)
3882 return -EINVAL;
3883 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3884 return -EBUSY;
3885 t4_write_reg(adapter, A_SF_DATA, val);
3886 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3887 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3888 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3889 }
3890
3891 /**
3892 * flash_wait_op - wait for a flash operation to complete
3893 * @adapter: the adapter
3894 * @attempts: max number of polls of the status register
3895 * @delay: delay between polls in ms
3896 *
3897 * Wait for a flash operation to complete by polling the status register.
3898 */
flash_wait_op(struct adapter * adapter,int attempts,int delay)3899 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3900 {
3901 int ret;
3902 u32 status;
3903
3904 while (1) {
3905 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3906 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3907 return ret;
3908 if (!(status & 1))
3909 return 0;
3910 if (--attempts == 0)
3911 return -EAGAIN;
3912 if (delay)
3913 msleep(delay);
3914 }
3915 }
3916
3917 /**
3918 * t4_read_flash - read words from serial flash
3919 * @adapter: the adapter
3920 * @addr: the start address for the read
3921 * @nwords: how many 32-bit words to read
3922 * @data: where to store the read data
3923 * @byte_oriented: whether to store data as bytes or as words
3924 *
3925 * Read the specified number of 32-bit words from the serial flash.
3926 * If @byte_oriented is set the read data is stored as a byte array
3927 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3928 * natural endianness.
3929 */
t4_read_flash(struct adapter * adapter,unsigned int addr,unsigned int nwords,u32 * data,int byte_oriented)3930 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3931 unsigned int nwords, u32 *data, int byte_oriented)
3932 {
3933 int ret;
3934
3935 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3936 return -EINVAL;
3937
3938 addr = swab32(addr) | SF_RD_DATA_FAST;
3939
3940 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3941 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3942 return ret;
3943
3944 for ( ; nwords; nwords--, data++) {
3945 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3946 if (nwords == 1)
3947 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
3948 if (ret)
3949 return ret;
3950 if (byte_oriented)
3951 *data = (__force __u32)(cpu_to_be32(*data));
3952 }
3953 return 0;
3954 }
3955
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.  The page is
 *	read back after the write and compared to detect failed programming.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
    unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];	/* read-back verification buffer */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Must fit in the flash and not cross a page boundary. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Page-program command with byte-swapped target address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload to the flash up to 4 bytes at a time. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		/* CONT stays set (c != left) while more data remains. */
		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
	    byte_oriented);
	if (ret)
		return ret;

	/* @data advanced by @n bytes in the loop above; rewind to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
		    "failed to correctly write the flash page at %#x\n",
		    addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
4021
4022 /**
4023 * t4_get_fw_version - read the firmware version
4024 * @adapter: the adapter
4025 * @vers: where to place the version
4026 *
4027 * Reads the FW version from flash.
4028 */
t4_get_fw_version(struct adapter * adapter,u32 * vers)4029 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
4030 {
4031 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4032
4033 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
4034 1, vers, 0);
4035 }
4036
4037 /**
4038 * t4_get_fw_hdr - read the firmware header
4039 * @adapter: the adapter
4040 * @hdr: where to place the version
4041 *
4042 * Reads the FW header from flash into caller provided buffer.
4043 */
t4_get_fw_hdr(struct adapter * adapter,struct fw_hdr * hdr)4044 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
4045 {
4046 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4047
4048 return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
4049 (uint32_t *)hdr, 1);
4050 }
4051
4052 /**
4053 * t4_get_bs_version - read the firmware bootstrap version
4054 * @adapter: the adapter
4055 * @vers: where to place the version
4056 *
4057 * Reads the FW Bootstrap version from flash.
4058 */
t4_get_bs_version(struct adapter * adapter,u32 * vers)4059 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
4060 {
4061 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
4062 NULL);
4063
4064 return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
4065 1, vers, 0);
4066 }
4067
4068 /**
4069 * t4_get_tp_version - read the TP microcode version
4070 * @adapter: the adapter
4071 * @vers: where to place the version
4072 *
4073 * Reads the TP microcode version from flash.
4074 */
t4_get_tp_version(struct adapter * adapter,u32 * vers)4075 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
4076 {
4077 const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
4078
4079 return t4_read_flash(adapter, start +
4080 offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
4081 }
4082
4083 /**
4084 * t4_get_exprom_version - return the Expansion ROM version (if any)
4085 * @adapter: the adapter
4086 * @vers: where to place the version
4087 *
4088 * Reads the Expansion ROM header from FLASH and returns the version
4089 * number (if present) through the @vers return value pointer. We return
4090 * this in the Firmware Version Format since it's convenient. Return
4091 * 0 on success, -ENOENT if no Expansion ROM is present.
4092 */
t4_get_exprom_version(struct adapter * adapter,u32 * vers)4093 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
4094 {
4095 struct exprom_header {
4096 unsigned char hdr_arr[16]; /* must start with 0x55aa */
4097 unsigned char hdr_ver[4]; /* Expansion ROM version */
4098 } *hdr;
4099 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
4100 sizeof(u32))];
4101 int ret;
4102 const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
4103
4104 ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
4105 exprom_header_buf, 0);
4106 if (ret)
4107 return ret;
4108
4109 hdr = (struct exprom_header *)exprom_header_buf;
4110 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
4111 return -ENOENT;
4112
4113 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
4114 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
4115 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
4116 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
4117 return 0;
4118 }
4119
4120 /**
4121 * t4_get_scfg_version - return the Serial Configuration version
4122 * @adapter: the adapter
4123 * @vers: where to place the version
4124 *
4125 * Reads the Serial Configuration Version via the Firmware interface
4126 * (thus this can only be called once we're ready to issue Firmware
4127 * commands). The format of the Serial Configuration version is
4128 * adapter specific. Returns 0 on success, an error on failure.
4129 *
4130 * Note that early versions of the Firmware didn't include the ability
4131 * to retrieve the Serial Configuration version, so we zero-out the
4132 * return-value parameter in that case to avoid leaving it with
4133 * garbage in it.
4134 *
4135 * Also note that the Firmware will return its cached copy of the Serial
4136 * Initialization Revision ID, not the actual Revision ID as written in
4137 * the Serial EEPROM. This is only an issue if a new VPD has been written
4138 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
4139 * it's best to defer calling this routine till after a FW_RESET_CMD has
4140 * been issued if the Host Driver will be performing a full adapter
4141 * initialization.
4142 */
t4_get_scfg_version(struct adapter * adapter,u32 * vers)4143 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
4144 {
4145 u32 scfgrev_param;
4146 int ret;
4147
4148 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4149 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
4150 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4151 1, &scfgrev_param, vers);
4152 if (ret)
4153 *vers = 0;
4154 return ret;
4155 }
4156
4157 /**
4158 * t4_get_vpd_version - return the VPD version
4159 * @adapter: the adapter
4160 * @vers: where to place the version
4161 *
4162 * Reads the VPD via the Firmware interface (thus this can only be called
4163 * once we're ready to issue Firmware commands). The format of the
4164 * VPD version is adapter specific. Returns 0 on success, an error on
4165 * failure.
4166 *
4167 * Note that early versions of the Firmware didn't include the ability
4168 * to retrieve the VPD version, so we zero-out the return-value parameter
4169 * in that case to avoid leaving it with garbage in it.
4170 *
4171 * Also note that the Firmware will return its cached copy of the VPD
4172 * Revision ID, not the actual Revision ID as written in the Serial
4173 * EEPROM. This is only an issue if a new VPD has been written and the
4174 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
4175 * to defer calling this routine till after a FW_RESET_CMD has been issued
4176 * if the Host Driver will be performing a full adapter initialization.
4177 */
t4_get_vpd_version(struct adapter * adapter,u32 * vers)4178 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
4179 {
4180 u32 vpdrev_param;
4181 int ret;
4182
4183 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4184 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
4185 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4186 1, &vpdrev_param, vers);
4187 if (ret)
4188 *vers = 0;
4189 return ret;
4190 }
4191
4192 /**
4193 * t4_get_version_info - extract various chip/firmware version information
4194 * @adapter: the adapter
4195 *
4196 * Reads various chip/firmware version numbers and stores them into the
4197 * adapter Adapter Parameters structure. If any of the efforts fails
4198 * the first failure will be returned, but all of the version numbers
4199 * will be read.
4200 */
t4_get_version_info(struct adapter * adapter)4201 int t4_get_version_info(struct adapter *adapter)
4202 {
4203 int ret = 0;
4204
4205 #define FIRST_RET(__getvinfo) \
4206 do { \
4207 int __ret = __getvinfo; \
4208 if (__ret && !ret) \
4209 ret = __ret; \
4210 } while (0)
4211
4212 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
4213 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
4214 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
4215 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
4216 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
4217 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
4218
4219 #undef FIRST_RET
4220
4221 return ret;
4222 }
4223
4224 /**
4225 * t4_flash_erase_sectors - erase a range of flash sectors
4226 * @adapter: the adapter
4227 * @start: the first sector to erase
4228 * @end: the last sector to erase
4229 *
4230 * Erases the sectors in the given inclusive range.
4231 */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)4232 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4233 {
4234 int ret = 0;
4235
4236 if (end >= adapter->params.sf_nsec)
4237 return -EINVAL;
4238
4239 while (start <= end) {
4240 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4241 (ret = sf1_write(adapter, 4, 0, 1,
4242 SF_ERASE_SECTOR | (start << 8))) != 0 ||
4243 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4244 CH_ERR(adapter,
4245 "erase of flash sector %d failed, error %d\n",
4246 start, ret);
4247 break;
4248 }
4249 start++;
4250 }
4251 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4252 return ret;
4253 }
4254
4255 /**
4256 * t4_flash_cfg_addr - return the address of the flash configuration file
4257 * @adapter: the adapter
4258 *
4259 * Return the address within the flash where the Firmware Configuration
4260 * File is stored, or an error if the device FLASH is too small to contain
4261 * a Firmware Configuration File.
4262 */
t4_flash_cfg_addr(struct adapter * adapter,unsigned int * lenp)4263 int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
4264 {
4265 unsigned int len = 0;
4266 const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
4267
4268 /*
4269 * If the device FLASH isn't large enough to hold a Firmware
4270 * Configuration File, return an error.
4271 */
4272 if (adapter->params.sf_size < cfg_start + len)
4273 return -ENOSPC;
4274 if (lenp != NULL)
4275 *lenp = len;
4276 return (cfg_start);
4277 }
4278
4279 /*
4280 * Return TRUE if the specified firmware matches the adapter. I.e. T4
4281 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
4282 * and emit an error message for mismatched firmware to save our caller the
4283 * effort ...
4284 */
t4_fw_matches_chip(struct adapter * adap,const struct fw_hdr * hdr)4285 static int t4_fw_matches_chip(struct adapter *adap,
4286 const struct fw_hdr *hdr)
4287 {
4288 /*
4289 * The expression below will return FALSE for any unsupported adapter
4290 * which will keep us "honest" in the future ...
4291 */
4292 if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
4293 (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
4294 (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
4295 (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
4296 return 1;
4297
4298 CH_ERR(adap,
4299 "FW image (%d) is not suitable for this adapter (%d)\n",
4300 hdr->chip, chip_id(adap));
4301 return 0;
4302 }
4303
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image is validated (size, header length, checksum, chip match) before
 *	the flash is touched.  Bootstrap images (FW_HDR_MAGIC_BOOTSTRAP) go
 *	to the bootstrap region, everything else to the main FW region.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;
	enum t4_flash_loc loc;

	/* Bootstrap images are written to their own flash region. */
	loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
	    FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
	fw_start = t4_flash_loc_start(adap, loc, &fw_size);
	fw_start_sec = fw_start / SF_SEC_SIZE;

	/* Validate the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
		    "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
		    "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		    fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit big-endian words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
		    "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, SF_SEC_SIZE);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version to mark the image valid. */
	ret = t4_write_flash(adap,
	    fw_start + offsetof(struct fw_hdr, fw_ver),
	    sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		    ret);
	return ret;
}
4395
4396 /**
4397 * t4_fwcache - firmware cache operation
4398 * @adap: the adapter
4399 * @op : the operation (flush or flush and invalidate)
4400 */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)4401 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4402 {
4403 struct fw_params_cmd c;
4404
4405 memset(&c, 0, sizeof(c));
4406 c.op_to_vfn =
4407 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4408 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4409 V_FW_PARAMS_CMD_PFN(adap->pf) |
4410 V_FW_PARAMS_CMD_VFN(0));
4411 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4412 c.param[0].mnem =
4413 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4414 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4415 c.param[0].val = cpu_to_be32(op);
4416
4417 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4418 }
4419
/*
 * Read out the CIM PIF logic analyzer capture into @pif_req / @pif_rsp and
 * optionally report the current write pointers through @pif_req_wrptr /
 * @pif_rsp_wrptr.  The LA is disabled while being read and the previous
 * debug configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
    unsigned int *pif_req_wrptr,
    unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Clear F_LADBGEN (if set) so the LA holds still while we read it. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entry, then read both capture words. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
			    V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/*
		 * NOTE(review): pointers advance by 8 per row (6 read + 2
		 * skipped) — presumably 2 unused entries per row; confirm
		 * against the CIM LA layout.
		 */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the caller's debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4453
/*
 * Read out the CIM MA logic analyzer capture into @ma_req / @ma_rsp.
 * The LA is disabled while being read and the previous debug
 * configuration is restored afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 saved_cfg;
	int row, col;

	/* Clear F_LADBGEN (if set) so the LA holds still while we read it. */
	saved_cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (saved_cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg ^ F_LADBGEN);

	for (row = 0; row < CIM_MALA_SIZE; row++) {
		for (col = 0; col < 5; col++) {
			const int idx = 8 * row + col;

			t4_write_reg(adap, A_CIM_DEBUGCFG,
			    V_POLADBGRDPTR(idx) | V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	/* Restore the caller's debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg);
}
4474
/*
 * Read out the ULP RX logic analyzer.  For each of the 8 units the
 * captured entries are stored into @la_buf with a stride of 8 (entries of
 * unit i land at la_buf[i], la_buf[i + 8], ...).
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int unit, entry;

	for (unit = 0; unit < 8; unit++) {
		u32 *dst = la_buf + unit;
		u32 wrptr;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, unit);
		/* Start reading from the current write pointer. */
		wrptr = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, wrptr);
		for (entry = 0; entry < ULPRX_LA_SIZE; entry++, dst += 8)
			*dst = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4489
4490 /**
4491 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
4492 * @caps16: a 16-bit Port Capabilities value
4493 *
4494 * Returns the equivalent 32-bit Port Capabilities value.
4495 */
fwcaps16_to_caps32(uint16_t caps16)4496 static uint32_t fwcaps16_to_caps32(uint16_t caps16)
4497 {
4498 uint32_t caps32 = 0;
4499
4500 #define CAP16_TO_CAP32(__cap) \
4501 do { \
4502 if (caps16 & FW_PORT_CAP_##__cap) \
4503 caps32 |= FW_PORT_CAP32_##__cap; \
4504 } while (0)
4505
4506 CAP16_TO_CAP32(SPEED_100M);
4507 CAP16_TO_CAP32(SPEED_1G);
4508 CAP16_TO_CAP32(SPEED_25G);
4509 CAP16_TO_CAP32(SPEED_10G);
4510 CAP16_TO_CAP32(SPEED_40G);
4511 CAP16_TO_CAP32(SPEED_100G);
4512 CAP16_TO_CAP32(FC_RX);
4513 CAP16_TO_CAP32(FC_TX);
4514 CAP16_TO_CAP32(ANEG);
4515 CAP16_TO_CAP32(FORCE_PAUSE);
4516 CAP16_TO_CAP32(MDIAUTO);
4517 CAP16_TO_CAP32(MDISTRAIGHT);
4518 CAP16_TO_CAP32(FEC_RS);
4519 CAP16_TO_CAP32(FEC_BASER_RS);
4520 CAP16_TO_CAP32(802_3_PAUSE);
4521 CAP16_TO_CAP32(802_3_ASM_DIR);
4522
4523 #undef CAP16_TO_CAP32
4524
4525 return caps32;
4526 }
4527
4528 /**
4529 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4530 * @caps32: a 32-bit Port Capabilities value
4531 *
4532 * Returns the equivalent 16-bit Port Capabilities value. Note that
4533 * not all 32-bit Port Capabilities can be represented in the 16-bit
4534 * Port Capabilities and some fields/values may not make it.
4535 */
fwcaps32_to_caps16(uint32_t caps32)4536 static uint16_t fwcaps32_to_caps16(uint32_t caps32)
4537 {
4538 uint16_t caps16 = 0;
4539
4540 #define CAP32_TO_CAP16(__cap) \
4541 do { \
4542 if (caps32 & FW_PORT_CAP32_##__cap) \
4543 caps16 |= FW_PORT_CAP_##__cap; \
4544 } while (0)
4545
4546 CAP32_TO_CAP16(SPEED_100M);
4547 CAP32_TO_CAP16(SPEED_1G);
4548 CAP32_TO_CAP16(SPEED_10G);
4549 CAP32_TO_CAP16(SPEED_25G);
4550 CAP32_TO_CAP16(SPEED_40G);
4551 CAP32_TO_CAP16(SPEED_100G);
4552 CAP32_TO_CAP16(FC_RX);
4553 CAP32_TO_CAP16(FC_TX);
4554 CAP32_TO_CAP16(802_3_PAUSE);
4555 CAP32_TO_CAP16(802_3_ASM_DIR);
4556 CAP32_TO_CAP16(ANEG);
4557 CAP32_TO_CAP16(FORCE_PAUSE);
4558 CAP32_TO_CAP16(MDIAUTO);
4559 CAP32_TO_CAP16(MDISTRAIGHT);
4560 CAP32_TO_CAP16(FEC_RS);
4561 CAP32_TO_CAP16(FEC_BASER_RS);
4562
4563 #undef CAP32_TO_CAP16
4564
4565 return caps16;
4566 }
4567
fwcap_to_fec(uint32_t caps,bool unset_means_none)4568 static int8_t fwcap_to_fec(uint32_t caps, bool unset_means_none)
4569 {
4570 int8_t fec = 0;
4571
4572 if ((caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) == 0)
4573 return (unset_means_none ? FEC_NONE : 0);
4574
4575 if (caps & FW_PORT_CAP32_FEC_RS)
4576 fec |= FEC_RS;
4577 if (caps & FW_PORT_CAP32_FEC_BASER_RS)
4578 fec |= FEC_BASER_RS;
4579 if (caps & FW_PORT_CAP32_FEC_NO_FEC)
4580 fec |= FEC_NONE;
4581
4582 return (fec);
4583 }
4584
4585 /*
4586 * Note that 0 is not translated to NO_FEC.
4587 */
fec_to_fwcap(int8_t fec)4588 static uint32_t fec_to_fwcap(int8_t fec)
4589 {
4590 uint32_t caps = 0;
4591
4592 /* Only real FECs allowed. */
4593 MPASS((fec & ~M_FW_PORT_CAP32_FEC) == 0);
4594
4595 if (fec & FEC_RS)
4596 caps |= FW_PORT_CAP32_FEC_RS;
4597 if (fec & FEC_BASER_RS)
4598 caps |= FW_PORT_CAP32_FEC_BASER_RS;
4599 if (fec & FEC_NONE)
4600 caps |= FW_PORT_CAP32_FEC_NO_FEC;
4601
4602 return (caps);
4603 }
4604
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Requested pause settings; FORCE_PAUSE when not negotiating pause. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/*
	 * Autonegotiation: explicit enable/disable is honored as-is;
	 * otherwise (auto) use it iff the port is capable of it.
	 */
	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/*
	 * Speed: advertise all supported speeds when autonegotiating, else
	 * exactly one speed (the requested one, or the port's top speed).
	 */
	if (aneg) {
		speed = lc->pcaps &
		    V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->pcaps);

	fec = 0;
	if (fec_supported(speed)) {
		int force_fec;

		/* FORCE_FEC may only be used if the firmware supports it. */
		if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
			force_fec = lc->force_fec;
		else
			force_fec = 0;

		if (lc->requested_fec == FEC_AUTO) {
			if (force_fec > 0) {
				/*
				 * Must use FORCE_FEC even though requested FEC
				 * is AUTO. Set all the FEC bits valid for the
				 * speed and let the firmware pick one.
				 */
				fec |= FW_PORT_CAP32_FORCE_FEC;
				if (speed & FW_PORT_CAP32_SPEED_25G) {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_BASER_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				} else {
					fec |= FW_PORT_CAP32_FEC_RS;
					fec |= FW_PORT_CAP32_FEC_NO_FEC;
				}
			} else {
				/*
				 * Set only 1b. Old firmwares can't deal with
				 * multiple bits and new firmwares are free to
				 * ignore this and try whatever FECs they want
				 * because we aren't setting FORCE_FEC here.
				 */
				fec |= fec_to_fwcap(lc->fec_hint);
				MPASS(powerof2(fec));

				/*
				 * Override the hint if the FEC is not valid for
				 * the potential top speed. Request the best
				 * FEC at that speed instead.
				 */
				if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
				    fec == FW_PORT_CAP32_FEC_BASER_RS) {
					fec = FW_PORT_CAP32_FEC_RS;
				}
			}
		} else {
			/*
			 * User has explicitly requested some FEC(s). Set
			 * FORCE_FEC unless prohibited from using it.
			 */
			if (force_fec != 0)
				fec |= FW_PORT_CAP32_FORCE_FEC;
			fec |= fec_to_fwcap(lc->requested_fec &
			    M_FW_PORT_CAP32_FEC);
			/* FEC_MODULE: additionally allow the transceiver's hint. */
			if (lc->requested_fec & FEC_MODULE)
				fec |= fec_to_fwcap(lc->fec_hint);
		}

		/*
		 * This is for compatibility with old firmwares. The original
		 * way to request NO_FEC was to not set any of the FEC bits. New
		 * firmwares understand this too.
		 */
		if (fec == FW_PORT_CAP32_FEC_NO_FEC)
			fec = 0;
	}

	/* Force AN on for BT cards. */
	if (isset(&adap->bt_map, port))
		aneg = lc->pcaps & FW_PORT_CAP32_ANEG;

	/* Drop anything requested beyond the port's capabilities. */
	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->pcaps) != lc->pcaps) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x, removed 0x%x\n", rcap,
		    lc->pcaps, rcap & (rcap ^ lc->pcaps));
#endif
		rcap &= lc->pcaps;
	}
	rcap |= mdi;

	/* Use the 32-bit L1 Configure command if the firmware supports it. */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
	    V_FW_PORT_CMD_PORTID(port));
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	/* Remember what we asked for so link status can be compared later. */
	lc->requested_caps = rcap;
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
4748
4749 /**
4750 * t4_restart_aneg - restart autonegotiation
4751 * @adap: the adapter
4752 * @mbox: mbox to use for the FW command
4753 * @port: the port id
4754 *
4755 * Restarts autonegotiation for the selected port.
4756 */
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)4757 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4758 {
4759 struct fw_port_cmd c;
4760
4761 memset(&c, 0, sizeof(c));
4762 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4763 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4764 V_FW_PORT_CMD_PORTID(port));
4765 c.action_to_len16 =
4766 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4767 FW_LEN16(c));
4768 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4769 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4770 }
4771
/* Human-readable description of one (or more) cause bits. */
struct intr_details {
	u32 mask;		/* cause bit(s) this entry describes */
	const char *msg;	/* message logged when any bit in mask is set */
};

/* Callback run when a matching cause bit fires. */
struct intr_action {
	u32 mask;		/* cause bit(s) that trigger the action */
	int arg;		/* passed through to the action callback */
	bool (*action)(struct adapter *, int, bool);	/* returns true if fatal */
};

/* Hint: bits masked off in INT_ENABLE are not treated as fatal. */
#define NONFATAL_IF_DISABLED 1
/* Describes one interrupt cause register and how to decode/handle it. */
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};
4793
4794 static inline char
intr_alert_char(u32 cause,u32 enable,u32 fatal)4795 intr_alert_char(u32 cause, u32 enable, u32 fatal)
4796 {
4797
4798 if (cause & fatal)
4799 return ('!');
4800 if (cause & enable)
4801 return ('*');
4802 return ('-');
4803 }
4804
4805 static void
t4_show_intr_info(struct adapter * adap,const struct intr_info * ii,u32 cause)4806 t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
4807 {
4808 u32 enable, fatal, leftover;
4809 const struct intr_details *details;
4810 char alert;
4811
4812 enable = t4_read_reg(adap, ii->enable_reg);
4813 if (ii->flags & NONFATAL_IF_DISABLED)
4814 fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
4815 else
4816 fatal = ii->fatal;
4817 alert = intr_alert_char(cause, enable, fatal);
4818 CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
4819 alert, ii->name, ii->cause_reg, cause, enable, fatal);
4820
4821 leftover = cause;
4822 for (details = ii->details; details && details->mask != 0; details++) {
4823 u32 msgbits = details->mask & cause;
4824 if (msgbits == 0)
4825 continue;
4826 alert = intr_alert_char(msgbits, enable, ii->fatal);
4827 CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
4828 details->msg);
4829 leftover &= ~msgbits;
4830 }
4831 if (leftover != 0 && leftover != cause)
4832 CH_ALERT(adap, " ? [0x%08x]\n", leftover);
4833 }
4834
/*
 * Read, decode, and clear the interrupt cause register described by @ii,
 * running any registered actions whose mask matches a set cause bit (bits in
 * @additional_cause also trigger actions but are read from elsewhere by the
 * caller).  Returns true for fatal error.
 */
static bool
t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
	       u32 additional_cause, bool verbose)
{
	u32 cause, fatal;
	bool rc;
	const struct intr_action *action;

	/*
	 * Read and display cause. Note that the top level PL_INT_CAUSE is a
	 * bit special and we need to completely ignore the bits that are not in
	 * PL_INT_ENABLE.
	 */
	cause = t4_read_reg(adap, ii->cause_reg);
	if (ii->cause_reg == A_PL_INT_CAUSE)
		cause &= t4_read_reg(adap, ii->enable_reg);
	if (verbose || cause != 0)
		t4_show_intr_info(adap, ii, cause);
	/* Per NONFATAL_IF_DISABLED, masked-off bits don't count as fatal. */
	fatal = cause & ii->fatal;
	if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
		fatal &= t4_read_reg(adap, ii->enable_reg);
	cause |= additional_cause;
	if (cause == 0)
		return (false);

	/* Run every action whose mask overlaps the accumulated cause. */
	rc = fatal != 0;
	for (action = ii->actions; action && action->mask != 0; action++) {
		if (!(action->mask & cause))
			continue;
		rc |= (action->action)(adap, action->arg, verbose);
	}

	/* clear (write-1-to-clear; the read-back flushes the write) */
	t4_write_reg(adap, ii->cause_reg, cause);
	(void)t4_read_reg(adap, ii->cause_reg);

	return (rc);
}
4876
/*
 * Interrupt handler for the PCIE module.  On T4 two additional UTL interrupt
 * registers are serviced first; the PCIE_INT_CAUSE decode table is selected
 * per chip generation.  Returns true for fatal error.
 */
static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* T4-only: UTL system bus agent status. */
	static const struct intr_details sysbus_intr_details[] = {
		{ F_RNPP, "RXNP array parity error" },
		{ F_RPCP, "RXPC array parity error" },
		{ F_RCIP, "RXCIF array parity error" },
		{ F_RCCP, "Rx completions control array parity error" },
		{ F_RFTP, "RXFT array parity error" },
		{ 0 }
	};
	static const struct intr_info sysbus_intr_info = {
		.name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
		.fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
		.flags = 0,
		.details = sysbus_intr_details,
		.actions = NULL,
	};
	/* T4-only: UTL PCI Express port status. */
	static const struct intr_details pcie_port_intr_details[] = {
		{ F_TPCP, "TXPC array parity error" },
		{ F_TNPP, "TXNP array parity error" },
		{ F_TFTP, "TXFT array parity error" },
		{ F_TCAP, "TXCA array parity error" },
		{ F_TCIP, "TXCIF array parity error" },
		{ F_RCAP, "RXCA array parity error" },
		{ F_OTDD, "outbound request TLP discarded" },
		{ F_RDPE, "Rx data parity error" },
		{ F_TDUE, "Tx uncorrectable data error" },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info = {
		.name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
		.fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
		    F_OTDD | F_RDPE | F_TDUE,
		.flags = 0,
		.details = pcie_port_intr_details,
		.actions = NULL,
	};
	/* PCIE_INT_CAUSE decode for T4. */
	static const struct intr_details pcie_intr_details[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error" },
		{ F_MSIADDRHPERR, "MSI AddrH parity error" },
		{ F_MSIDATAPERR, "MSI data parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
		{ F_MSIXDATAPERR, "MSI-X data parity error" },
		{ F_MSIXDIPERR, "MSI-X DI parity error" },
		{ F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
		{ F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
		{ F_TARTAGPERR, "PCIe target tag FIFO parity error" },
		{ F_CCNTPERR, "PCIe CMD channel count parity error" },
		{ F_CREQPERR, "PCIe CMD channel request parity error" },
		{ F_CRSPPERR, "PCIe CMD channel response parity error" },
		{ F_DCNTPERR, "PCIe DMA channel count parity error" },
		{ F_DREQPERR, "PCIe DMA channel request parity error" },
		{ F_DRSPPERR, "PCIe DMA channel response parity error" },
		{ F_HCNTPERR, "PCIe HMA channel count parity error" },
		{ F_HREQPERR, "PCIe HMA channel request parity error" },
		{ F_HRSPPERR, "PCIe HMA channel response parity error" },
		{ F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
		{ F_FIDPERR, "PCIe FID parity error" },
		{ F_INTXCLRPERR, "PCIe INTx clear parity error" },
		{ F_MATAGPERR, "PCIe MA tag parity error" },
		{ F_PIOTAGPERR, "PCIe PIO tag parity error" },
		{ F_RXCPLPERR, "PCIe Rx completion parity error" },
		{ F_RXWRPERR, "PCIe Rx write parity error" },
		{ F_RPLPERR, "PCIe replay buffer parity error" },
		{ F_PCIESINT, "PCIe core secondary fault" },
		{ F_PCIEPINT, "PCIe core primary fault" },
		{ F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
		{ 0 }
	};
	/* PCIE_INT_CAUSE decode for T5 and later. */
	static const struct intr_details t5_pcie_intr_details[] = {
		{ F_IPGRPPERR, "Parity errors observed by IP" },
		{ F_NONFATALERR, "PCIe non-fatal error" },
		{ F_READRSPERR, "Outbound read error" },
		{ F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
		{ F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
		{ F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
		{ F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
		{ F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
		{ F_MAGRPPERR, "MA group FIFO parity error" },
		{ F_VFIDPERR, "VFID SRAM parity error" },
		{ F_FIDPERR, "FID SRAM parity error" },
		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
		{ F_HREQWRPERR, "HMA channel write request SRAM parity error" },
		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
		{ F_DREQRDPERR, "DMA channel write request SRAM parity error" },
		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
		{ F_MSTGRPPERR, "Master response read queue SRAM parity error" },
		{ 0 }
	};
	/* Not const: .details is filled in below based on the chip. */
	struct intr_info pcie_intr_info = {
		.name = "PCIE_INT_CAUSE",
		.cause_reg = A_PCIE_INT_CAUSE,
		.enable_reg = A_PCIE_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	if (is_t4(adap)) {
		/* T4 has the two extra UTL interrupt registers. */
		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);

		pcie_intr_info.details = pcie_intr_details;
	} else {
		pcie_intr_info.details = t5_pcie_intr_details;
	}
	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);

	return (fatal);
}
5011
/*
 * TP interrupt handler.  Returns true for fatal error.
 */
static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		/*
		 * NOTE(review): fatal mask (0x7fffffff) is wider than the
		 * decoded bits above — presumably intentional; confirm
		 * against the TP_INT_CAUSE register layout.
		 */
		.fatal = 0x7fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
}
5034
/*
 * SGE interrupt handler.  Services SGE_INT_CAUSE1..4 on all chips, CAUSE5 on
 * T5+ and CAUSE6 on T6+, then reports/clears any latched per-queue error in
 * SGE_ERROR_STATS.  Returns true for fatal error.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* SGE_INT_CAUSE3 decode for T4/T5. */
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* SGE_INT_CAUSE3 decode for T6+ (adds DBP_TBUF_FULL, FATAL_WRE_LEN). */
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
			"SGE WRE packet less than advertized length" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* Not const: .details is selected below based on the chip. */
	struct intr_info sge_int3_info = {
		.name = "SGE_INT_CAUSE3",
		.cause_reg = A_SGE_INT_CAUSE3,
		.enable_reg = A_SGE_INT_ENABLE3,
		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int4_info = {
		.name = "SGE_INT_CAUSE4",
		.cause_reg = A_SGE_INT_CAUSE4,
		.enable_reg = A_SGE_INT_ENABLE4,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int5_info = {
		.name = "SGE_INT_CAUSE5",
		.cause_reg = A_SGE_INT_CAUSE5,
		.enable_reg = A_SGE_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int6_info = {
		.name = "SGE_INT_CAUSE6",
		.cause_reg = A_SGE_INT_CAUSE6,
		.enable_reg = A_SGE_INT_ENABLE6,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};

	bool fatal;
	u32 v;

	if (chip_id(adap) <= CHELSIO_T5) {
		sge_int3_info.details = sge_int3_details;
	} else {
		sge_int3_info.details = t6_sge_int3_details;
	}

	fatal = false;
	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T5)
		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);

	/* Report and clear any latched per-queue error. */
	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
	if (v & F_ERROR_QID_VALID) {
		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
		if (v & F_UNCAPTURED_ERROR)
			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adap, A_SGE_ERROR_STATS,
		    F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
	}

	return (fatal);
}
5196
5197 /*
5198 * CIM interrupt handler.
5199 */
cim_intr_handler(struct adapter * adap,int arg,bool verbose)5200 static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
5201 {
5202 static const struct intr_details cim_host_intr_details[] = {
5203 /* T6+ */
5204 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
5205
5206 /* T5+ */
5207 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
5208 { F_PLCIM_MSTRSPDATAPARERR,
5209 "PL2CIM master response data parity error" },
5210 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
5211 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
5212 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
5213 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
5214 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
5215 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
5216
5217 /* T4+ */
5218 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
5219 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
5220 { F_MBHOSTPARERR, "CIM mailbox host read parity error" },
5221 { F_MBUPPARERR, "CIM mailbox uP parity error" },
5222 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
5223 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
5224 { F_IBQULPPARERR, "CIM IBQ ULP parity error" },
5225 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
5226 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */
5227 "CIM IBQ PCIe/SGE_HI parity error" },
5228 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
5229 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
5230 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
5231 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
5232 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
5233 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
5234 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
5235 { F_TIMER1INT, "CIM TIMER0 interrupt" },
5236 { F_TIMER0INT, "CIM TIMER0 interrupt" },
5237 { F_PREFDROPINT, "CIM control register prefetch drop" },
5238 { 0}
5239 };
5240 static const struct intr_info cim_host_intr_info = {
5241 .name = "CIM_HOST_INT_CAUSE",
5242 .cause_reg = A_CIM_HOST_INT_CAUSE,
5243 .enable_reg = A_CIM_HOST_INT_ENABLE,
5244 .fatal = 0x007fffe6,
5245 .flags = NONFATAL_IF_DISABLED,
5246 .details = cim_host_intr_details,
5247 .actions = NULL,
5248 };
5249 static const struct intr_details cim_host_upacc_intr_details[] = {
5250 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
5251 { F_TIMEOUTMAINT, "CIM PIF MA timeout" },
5252 { F_TIMEOUTINT, "CIM PIF timeout" },
5253 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
5254 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
5255 { F_BLKWRPLINT, "CIM block write to PL space" },
5256 { F_BLKRDPLINT, "CIM block read from PL space" },
5257 { F_SGLWRPLINT,
5258 "CIM single write to PL space with illegal BEs" },
5259 { F_SGLRDPLINT,
5260 "CIM single read from PL space with illegal BEs" },
5261 { F_BLKWRCTLINT, "CIM block write to CTL space" },
5262 { F_BLKRDCTLINT, "CIM block read from CTL space" },
5263 { F_SGLWRCTLINT,
5264 "CIM single write to CTL space with illegal BEs" },
5265 { F_SGLRDCTLINT,
5266 "CIM single read from CTL space with illegal BEs" },
5267 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
5268 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
5269 { F_SGLWREEPROMINT,
5270 "CIM single write to EEPROM space with illegal BEs" },
5271 { F_SGLRDEEPROMINT,
5272 "CIM single read from EEPROM space with illegal BEs" },
5273 { F_BLKWRFLASHINT, "CIM block write to flash space" },
5274 { F_BLKRDFLASHINT, "CIM block read from flash space" },
5275 { F_SGLWRFLASHINT, "CIM single write to flash space" },
5276 { F_SGLRDFLASHINT,
5277 "CIM single read from flash space with illegal BEs" },
5278 { F_BLKWRBOOTINT, "CIM block write to boot space" },
5279 { F_BLKRDBOOTINT, "CIM block read from boot space" },
5280 { F_SGLWRBOOTINT, "CIM single write to boot space" },
5281 { F_SGLRDBOOTINT,
5282 "CIM single read from boot space with illegal BEs" },
5283 { F_ILLWRBEINT, "CIM illegal write BEs" },
5284 { F_ILLRDBEINT, "CIM illegal read BEs" },
5285 { F_ILLRDINT, "CIM illegal read" },
5286 { F_ILLWRINT, "CIM illegal write" },
5287 { F_ILLTRANSINT, "CIM illegal transaction" },
5288 { F_RSVDSPACEINT, "CIM reserved space access" },
5289 {0}
5290 };
5291 static const struct intr_info cim_host_upacc_intr_info = {
5292 .name = "CIM_HOST_UPACC_INT_CAUSE",
5293 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
5294 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
5295 .fatal = 0x3fffeeff,
5296 .flags = NONFATAL_IF_DISABLED,
5297 .details = cim_host_upacc_intr_details,
5298 .actions = NULL,
5299 };
5300 static const struct intr_info cim_pf_host_intr_info = {
5301 .name = "CIM_PF_HOST_INT_CAUSE",
5302 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
5303 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
5304 .fatal = 0,
5305 .flags = 0,
5306 .details = NULL,
5307 .actions = NULL,
5308 };
5309 u32 val, fw_err;
5310 bool fatal;
5311
5312 /*
5313 * When the Firmware detects an internal error which normally wouldn't
5314 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
5315 * to make sure the Host sees the Firmware Crash. So if we have a
5316 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
5317 * interrupt.
5318 */
5319 fw_err = t4_read_reg(adap, A_PCIE_FW);
5320 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
5321 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
5322 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
5323 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
5324 }
5325
5326 fatal = (fw_err & F_PCIE_FW_ERR) != 0;
5327 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
5328 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
5329 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
5330 if (fatal)
5331 t4_os_cim_err(adap);
5332
5333 return (fatal);
5334 }
5335
/*
 * ULP RX interrupt handler.  Services both ULP_RX_INT_CAUSE registers.
 * Returns true for fatal error.
 */
static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulprx_intr_details[] = {
		/* T5+ */
		{ F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
		{ F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },

		/* T4+ */
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
		{ 0x007fffff, "ULPRX parity error" },
		{ 0 }
	};
	static const struct intr_info ulprx_intr_info = {
		.name = "ULP_RX_INT_CAUSE",
		.cause_reg = A_ULP_RX_INT_CAUSE,
		.enable_reg = A_ULP_RX_INT_ENABLE,
		.fatal = 0x07ffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulprx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; none of its bits are treated as fatal. */
	static const struct intr_info ulprx_intr2_info = {
		.name = "ULP_RX_INT_CAUSE_2",
		.cause_reg = A_ULP_RX_INT_CAUSE_2,
		.enable_reg = A_ULP_RX_INT_ENABLE_2,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);

	return (fatal);
}
5377
/*
 * ULP TX interrupt handler.  Services both ULP_TX_INT_CAUSE registers.
 * Returns true for fatal error.
 */
static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulptx_intr_details[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
		{ 0x0fffffff, "ULPTX parity error" },
		{ 0 }
	};
	static const struct intr_info ulptx_intr_info = {
		.name = "ULP_TX_INT_CAUSE",
		.cause_reg = A_ULP_TX_INT_CAUSE,
		.enable_reg = A_ULP_TX_INT_ENABLE,
		.fatal = 0x0fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulptx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; only bits 0xf0 are considered fatal. */
	static const struct intr_info ulptx_intr2_info = {
		.name = "ULP_TX_INT_CAUSE_2",
		.cause_reg = A_ULP_TX_INT_CAUSE_2,
		.enable_reg = A_ULP_TX_INT_ENABLE_2,
		.fatal = 0xf0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);

	return (fatal);
}
5416
/*
 * Dump the PM TX debug statistics registers (PM_TX_DBG_STAT0..16).
 * Used as an interrupt action from pmtx_intr_handler; never fatal itself.
 */
static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
{
	int i;
	u32 data[17];

	/* Read all 17 debug stat registers through the indirect window. */
	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
	    ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
	for (i = 0; i < ARRAY_SIZE(data); i++) {
		CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
		    A_PM_TX_DBG_STAT0 + i, data[i]);
	}

	return (false);
}
5431
5432 /*
5433 * PM TX interrupt handler.
5434 */
static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Any cause bit triggers a dump of the PM TX debug stats. */
	static const struct intr_action pmtx_intr_actions[] = {
		{ 0xffffffff, 0, pmtx_dump_dbg_stats },
		{ 0 },
	};
	static const struct intr_details pmtx_intr_details[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
		{ 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
		{ 0x00f00000, "PMTX icspi FIFO Rx framing error" },
		{ 0x000f0000, "PMTX icspi FIFO Tx framing error" },
		{ 0x0000f000, "PMTX oespi FIFO Rx framing error" },
		{ 0x00000f00, "PMTX oespi FIFO Tx framing error" },
		{ 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
		{ 0 }
	};
	/* Every PM TX interrupt cause is treated as fatal. */
	static const struct intr_info pmtx_intr_info = {
		.name = "PM_TX_INT_CAUSE",
		.cause_reg = A_PM_TX_INT_CAUSE,
		.enable_reg = A_PM_TX_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pmtx_intr_details,
		.actions = pmtx_intr_actions,
	};

	return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
}
5470
5471 /*
5472 * PM RX interrupt handler.
5473 */
static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Decode table covers both T6+-only and T4+ cause bits. */
	static const struct intr_details pmrx_intr_details[] = {
		/* T6+ */
		{ 0x18000000, "PMRX ospi overflow" },
		{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
		{ F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
		{ F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
		{ F_SDC_ERR, "PMRX SDC error" },

		/* T4+ */
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
		{ 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
		{ 0x0003c000, "PMRX iespi Rx framing error" },
		{ 0x00003c00, "PMRX iespi Tx framing error" },
		{ 0x00000300, "PMRX ocspi Rx framing error" },
		{ 0x000000c0, "PMRX ocspi Tx framing error" },
		{ 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
		{ 0 }
	};
	static const struct intr_info pmrx_intr_info = {
		.name = "PM_RX_INT_CAUSE",
		.cause_reg = A_PM_RX_INT_CAUSE,
		.enable_reg = A_PM_RX_INT_ENABLE,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = pmrx_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
}
5510
5511 /*
5512 * CPL switch interrupt handler.
5513 */
static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details cplsw_intr_details[] = {
		/* T5+ */
		{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
		{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },

		/* T4+ */
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
		{ 0 }
	};
	static const struct intr_info cplsw_intr_info = {
		.name = "CPL_INTR_CAUSE",
		.cause_reg = A_CPL_INTR_CAUSE,
		.enable_reg = A_CPL_INTR_ENABLE,
		.fatal = 0xff,
		.flags = NONFATAL_IF_DISABLED,
		.details = cplsw_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
}
5542
5543 #define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
5544 #define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
5545 #define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
5546 F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
5547 F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
5548 F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
5549 #define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
5550 F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
5551 F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
5552
5553 /*
5554 * LE interrupt handler.
5555 */
static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Decode table for T4/T5 chips. */
	static const struct intr_details le_intr_details[] = {
		{ F_REQQPARERR, "LE request queue parity error" },
		{ F_UNKNOWNCMD, "LE unknown command" },
		{ F_ACTRGNFULL, "LE active region full" },
		{ F_PARITYERR, "LE parity error" },
		{ F_LIPMISS, "LE LIP miss" },
		{ F_LIP0, "LE 0 LIP error" },
		{ 0 }
	};
	/* Decode table for T6 and later; cause bits differ from T4/T5. */
	static const struct intr_details t6_le_intr_details[] = {
		{ F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
		{ F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
		{ F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
		{ F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
		{ F_TOTCNTERR, "LE total active < TCAM count" },
		{ F_CMDPRSRINTERR, "LE internal error in parser" },
		{ F_CMDTIDERR, "Incorrect tid in LE command" },
		{ F_T6_ACTRGNFULL, "LE active region full" },
		{ F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
		{ F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
		{ F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
		{ F_TCAMACCFAIL, "LE TCAM access failure" },
		{ F_T6_UNKNOWNCMD, "LE unknown command" },
		{ F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
		{ F_T6_LIPMISS, "LE CLIP lookup miss" },
		{ T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
		{ 0 }
	};
	/* Not const/static: details and fatal mask are chosen per chip. */
	struct intr_info le_intr_info = {
		.name = "LE_DB_INT_CAUSE",
		.cause_reg = A_LE_DB_INT_CAUSE,
		.enable_reg = A_LE_DB_INT_ENABLE,
		.fatal = 0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};

	if (chip_id(adap) <= CHELSIO_T5) {
		le_intr_info.details = le_intr_details;
		le_intr_info.fatal = T5_LE_FATAL_MASK;
	} else {
		le_intr_info.details = t6_le_intr_details;
		le_intr_info.fatal = T6_LE_FATAL_MASK;
	}

	return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
}
5608
5609 /*
5610 * MPS interrupt handler.
5611 */
static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	/* T7+ moved the TRC cause/enable registers; same bit layout. */
	static const struct intr_info t7_mps_trc_intr_info = {
		.name = "T7_MPS_TRC_INT_CAUSE",
		.cause_reg = A_T7_MPS_TRC_INT_CAUSE,
		.enable_reg = A_T7_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal = 0xffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal = 0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	/* SRAM1 stats registers exist on T5 and later only (see below). */
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};

	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
	if (chip_id(adap) > CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
	else
		fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
	if (chip_id(adap) > CHELSIO_T4) {
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
		    verbose);
	}

	/* Ack the top-level MPS cause (write-1-to-clear on T5+). */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE);	/* flush */

	return (fatal);

}
5763
5764 /*
5765 * EDC/MC interrupt handler.
5766 */
static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
{
	static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
	unsigned int count_reg, v;
	static const struct intr_details mem_intr_details[] = {
		{ F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
		{ F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
		{ F_PERR_INT_CAUSE, "FIFO parity error" },
		{ 0 }
	};
	/* name/cause_reg/enable_reg are filled in per memory controller. */
	struct intr_info ii = {
		.fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
		.details = mem_intr_details,
		.flags = 0,
		.actions = NULL,
	};
	bool fatal;

	switch (idx) {
	case MEM_EDC0:
		ii.name = "EDC0_INT_CAUSE";
		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
		count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
		break;
	case MEM_EDC1:
		ii.name = "EDC1_INT_CAUSE";
		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
		count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
		break;
	case MEM_MC0:
		ii.name = "MC0_INT_CAUSE";
		if (is_t4(adap)) {
			ii.cause_reg = A_MC_INT_CAUSE;
			ii.enable_reg = A_MC_INT_ENABLE;
			count_reg = A_MC_ECC_STATUS;
		} else {
			ii.cause_reg = A_MC_P_INT_CAUSE;
			ii.enable_reg = A_MC_P_INT_ENABLE;
			count_reg = A_MC_P_ECC_STATUS;
		}
		break;
	case MEM_MC1:
		ii.name = "MC1_INT_CAUSE";
		ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
		ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
		count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
		break;
	default:
		/*
		 * Unknown memory index.  Bail out early instead of reading
		 * an uninitialized count_reg/ii below; callers only pass
		 * MEM_EDC0..MEM_MC1 today.
		 */
		return (false);
	}

	fatal = t4_handle_intr(adap, &ii, 0, verbose);

	/* Report and clear the ECC error counters for this memory. */
	v = t4_read_reg(adap, count_reg);
	if (v != 0) {
		if (G_ECC_UECNT(v) != 0) {
			CH_ALERT(adap,
			    "%s: %u uncorrectable ECC data error(s)\n",
			    name[idx], G_ECC_UECNT(v));
		}
		if (G_ECC_CECNT(v) != 0) {
			/* EDC errors also record the failing address/data. */
			if (idx <= MEM_EDC1)
				t4_edc_err_read(adap, idx);
			CH_WARN_RATELIMIT(adap,
			    "%s: %u correctable ECC data error(s)\n",
			    name[idx], G_ECC_CECNT(v));
		}
		t4_write_reg(adap, count_reg, 0xffffffff);
	}

	return (fatal);
}
5839
/*
 * Report and clear an MA address wrap-around event.  Interrupt action for
 * F_MEM_WRAP_INT_CAUSE; never fatal by itself.
 */
static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
{
	u32 v;

	v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
	CH_ALERT(adap,
	    "MA address wrap-around error by client %u to address %#x\n",
	    G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
	/* Write the value back to acknowledge/clear the status. */
	t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);

	return (false);
}
5852
5853
5854 /*
5855 * MA interrupt handler.
5856 */
static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* A wrap-around cause additionally dumps the wrap status register. */
	static const struct intr_action ma_intr_actions[] = {
		{ F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
		{ 0 },
	};
	static const struct intr_info ma_intr_info = {
		.name = "MA_INT_CAUSE",
		.cause_reg = A_MA_INT_CAUSE,
		.enable_reg = A_MA_INT_ENABLE,
		.fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = ma_intr_actions,
	};
	static const struct intr_info ma_perr_status1 = {
		.name = "MA_PARITY_ERROR_STATUS1",
		.cause_reg = A_MA_PARITY_ERROR_STATUS1,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE1,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	/* STATUS2 exists on T5 and later only (see chip check below). */
	static const struct intr_info ma_perr_status2 = {
		.name = "MA_PARITY_ERROR_STATUS2",
		.cause_reg = A_MA_PARITY_ERROR_STATUS2,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE2,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
	if (chip_id(adap) > CHELSIO_T4)
		fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);

	return (fatal);
}
5900
5901 /*
5902 * SMB interrupt handler.
5903 */
static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details smb_intr_details[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
		{ 0 }
	};
	/* All three FIFO parity errors are fatal. */
	static const struct intr_info smb_intr_info = {
		.name = "SMB_INT_CAUSE",
		.cause_reg = A_SMB_INT_CAUSE,
		.enable_reg = A_SMB_INT_ENABLE,
		.fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
		.flags = 0,
		.details = smb_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
}
5924
5925 /*
5926 * NC-SI interrupt handler.
5927 */
static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ncsi_intr_details[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
		{ 0 }
	};
	/* All decoded parity errors are fatal. */
	static const struct intr_info ncsi_intr_info = {
		.name = "NCSI_INT_CAUSE",
		.cause_reg = A_NCSI_INT_CAUSE,
		.enable_reg = A_NCSI_INT_ENABLE,
		.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
		    F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
		.flags = 0,
		.details = ncsi_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
}
5950
5951 /*
5952 * MAC interrupt handler.
5953 */
static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
{
	static const struct intr_details mac_intr_details[] = {
		{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
		{ 0 }
	};
	char name[32];
	struct intr_info ii;
	bool fatal = false;

	/*
	 * The MAC interrupt registers moved between chip generations:
	 * T4 uses XGMAC_PORT registers, T5/T6 use MAC_PORT, T7+ uses the
	 * T7_ variants.  Pick the right cause/enable pair for this chip.
	 */
	if (is_t4(adap)) {
		snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
		ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else if (chip_id(adap) < CHELSIO_T7) {
		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else {
		snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, verbose);

	/* PERR cause registers: T7+ and T5/T6 variants; none on T4. */
	if (chip_id(adap) > CHELSIO_T6) {
		snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	} else if (chip_id(adap) >= CHELSIO_T5) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	/* 100G PERR cause registers: T7+ and T6 only. */
	if (chip_id(adap) > CHELSIO_T6) {
		snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
		ii.name = &name[0];
		ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	} else if (is_t6(adap)) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	return (fatal);
}
6041
/*
 * Dump the PL timeout status registers.  Interrupt action for F_TIMEOUT
 * in plpl_intr_handler; never fatal by itself.
 */
static bool pl_timeout_status(struct adapter *adap, int arg, bool verbose)
{

	CH_ALERT(adap, " PL_TIMEOUT_STATUS 0x%08x 0x%08x\n",
	    t4_read_reg(adap, A_PL_TIMEOUT_STATUS0),
	    t4_read_reg(adap, A_PL_TIMEOUT_STATUS1));

	return (false);
}
6051
/* PL (bus/module access) interrupt handler. */
static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* A bus timeout additionally dumps the PL timeout status regs. */
	static const struct intr_action plpl_intr_actions[] = {
		{ F_TIMEOUT, 0, pl_timeout_status },
		{ 0 },
	};
	static const struct intr_details plpl_intr_details[] = {
		{ F_PL_BUSPERR, "Bus parity error" },
		{ F_FATALPERR, "Fatal parity error" },
		{ F_INVALIDACCESS, "Global reserved memory access" },
		{ F_TIMEOUT, "Bus timeout" },
		{ F_PLERR, "Module reserved access" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	static const struct intr_info plpl_intr_info = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = NONFATAL_IF_DISABLED,
		.details = plpl_intr_details,
		.actions = plpl_intr_actions,
	};

	return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
}
6079
6080 /**
6081 * t4_slow_intr_handler - control path interrupt handler
6082 * @adap: the adapter
6083 * @verbose: increased verbosity, for debug
6084 *
6085 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
6086 * The designation 'slow' is because it involves register reads, while
6087 * data interrupts typically don't involve any MMIOs.
6088 */
bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{
	/* PL_INT_CAUSE bit decode for T4..T6. */
	static const struct intr_details pl_intr_details[] = {
		{ F_MC1, "MC1" },
		{ F_UART, "UART" },
		{ F_ULP_TX, "ULP TX" },
		{ F_SGE, "SGE" },
		{ F_HMA, "HMA" },
		{ F_CPL_SWITCH, "CPL Switch" },
		{ F_ULP_RX, "ULP RX" },
		{ F_PM_RX, "PM RX" },
		{ F_PM_TX, "PM TX" },
		{ F_MA, "MA" },
		{ F_TP, "TP" },
		{ F_LE, "LE" },
		{ F_EDC1, "EDC1" },
		{ F_EDC0, "EDC0" },
		{ F_MC, "MC0" },
		{ F_PCIE, "PCIE" },
		{ F_PMU, "PMU" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_SF, "SF" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_MI, "MI" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	/* PL_INT_CAUSE bit decode for T7+, which rearranged the bits. */
	static const struct intr_details t7_pl_intr_details[] = {
		{ F_T7_MC1, "MC1" },
		{ F_T7_ULP_TX, "ULP TX" },
		{ F_T7_SGE, "SGE" },
		{ F_T7_CPL_SWITCH, "CPL Switch" },
		{ F_T7_ULP_RX, "ULP RX" },
		{ F_T7_PM_RX, "PM RX" },
		{ F_T7_PM_TX, "PM TX" },
		{ F_T7_MA, "MA" },
		{ F_T7_TP, "TP" },
		{ F_T7_LE, "LE" },
		{ F_T7_EDC1, "EDC1" },
		{ F_T7_EDC0, "EDC0" },
		{ F_T7_MC0, "MC0" },
		{ F_T7_PCIE, "PCIE" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_MI, "MI" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	/* Not const: .details is chosen per chip generation below. */
	struct intr_info pl_perr_cause = {
		.name = "PL_PERR_CAUSE",
		.cause_reg = A_PL_PERR_CAUSE,
		.enable_reg = A_PL_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* Per-module dispatch for T4..T6; arg carries port/memory index. */
	static const struct intr_action pl_intr_action[] = {
		{ F_MC1, MEM_MC1, mem_intr_handler },
		{ F_ULP_TX, -1, ulptx_intr_handler },
		{ F_SGE, -1, sge_intr_handler },
		{ F_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_ULP_RX, -1, ulprx_intr_handler },
		{ F_PM_RX, -1, pmrx_intr_handler},
		{ F_PM_TX, -1, pmtx_intr_handler},
		{ F_MA, -1, ma_intr_handler },
		{ F_TP, -1, tp_intr_handler },
		{ F_LE, -1, le_intr_handler },
		{ F_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_MC0, MEM_MC0, mem_intr_handler },
		{ F_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	/* Per-module dispatch for T7+ using the rearranged cause bits. */
	static const struct intr_action t7_pl_intr_action[] = {
		{ F_T7_ULP_TX, -1, ulptx_intr_handler },
		{ F_T7_SGE, -1, sge_intr_handler },
		{ F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_T7_ULP_RX, -1, ulprx_intr_handler },
		{ F_T7_PM_RX, -1, pmrx_intr_handler},
		{ F_T7_PM_TX, -1, pmtx_intr_handler},
		{ F_T7_MA, -1, ma_intr_handler },
		{ F_T7_TP, -1, tp_intr_handler },
		{ F_T7_LE, -1, le_intr_handler },
		{ F_T7_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_T7_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_T7_MC1, MEM_MC1, mem_intr_handler },
		{ F_T7_MC0, MEM_MC0, mem_intr_handler },
		{ F_T7_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	/* Not const: .details/.actions are chosen per chip below. */
	struct intr_info pl_intr_info = {
		.name = "PL_INT_CAUSE",
		.cause_reg = A_PL_INT_CAUSE,
		.enable_reg = A_PL_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	u32 perr;

	if (chip_id(adap) >= CHELSIO_T7) {
		pl_perr_cause.details = t7_pl_intr_details;
		pl_intr_info.details = t7_pl_intr_details;
		pl_intr_info.actions = t7_pl_intr_action;
	} else {
		pl_perr_cause.details = pl_intr_details;
		pl_intr_info.details = pl_intr_details;
		pl_intr_info.actions = pl_intr_action;
	}

	/*
	 * Read and clear PL_PERR_CAUSE first; its bits are then folded
	 * into the value handed to the PL_INT_CAUSE dispatch.  In verbose
	 * mode every enabled module is visited regardless of cause.
	 */
	perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
	if (verbose || perr != 0) {
		t4_show_intr_info(adap, &pl_perr_cause, perr);
		if (perr != 0)
			t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
		if (verbose)
			perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
	}

	return (t4_handle_intr(adap, &pl_intr_info, perr, verbose));
}
6246
6247 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
6248
6249 /**
6250 * t4_intr_enable - enable interrupts
6251 * @adapter: the adapter whose interrupts should be enabled
6252 *
6253 * Enable PF-specific interrupts for the calling function and the top-level
6254 * interrupt concentrator for global interrupts. Interrupts are already
6255 * enabled at each module, here we just enable the roots of the interrupt
6256 * hierarchies.
6257 *
6258 * Note: this function should be called only when the driver manages
6259 * non PF-specific interrupts from the various HW modules. Only one PCI
6260 * function at a time should be doing this.
6261 */
void t4_intr_enable(struct adapter *adap)
{
	u32 mask, val;

	/* SGE error interrupts differ between T4/T5 and T6+. */
	if (chip_id(adap) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
		    F_DBFIFO_LP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
	    F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
	    F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
	mask = val;
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
	/* Enable PF-local interrupts (PFSW/PFCIM). */
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Mask off SF and I2CM at the top level. */
	t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}
6282
6283 /**
6284 * t4_intr_disable - disable interrupts
6285 * @adap: the adapter whose interrupts should be disabled
6286 *
6287 * Disable interrupts. We only disable the top-level interrupt
6288 * concentrators. The caller must be a PCI function managing global
6289 * interrupts.
6290 */
void t4_intr_disable(struct adapter *adap)
{

	/* Disable PF-local interrupts and deregister this PF from PL. */
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
}
6297
6298 /**
6299 * hash_mac_addr - return the hash value of a MAC address
6300 * @addr: the 48-bit Ethernet MAC address
6301 *
6302 * Hashes a MAC address according to the hash function used by HW inexact
6303 * (hash) address matching.
6304 */
hash_mac_addr(const u8 * addr)6305 static int hash_mac_addr(const u8 *addr)
6306 {
6307 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
6308 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
6309 a ^= b;
6310 a ^= (a >> 12);
6311 a ^= (a >> 6);
6312 return a & 0x3f;
6313 }
6314
6315 /**
6316 * t4_config_rss_range - configure a portion of the RSS mapping table
6317 * @adapter: the adapter
6318 * @mbox: mbox to use for the FW command
6319 * @viid: virtual interface whose RSS subtable is to be written
6320 * @start: start entry in the table to write
6321 * @n: how many table entries to write
6322 * @rspq: values for the "response queue" (Ingress Queue) lookup table
6323 * @nrspq: number of values in @rspq
6324 *
6325 * Programs the selected part of the VI's RSS mapping table with the
6326 * provided values. If @nrspq < @n the supplied values are used repeatedly
6327 * until the full table range is populated.
6328 *
6329 * The caller must ensure the values in @rspq are in the range allowed for
6330 * @viid.
6331 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;		/* next queue ID to consume */
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);	/* IDs carried by this command */
		int nq_packed = 0;	/* IDs packed so far */
		__be32 *qp = &cmd.iq0_to_iq2;	/* next 3-tuple slot */

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;	/* unused slots stay 0 */
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				/* Wrap and reuse @rspq if it is shorter than @n. */
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
6410
6411 /**
6412 * t4_config_glbl_rss - configure the global RSS mode
6413 * @adapter: the adapter
6414 * @mbox: mbox to use for the FW command
6415 * @mode: global RSS mode
6416 * @flags: mode-specific flags
6417 *
6418 * Sets the global RSS mode.
6419 */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)6420 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
6421 unsigned int flags)
6422 {
6423 struct fw_rss_glb_config_cmd c;
6424
6425 memset(&c, 0, sizeof(c));
6426 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
6427 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
6428 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6429 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
6430 c.u.manual.mode_pkd =
6431 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
6432 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
6433 c.u.basicvirtual.mode_keymode =
6434 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
6435 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
6436 } else
6437 return -EINVAL;
6438 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
6439 }
6440
6441 /**
6442 * t4_config_vi_rss - configure per VI RSS settings
6443 * @adapter: the adapter
6444 * @mbox: mbox to use for the FW command
6445 * @viid: the VI id
6446 * @flags: RSS flags
6447 * @defq: id of the default RSS queue for the VI.
6448 * @skeyidx: RSS secret key table index for non-global mode
6449 * @skey: RSS vf_scramble key for VI.
6450 *
6451 * Configures VI-specific RSS properties.
6452 */
t4_config_vi_rss(struct adapter * adapter,int mbox,unsigned int viid,unsigned int flags,unsigned int defq,unsigned int skeyidx,unsigned int skey)6453 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
6454 unsigned int flags, unsigned int defq, unsigned int skeyidx,
6455 unsigned int skey)
6456 {
6457 struct fw_rss_vi_config_cmd c;
6458
6459 memset(&c, 0, sizeof(c));
6460 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
6461 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6462 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
6463 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6464 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
6465 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
6466 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
6467 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
6468 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
6469
6470 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
6471 }
6472
6473 /* Read an RSS table row */
/* Read an RSS table row.  On success *val holds the raw table word. */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	if (chip_id(adap) < CHELSIO_T7) {
		/*
		 * Pre-T7: writing the row number (with the upper command
		 * bits set to 0xfff00000) kicks off a lookup-table read;
		 * poll LKPTBLROWVLD for completion.
		 */
		t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
		return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE,
					   F_LKPTBLROWVLD, 1, 5, 0, val);
	} else {
		/*
		 * T7+: the table lives in SRAM; 0xB0000 appears to be the
		 * SRAM read command for this region -- NOTE(review):
		 * confirm against the T7 register documentation.
		 */
		t4_write_reg(adap, A_TP_RSS_CONFIG_SRAM, 0xB0000 | row);
		return t7_wait_sram_done(adap, A_TP_RSS_CONFIG_SRAM,
					 A_TP_RSS_LKP_TABLE, 5, 0, val);
	}
}
6486
6487 /**
6488 * t4_read_rss - read the contents of the RSS mapping table
6489 * @adapter: the adapter
6490 * @map: holds the contents of the RSS mapping table
6491 *
6492 * Reads the contents of the RSS hash->queue mapping table.
6493 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 row;
	int i, ret;
	const int nrows = adapter->chip_params->rss_nentries / 2;

	/* Each table row packs two queue entries, so read nentries/2 rows. */
	for (i = 0; i < nrows; i++) {
		ret = rd_rss_row(adapter, i, &row);
		if (ret != 0)
			return ret;
		map[2 * i] = G_LKPTBLQUEUE0(row);
		map[2 * i + 1] = G_LKPTBLQUEUE1(row);
	}

	return 0;
}
6509
6510 /**
6511 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
6512 * @adap: the adapter
6513 * @cmd: TP fw ldst address space type
6514 * @vals: where the indirect register values are stored/written
6515 * @nregs: how many indirect registers to read/write
6516 * @start_idx: index of first indirect register to read/write
6517 * @rw: Read (1) or Write (0)
6518 * @sleep_ok: if true we may sleep while awaiting command completion
6519 *
6520 * Access TP indirect registers through LDST
6521 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	struct fw_ldst_cmd ldst;
	unsigned int i;
	int ret;

	/* One LDST mailbox transaction per register. */
	for (i = 0; i < nregs; i++) {
		memset(&ldst, 0, sizeof(ldst));
		ldst.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
		    F_FW_CMD_REQUEST |
		    (rw ? F_FW_CMD_READ : F_FW_CMD_WRITE) |
		    V_FW_LDST_CMD_ADDRSPACE(cmd));
		ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
		ldst.u.addrval.addr = cpu_to_be32(start_index + i);
		/* Reads carry no payload; writes carry the caller's value. */
		ldst.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);

		/* The reply is written back into the same command buffer. */
		ret = t4_wr_mbox_meat(adap, adap->mbox, &ldst, sizeof(ldst),
		    &ldst, sleep_ok);
		if (ret != 0)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(ldst.u.addrval.val);
	}

	return 0;
}
6551
6552 /**
6553 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
6554 * @adap: the adapter
6555 * @reg_addr: Address Register
6556 * @reg_data: Data register
6557 * @buff: where the indirect register values are stored/written
6558 * @nregs: how many indirect registers to read/write
6559 * @start_index: index of first indirect register to read/write
6560 * @rw: READ(1) or WRITE(0)
6561 * @sleep_ok: if true we may sleep while awaiting command completion
6562 *
6563 * Read/Write TP indirect registers through LDST if possible.
6564 * Else, use backdoor access
6565 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* stays non-zero unless LDST succeeds */
	int cmd;

	/* Map the indirect address register onto an LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST equivalent; must use backdoor access. */
		goto indirect_access;
	}

	/* Prefer the firmware LDST path when it is usable. */
	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct indirect-register access if LDST was not
	 * attempted or failed (rc is still non-zero). */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
6602
6603 /**
6604 * t4_tp_pio_read - Read TP PIO registers
6605 * @adap: the adapter
6606 * @buff: where the indirect register values are written
6607 * @nregs: how many indirect registers to read
6608 * @start_index: index of first indirect register to read
6609 * @sleep_ok: if true we may sleep while awaiting command completion
6610 *
6611 * Read TP PIO Registers
6612 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw = 1 selects the read direction in t4_tp_indirect_rw(). */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
6619
6620 /**
6621 * t4_tp_pio_write - Write TP PIO registers
6622 * @adap: the adapter
6623 * @buff: where the indirect register values are stored
6624 * @nregs: how many indirect registers to write
6625 * @start_index: index of first indirect register to write
6626 * @sleep_ok: if true we may sleep while awaiting command completion
6627 *
6628 * Write TP PIO Registers
6629 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/*
	 * rw = 0 selects the write direction, which never modifies the
	 * buffer, so dropping const here is safe.
	 */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}
6636
6637 /**
6638 * t4_tp_tm_pio_read - Read TP TM PIO registers
6639 * @adap: the adapter
6640 * @buff: where the indirect register values are written
6641 * @nregs: how many indirect registers to read
6642 * @start_index: index of first indirect register to read
6643 * @sleep_ok: if true we may sleep while awaiting command completion
6644 *
6645 * Read TP TM PIO Registers
6646 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	/* Same as t4_tp_pio_read() but against the TP TM PIO window. */
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}
6653
6654 /**
6655 * t4_tp_mib_read - Read TP MIB registers
6656 * @adap: the adapter
6657 * @buff: where the indirect register values are written
6658 * @nregs: how many indirect registers to read
6659 * @start_index: index of first indirect register to read
6660 * @sleep_ok: if true we may sleep while awaiting command completion
6661 *
6662 * Read TP MIB Registers
6663 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	/* MIB counters are read through the TP MIB index/data pair. */
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
6670
6671 /**
6672 * t4_read_rss_key - read the global RSS key
6673 * @adap: the adapter
6674 * @key: 10-entry array holding the 320-bit RSS key
6675 * @sleep_ok: if true we may sleep while awaiting command completion
6676 *
6677 * Reads the global 320-bit RSS key.
6678 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	/* The 320-bit key occupies ten consecutive TP PIO registers. */
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}
6683
6684 /**
6685 * t4_write_rss_key - program one of the RSS keys
6686 * @adap: the adapter
6687 * @key: 10-entry array holding the 320-bit RSS key
6688 * @idx: which RSS key to write
6689 * @sleep_ok: if true we may sleep while awaiting command completion
6690 *
6691 * Writes one of the RSS keys with the given 320-bit value. If @idx is
6692 * 0..15 the corresponding entry in the RSS key table is written,
6693 * otherwise the global RSS key is written.
6694 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;	/* key-table entries addressable */
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the key into the ten secret-key PIO registers. */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/*
	 * A valid index commits the staged key to that table slot;
	 * out-of-range indices leave VRT untouched (global key case
	 * per the function header).
	 */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDR(idx) | F_KEYWREN);
	}
}
6722
6723 /**
6724 * t4_read_rss_pf_config - read PF RSS Configuration Table
6725 * @adapter: the adapter
6726 * @index: the entry in the PF RSS table to read
6727 * @valp: where to store the returned value
6728 * @sleep_ok: if true we may sleep while awaiting command completion
6729 *
6730 * Reads the PF RSS Configuration Table at the specified index and returns
6731 * the value found there.
6732 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	/* PF config entries are consecutive TP PIO registers from PF0. */
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}
6738
6739 /**
6740 * t4_write_rss_pf_config - write PF RSS Configuration Table
6741 * @adapter: the adapter
6742 * @index: the entry in the VF RSS table to read
6743 * @val: the value to store
6744 * @sleep_ok: if true we may sleep while awaiting command completion
6745 *
6746 * Writes the PF RSS Configuration Table at the specified index with the
6747 * specified value.
6748 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	/* PF config entries are consecutive TP PIO registers from PF0. */
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}
6755
6756 /**
6757 * t4_read_rss_vf_config - read VF RSS Configuration Table
6758 * @adapter: the adapter
6759 * @index: the entry in the VF RSS table to read
6760 * @vfl: where to store the returned VFL
6761 * @vfh: where to store the returned VFH
6762 * @sleep_ok: if true we may sleep while awaiting command completion
6763 *
6764 * Reads the VF RSS Configuration Table at the specified index and returns
6765 * the (VFL, VFH) values found there.
6766 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF write-address field moved/widened on T6+. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Request that the index'th VF Table values be read into VFL/VFH:
	 * clear any stale write/key enables and old address bits, then
	 * set the address and the read enable.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}
6793
6794 /**
6795 * t4_write_rss_vf_config - write VF RSS Configuration Table
6796 *
6797 * @adapter: the adapter
6798 * @index: the entry in the VF RSS table to write
6799 * @vfl: the VFL to store
6800 * @vfh: the VFH to store
6801 *
6802 * Writes the VF RSS Configuration Table at the specified index with the
6803 * specified (VFL, VFH) values.
6804 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF write-address field moved/widened on T6+. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}
6832
6833 /**
6834 * t4_read_rss_pf_map - read PF RSS Map
6835 * @adapter: the adapter
6836 * @sleep_ok: if true we may sleep while awaiting command completion
6837 *
6838 * Reads the PF RSS Map register and returns its value.
6839 */
t4_read_rss_pf_map(struct adapter * adapter,bool sleep_ok)6840 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
6841 {
6842 u32 pfmap;
6843
6844 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6845
6846 return pfmap;
6847 }
6848
6849 /**
6850 * t4_write_rss_pf_map - write PF RSS Map
6851 * @adapter: the adapter
6852 * @pfmap: PF RSS Map value
6853 *
6854 * Writes the specified value to the PF RSS Map register.
6855 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
{
	/* The PF map is a single word in the TP PIO space. */
	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
}
6860
6861 /**
6862 * t4_read_rss_pf_mask - read PF RSS Mask
6863 * @adapter: the adapter
6864 * @sleep_ok: if true we may sleep while awaiting command completion
6865 *
6866 * Reads the PF RSS Mask register and returns its value.
6867 */
t4_read_rss_pf_mask(struct adapter * adapter,bool sleep_ok)6868 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6869 {
6870 u32 pfmask;
6871
6872 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6873
6874 return pfmask;
6875 }
6876
6877 /**
6878 * t4_write_rss_pf_mask - write PF RSS Mask
6879 * @adapter: the adapter
6880 * @pfmask: PF RSS Mask value
6881 *
6882 * Writes the specified value to the PF RSS Mask register.
6883 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	/* The PF mask is a single word in the TP PIO space. */
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}
6888
6889 /**
6890 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
6891 * @adap: the adapter
6892 * @v4: holds the TCP/IP counter values
6893 * @v6: holds the TCP/IPv6 counter values
6894 * @sleep_ok: if true we may sleep while awaiting command completion
6895 *
6896 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6897 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6898 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer spanning the OUT_RST .. RXT_SEG_LO counter window. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Map a counter name to its offset within the window read into val[]. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* 64-bit counters are split across _HI/_LO register pairs. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/*
		 * The v6 window is read starting at V6OUT_RST; the same
		 * STAT macros apply, which presumes the v6 counters mirror
		 * the v4 layout -- NOTE(review): confirm against t4_regs.h.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
6928
6929 /**
6930 * t4_tp_get_err_stats - read TP's error MIB counters
6931 * @adap: the adapter
6932 * @st: holds the counter values
6933 * @sleep_ok: if true we may sleep while awaiting command completion
6934 *
6935 * Returns the values of TP's error counters.
6936 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* Per-channel counters: read one word per channel for each group. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and the adjacent counter are read as one pair. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
6969
/**
 *	t4_tp_get_tnl_stats - read TP's tunnel MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's tunnel (in/out packet) counters.
 */
void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
			 bool sleep_ok)
{
	/* One out-packet and one in-packet counter per channel. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->out_pkt, nchan, A_TP_MIB_TNL_OUT_PKT_0,
		       sleep_ok);
	t4_tp_mib_read(adap, st->in_pkt, nchan, A_TP_MIB_TNL_IN_PKT_0,
		       sleep_ok);
}
6988
6989 /**
6990 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
6991 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
6994 * Returns the values of TP's proxy counters.
6995 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
			   bool sleep_ok)
{
	/* One loopback/proxy counter per channel. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}
7003
7004 /**
7005 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
7006 * @adap: the adapter
7007 * @st: holds the counter values
7008 * @sleep_ok: if true we may sleep while awaiting command completion
7009 *
7010 * Returns the values of TP's CPL counters.
7011 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	/* One request and one response counter per channel. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}
7021
7022 /**
7023 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
7024 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
7027 * Returns the values of TP's RDMA counters.
7028 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* rqe_dfr_pkt and the adjacent counter are read as one pair. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);

	if (chip_id(adap) >= CHELSIO_T7)
		/*
		 * read RDMA stats IN and OUT for all ports at once -- the
		 * 28-word read presumes pkts_in[] and pkts_out[] are laid
		 * out contiguously in struct tp_rdma_stats; NOTE(review):
		 * confirm the struct layout matches.
		 */
		t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
			       sleep_ok);
}
7040
7041 /**
7042 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
7043 * @adap: the adapter
7044 * @idx: the port index
7045 * @st: holds the counter values
7046 * @sleep_ok: if true we may sleep while awaiting command completion
7047 *
7048 * Returns the values of TP's FCoE counters for the selected port.
7049 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	/* Per-port DDP frame counter. */
	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	/* Per-port drop counter. */
	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* Byte counter is a HI/LO pair, two words per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
7066
7067 /**
7068 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
7069 * @adap: the adapter
7070 * @st: holds the counter values
7071 * @sleep_ok: if true we may sleep while awaiting command completion
7072 *
7073 * Returns the values of TP's counters for non-TCP directly-placed packets.
7074 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/* Four consecutive words: frames, drops, octets HI, octets LO. */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}
7086
7087 /**
7088 * t4_tp_get_tid_stats - read TP's tid MIB counters.
7089 * @adap: the adapter
7090 * @st: holds the counter values
7091 * @sleep_ok: if true we may sleep while awaiting command completion
7092 *
7093 * Returns the values of TP's counters for tids.
7094 */
void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
			 bool sleep_ok)
{

	/* Four consecutive tid counters starting at TID_DEL. */
	t4_tp_mib_read(adap, &st->del, 4, A_TP_MIB_TID_DEL, sleep_ok);
}
7101
7102 /**
7103 * t4_read_mtu_tbl - returns the values in the HW path MTU table
7104 * @adap: the adapter
7105 * @mtus: where to store the MTU values
7106 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
7107 *
7108 * Reads the HW path MTU table.
7109 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * Writing index 0xff with the entry number in the value
		 * field presumably selects entry i for read-back --
		 * NOTE(review): confirm against the MTU table register spec.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);	/* base-2 log of MTU */
	}
}
7124
7125 /**
7126 * t4_read_cong_tbl - reads the congestion control table
7127 * @adap: the adapter
7128 * @incr: where to store the alpha values
7129 *
7130 * Reads the additive increments programmed into the HW congestion
7131 * control table.
7132 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/*
			 * Row index 0xffff selects read-back of the entry
			 * addressed by (mtu, window) in bits [10:5]|[4:0].
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			/* The additive increment is the low 13 bits. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}
7145
7146 /**
7147 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
7148 * @adap: the adapter
7149 * @addr: the indirect TP register address
7150 * @mask: specifies the field within the register to modify
7151 * @val: new value for the field
7152 *
7153 * Sets a field of an indirect TP register to the given value.
7154 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO address/data pair. */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}
7162
7163 /**
7164 * init_cong_ctrl - initialize congestion control parameters
7165 * @a: the alpha values for congestion control
7166 * @b: the beta values for congestion control
7167 *
7168 * Initialize the congestion control parameters.
7169 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Additive-increment (alpha) values, one per congestion window. */
	static const unsigned short a_vals[] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	/* Multiplicative-decrement shift (beta) values, one per window. */
	static const unsigned short b_vals[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < sizeof(a_vals) / sizeof(a_vals[0]); i++) {
		a[i] = a_vals[i];
		b[i] = b_vals[i];
	}
}
7206
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/*
	 * Per-window average packet counts used to scale alpha into an
	 * additive increment for each congestion control window.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/*
		 * Round the width down one bit if the MTU is closer to the
		 * next lower power of two (tests the bit two below the MSB).
		 */
		if (!(mtu & ((1 << log2) >> 2))) /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment: alpha scaled by (mtu - 40)
			 * per average packet, clamped to CC_MIN_INCR.
			 * NOTE(review): the 40 presumably accounts for
			 * TCP/IP header bytes -- confirm against data book.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
7253
7254 /**
7255 * t4_set_pace_tbl - set the pace table
7256 * @adap: the adapter
7257 * @pace_vals: the pace values in microseconds
7258 * @start: index of the first entry in the HW pace table to set
7259 * @n: how many entries to set
7260 *
7261 * Sets (a subset of the) HW pace table.
7262 */
t4_set_pace_tbl(struct adapter * adap,const unsigned int * pace_vals,unsigned int start,unsigned int n)7263 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
7264 unsigned int start, unsigned int n)
7265 {
7266 unsigned int vals[NTX_SCHED], i;
7267 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
7268
7269 if (n > NTX_SCHED)
7270 return -ERANGE;
7271
7272 /* convert values from us to dack ticks, rounding to closest value */
7273 for (i = 0; i < n; i++, pace_vals++) {
7274 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
7275 if (vals[i] > 0x7ff)
7276 return -ERANGE;
7277 if (*pace_vals && vals[i] == 0)
7278 return -ERANGE;
7279 }
7280 for (i = 0; i < n; i++, start++)
7281 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
7282 return 0;
7283 }
7284
/**
 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a Tx HW scheduler for the target rate.  Returns 0 on
 * success or -EINVAL if no clocks-per-tick/bytes-per-tick pair can
 * represent the requested rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	/* NOTE(review): cclk appears to be in kHz, giving clk in Hz -- confirm */
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbps -> bytes/s */
		/*
		 * Exhaustively search the (clocks-per-tick, bytes-per-tick)
		 * space for the pair whose achievable rate is closest to
		 * the requested one.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;		/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;		/* achievable rate */
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/*
				 * bpt grows as cpt does (tps shrinks); once
				 * it exceeds 255 after a valid pair was
				 * found, no better pair can follow.
				 */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Each TM PIO word holds the settings for two schedulers. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
7328
7329 /**
7330 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
7331 * @adap: the adapter
7332 * @sched: the scheduler index
7333 * @ipg: the interpacket delay in tenths of nanoseconds
7334 *
7335 * Set the interpacket delay for a HW packet rate scheduler.
7336 */
t4_set_sched_ipg(struct adapter * adap,int sched,unsigned int ipg)7337 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
7338 {
7339 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
7340
7341 /* convert ipg to nearest number of core clocks */
7342 ipg *= core_ticks_per_usec(adap);
7343 ipg = (ipg + 5000) / 10000;
7344 if (ipg > M_TXTIMERSEPQ0)
7345 return -EINVAL;
7346
7347 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
7348 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
7349 if (sched & 1)
7350 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
7351 else
7352 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
7353 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
7354 t4_read_reg(adap, A_TP_TM_PIO_DATA);
7355 return 0;
7356 }
7357
/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks. The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	u64 v = (u64)bytes256 * adap->params.vpd.cclk;

	/* v * 62 + v / 2 computes floor(62.5 * v) in integer arithmetic. */
	return v * 62 + v / 2;
}
7374
7375 /**
7376 * t4_get_chan_txrate - get the current per channel Tx rates
7377 * @adap: the adapter
7378 * @nic_rate: rates for NIC traffic
7379 * @ofld_rate: rates for offloaded traffic
7380 *
7381 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
7382 * for each channel.
7383 */
t4_get_chan_txrate(struct adapter * adap,u64 * nic_rate,u64 * ofld_rate)7384 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
7385 {
7386 u32 v;
7387
7388 v = t4_read_reg(adap, A_TP_TX_TRATE);
7389 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
7390 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
7391 if (adap->chip_params->nchan > 2) {
7392 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
7393 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
7394 }
7395
7396 v = t4_read_reg(adap, A_TP_TX_ORATE);
7397 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
7398 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
7399 if (adap->chip_params->nchan > 2) {
7400 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
7401 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
7402 }
7403 }
7404
/**
 * t4_set_trace_filter - configure one of the tracing filters
 * @adap: the adapter
 * @tp: the desired trace filter parameters
 * @idx: which filter to configure
 * @enable: whether to enable or disable the filter
 *
 * Configures one of the tracing filters available in HW. If @tp is %NULL
 * it indicates that the filter is already written in the register and it
 * just needs to be enabled or disabled.  Returns 0 on success or
 * -EINVAL for out-of-range parameters.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst;
	u32 match_ctl_a, match_ctl_b;
	u32 data_reg, mask_reg, cfg;
	/* The filter-enable bit moved between T4 and T5+. */
	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;

	if (idx < 0 || idx >= NTRACE)
		return -EINVAL;

	/* T7 relocated the per-filter match control registers. */
	if (chip_id(adap) >= CHELSIO_T7) {
		match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
		match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
	} else {
		match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
		match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
	}

	/* Nothing to program: just toggle the enable bit. */
	if (tp == NULL || !enable) {
		t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Range-check the remaining parameters against the register fields. */
	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_set_reg_field(adap, match_ctl_a, en, 0);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* HW stores the don't-care mask inverted relative to tp->mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Writing CTL_A with @en set re-enables the tracer last. */
	t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
		     V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
7492
/**
 * t4_get_trace_filter - query one of the tracing filters
 * @adap: the adapter
 * @tp: the current trace filter parameters
 * @idx: which trace filter to query
 * @enabled: non-zero if the filter is enabled
 *
 * Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst;
	u32 data_reg, mask_reg;

	/* T7 relocated the per-filter match control registers. */
	if (chip_id(adap) >= CHELSIO_T7) {
		ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
		ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
	} else {
		ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
		ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
	}

	/* Enable/port/invert fields are laid out differently on T4 vs T5+. */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/*
	 * HW holds the don't-care mask inverted; undo that and clear the
	 * don't-care bits in the returned match data.
	 */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
7540
7541 /**
7542 * t4_set_trace_rss_control - configure the trace rss control register
7543 * @adap: the adapter
7544 * @chan: the channel number for RSS control
7545 * @qid: queue number
7546 *
7547 * Configures the MPS tracing RSS control parameter for specified
7548 * @chan channel and @qid queue number.
7549 */
t4_set_trace_rss_control(struct adapter * adap,u8 chan,u16 qid)7550 void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
7551 {
7552 u32 mps_trc_rss_control;
7553
7554 switch (chip_id(adap)) {
7555 case CHELSIO_T4:
7556 mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
7557 break;
7558 case CHELSIO_T5:
7559 case CHELSIO_T6:
7560 mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
7561 break;
7562 case CHELSIO_T7:
7563 default:
7564 mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
7565 break;
7566 }
7567
7568 t4_write_reg(adap, mps_trc_rss_control,
7569 V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
7570 }
7571
7572 /**
7573 * t4_pmtx_get_stats - returns the HW stats from PMTX
7574 * @adap: the adapter
7575 * @cnt: where to store the count statistics
7576 * @cycles: where to store the cycle statistics
7577 *
7578 * Returns performance statistics from PMTX.
7579 */
t4_pmtx_get_stats(struct adapter * adap,u32 cnt[],u64 cycles[])7580 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
7581 {
7582 int i;
7583 u32 data[2];
7584
7585 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
7586 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
7587 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
7588 if (is_t4(adap))
7589 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
7590 else {
7591 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
7592 A_PM_TX_DBG_DATA, data, 2,
7593 chip_id(adap) >= CHELSIO_T7 ?
7594 A_T7_PM_TX_DBG_STAT_MSB :
7595 A_PM_TX_DBG_STAT_MSB);
7596 cycles[i] = (((u64)data[0] << 32) | data[1]);
7597 }
7598 }
7599 }
7600
7601 /**
7602 * t4_pmrx_get_stats - returns the HW stats from PMRX
7603 * @adap: the adapter
7604 * @cnt: where to store the count statistics
7605 * @cycles: where to store the cycle statistics
7606 *
7607 * Returns performance statistics from PMRX.
7608 */
t4_pmrx_get_stats(struct adapter * adap,u32 cnt[],u64 cycles[])7609 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
7610 {
7611 int i;
7612 u32 data[2];
7613
7614 for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
7615 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
7616 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
7617 if (is_t4(adap)) {
7618 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
7619 } else {
7620 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
7621 A_PM_RX_DBG_DATA, data, 2,
7622 A_PM_RX_DBG_STAT_MSB);
7623 cycles[i] = (((u64)data[0] << 32) | data[1]);
7624 }
7625 }
7626 }
7627
7628 /**
7629 * t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
7630 * @adap: the adapter
7631 * @stats: where to store the statistics
7632 *
7633 * Returns performance statistics of PMRX cache.
7634 */
t4_pmrx_cache_get_stats(struct adapter * adap,u32 stats[])7635 void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
7636 {
7637 u8 i, j;
7638
7639 for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
7640 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
7641 stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
7642 t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
7643 &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
7644 }
7645 }
7646
7647 /**
7648 * t4_get_mps_bg_map - return the buffer groups associated with a port
7649 * @adap: the adapter
7650 * @idx: the port index
7651 *
7652 * Returns a bitmap indicating which MPS buffer groups are associated
7653 * with the given port. Bit i is set if buffer group i is used by the
7654 * port.
7655 */
t4_get_mps_bg_map(struct adapter * adap,int idx)7656 static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
7657 {
7658 u32 n;
7659
7660 if (adap->params.mps_bg_map != UINT32_MAX)
7661 return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
7662
7663 n = adap->params.nports;
7664 MPASS(n > 0 && n <= MAX_NPORTS);
7665 if (n == 1)
7666 return idx == 0 ? 0xf : 0;
7667 if (n == 2 && chip_id(adap) <= CHELSIO_T5)
7668 return idx < 2 ? (3 << (2 * idx)) : 0;
7669 return 1 << idx;
7670 }
7671
7672 /*
7673 * TP RX e-channels associated with the port.
7674 */
t4_get_rx_e_chan_map(struct adapter * adap,int idx)7675 static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
7676 {
7677 const u32 n = adap->params.nports;
7678 const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
7679
7680 switch (adap->params.tp.lb_mode) {
7681 case 0:
7682 if (n == 1)
7683 return (all_chan);
7684 if (n == 2 && chip_id(adap) <= CHELSIO_T5)
7685 return (3 << (2 * idx));
7686 return (1 << idx);
7687 case 1:
7688 MPASS(n == 1);
7689 return (all_chan);
7690 case 2:
7691 MPASS(n <= 2);
7692 return (3 << (2 * idx));
7693 default:
7694 CH_ERR(adap, "Unsupported LB mode %d\n",
7695 adap->params.tp.lb_mode);
7696 return (0);
7697 }
7698 }
7699
7700 /*
7701 * TP RX c-channel associated with the port.
7702 */
t4_get_rx_c_chan(struct adapter * adap,int idx)7703 static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
7704 {
7705 if (adap->params.tp_ch_map != UINT32_MAX)
7706 return (adap->params.tp_ch_map >> (8 * idx)) & 0xff;
7707 return 0;
7708 }
7709
7710 /*
7711 * TP TX c-channel associated with the port.
7712 */
t4_get_tx_c_chan(struct adapter * adap,int idx)7713 static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
7714 {
7715 if (adap->params.tx_tp_ch_map != UINT32_MAX)
7716 return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
7717 return idx;
7718 }
7719
7720 /**
7721 * t4_get_port_type_description - return Port Type string description
7722 * @port_type: firmware Port Type enumeration
7723 */
t4_get_port_type_description(enum fw_port_type port_type)7724 const char *t4_get_port_type_description(enum fw_port_type port_type)
7725 {
7726 static const char *const port_type_description[] = {
7727 "Fiber_XFI",
7728 "Fiber_XAUI",
7729 "BT_SGMII",
7730 "BT_XFI",
7731 "BT_XAUI",
7732 "KX4",
7733 "CX4",
7734 "KX",
7735 "KR",
7736 "SFP",
7737 "BP_AP",
7738 "BP4_AP",
7739 "QSFP_10G",
7740 "QSA",
7741 "QSFP",
7742 "BP40_BA",
7743 "KR4_100G",
7744 "CR4_QSFP",
7745 "CR_QSFP",
7746 "CR2_QSFP",
7747 "SFP28",
7748 "KR_SFP28",
7749 "KR_XLAUI",
7750 };
7751
7752 if (port_type < ARRAY_SIZE(port_type_description))
7753 return port_type_description[port_type];
7754 return "UNKNOWN";
7755 }
7756
7757 /**
7758 * t4_get_port_stats_offset - collect port stats relative to a previous
7759 * snapshot
7760 * @adap: The adapter
7761 * @idx: The port
7762 * @stats: Current stats to fill
7763 * @offset: Previous stats snapshot
7764 */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)7765 void t4_get_port_stats_offset(struct adapter *adap, int idx,
7766 struct port_stats *stats,
7767 struct port_stats *offset)
7768 {
7769 u64 *s, *o;
7770 int i;
7771
7772 t4_get_port_stats(adap, idx, stats);
7773 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
7774 i < (sizeof(struct port_stats)/sizeof(u64)) ;
7775 i++, s++, o++)
7776 *s -= *o;
7777 }
7778
/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.  Statistics are
 * accumulated across all tx channels that the port's loopback
 * configuration spans.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	struct port_info *pi;
	int port_id, tx_chan;
	u32 bgmap, stat_ctl;

	port_id = adap->port_map[idx];
	MPASS(port_id >= 0 && port_id <= adap->params.nports);
	pi = adap->port[port_id];

#define GET_STAT(name) \
	t4_read_reg64(adap, \
	t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
	memset(p, 0, sizeof(*p));
	/* Sum per-channel MPS counters over the port's channels. */
	for (tx_chan = pi->tx_chan;
	    tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
		p->tx_pause += GET_STAT(TX_PORT_PAUSE);
		p->tx_octets += GET_STAT(TX_PORT_BYTES);
		p->tx_frames += GET_STAT(TX_PORT_FRAMES);
		p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
		p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
		p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
		p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
		p->tx_frames_64 += GET_STAT(TX_PORT_64B);
		p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
		p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
		p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
		p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
		p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
		p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
		p->tx_drop += GET_STAT(TX_PORT_DROP);
		p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
		p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
		p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
		p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
		p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
		p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
		p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
		p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);

		p->rx_pause += GET_STAT(RX_PORT_PAUSE);
		p->rx_octets += GET_STAT(RX_PORT_BYTES);
		p->rx_frames += GET_STAT(RX_PORT_FRAMES);
		p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
		p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
		p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
		p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
		p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
		p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
		p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
		p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
		p->rx_frames_64 += GET_STAT(RX_PORT_64B);
		p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
		p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
		p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
		p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
		p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
		p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
		p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
		p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
		p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
		p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
		p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
		p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
		p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
		p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
		if (!is_t6(adap)) {
			/* Non-T6 chips report CRC errors in the standard
			 * per-port counter block. */
			MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
			p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
		}
	}
#undef GET_STAT

	/* T6 reads FCS errors from a chip-specific register, relative to a
	 * baseline captured elsewhere (fcs_base). */
	if (is_t6(adap) && pi->fcs_reg != -1)
		p->rx_fcs_err = t4_read_reg64(adap,
		    t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;

	/*
	 * On T5+ the MPS may be configured to include pause frames in the
	 * frame/byte counters; back them out so the totals reflect data
	 * traffic only.
	 */
	if (chip_id(adap) >= CHELSIO_T5) {
		stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
	/* Buffer-group drop/truncate counters, only for groups this port uses. */
	bgmap = pi->mps_bg_map;
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
#undef GET_STAT_COM
}
7892
/**
 * t4_get_lb_stats - collect loopback port statistics
 * @adap: the adapter
 * @idx: the loopback port index
 * @p: the stats structure to fill
 *
 * Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{

#define GET_STAT(name) \
	t4_read_reg64(adap, \
	t4_port_reg(adap, idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/*
	 * Buffer-group drop/truncate counters exist only for loopback
	 * indices that correspond to physical ports.
	 */
	if (idx < adap->params.nports) {
		u32 bg = adap2pinfo(adap, idx)->mps_bg_map;

		p->ovflow0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
		p->ovflow1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
		p->ovflow2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
		p->ovflow3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
		p->trunc0 = (bg & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
		p->trunc1 = (bg & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
		p->trunc2 = (bg & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
		p->trunc3 = (bg & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
	}

#undef GET_STAT
#undef GET_STAT_COM
}
7941
7942 /**
7943 * t4_wol_magic_enable - enable/disable magic packet WoL
7944 * @adap: the adapter
7945 * @port: the physical port index
7946 * @addr: MAC address expected in magic packets, %NULL to disable
7947 *
7948 * Enables/disables magic packet wake-on-LAN for the selected port.
7949 */
t4_wol_magic_enable(struct adapter * adap,unsigned int port,const u8 * addr)7950 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
7951 const u8 *addr)
7952 {
7953 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
7954
7955 if (is_t4(adap)) {
7956 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
7957 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
7958 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
7959 } else if (chip_id(adap) < CHELSIO_T7) {
7960 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
7961 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
7962 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
7963 } else {
7964 mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
7965 mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
7966 port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
7967 }
7968
7969 if (addr) {
7970 t4_write_reg(adap, mag_id_reg_l,
7971 (addr[2] << 24) | (addr[3] << 16) |
7972 (addr[4] << 8) | addr[5]);
7973 t4_write_reg(adap, mag_id_reg_h,
7974 (addr[0] << 8) | addr[1]);
7975 }
7976 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
7977 V_MAGICEN(addr != NULL));
7978 }
7979
/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc.  If @enable is %true pattern-based
 * WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else if (chip_id(adap) < CHELSIO_T7)
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	else
		port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	if (map > 0xff)
		return -EINVAL;

	/*
	 * NOTE(review): EPIO_REG has no T7 variant, unlike port_cfg_reg
	 * above -- confirm T5_PORT_REG is correct for T7 EPIO access.
	 */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-DATA3 hold the upper mask words, shared by all patterns. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}
8046
8047 /* t4_mk_filtdelwr - create a delete filter WR
8048 * @ftid: the filter ID
8049 * @wr: the filter work request to populate
8050 * @qid: ingress queue to receive the delete notification
8051 *
8052 * Creates a filter work request to delete the supplied filter. If @qid is
8053 * negative the delete notification is suppressed.
8054 */
t4_mk_filtdelwr(unsigned int ftid,struct fw_filter_wr * wr,int qid)8055 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
8056 {
8057 memset(wr, 0, sizeof(*wr));
8058 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
8059 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
8060 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
8061 V_FW_FILTER_WR_NOREPLY(qid < 0));
8062 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
8063 if (qid >= 0)
8064 wr->rx_chan_rx_rpl_iq =
8065 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
8066 }
8067
/*
 * INIT_CMD - fill in the common header of a firmware command structure:
 * the opcode for FW_<cmd>_CMD, the request flag, the READ or WRITE flag,
 * and the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
8074
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	/* Build an LDST command targeting the firmware address space. */
	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8093
8094 /**
8095 * t4_mdio_rd - read a PHY register through MDIO
8096 * @adap: the adapter
8097 * @mbox: mailbox to use for the FW command
8098 * @phy_addr: the PHY address
8099 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
8100 * @reg: the register to read
8101 * @valp: where to store the value
8102 *
8103 * Issues a FW command through the given mailbox to read a PHY register.
8104 */
t4_mdio_rd(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int * valp)8105 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
8106 unsigned int mmd, unsigned int reg, unsigned int *valp)
8107 {
8108 int ret;
8109 u32 ldst_addrspace;
8110 struct fw_ldst_cmd c;
8111
8112 memset(&c, 0, sizeof(c));
8113 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
8114 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8115 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8116 ldst_addrspace);
8117 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
8118 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
8119 V_FW_LDST_CMD_MMD(mmd));
8120 c.u.mdio.raddr = cpu_to_be16(reg);
8121
8122 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8123 if (ret == 0)
8124 *valp = be16_to_cpu(c.u.mdio.rval);
8125 return ret;
8126 }
8127
8128 /**
8129 * t4_mdio_wr - write a PHY register through MDIO
8130 * @adap: the adapter
8131 * @mbox: mailbox to use for the FW command
8132 * @phy_addr: the PHY address
8133 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
8134 * @reg: the register to write
8135 * @valp: value to write
8136 *
8137 * Issues a FW command through the given mailbox to write a PHY register.
8138 */
t4_mdio_wr(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int val)8139 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
8140 unsigned int mmd, unsigned int reg, unsigned int val)
8141 {
8142 u32 ldst_addrspace;
8143 struct fw_ldst_cmd c;
8144
8145 memset(&c, 0, sizeof(c));
8146 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
8147 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8148 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8149 ldst_addrspace);
8150 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
8151 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
8152 V_FW_LDST_CMD_MMD(mmd));
8153 c.u.mdio.raddr = cpu_to_be16(reg);
8154 c.u.mdio.rval = cpu_to_be16(val);
8155
8156 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8157 }
8158
/**
 *	t4_sge_decode_idma_state - decode a stuck SGE IDMA state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs a human-readable name for the given IDMA hardware state along
 *	with a few SGE debug registers, to help diagnose a hung ingress DMA
 *	engine.  The state-name table differs per chip generation.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* T4 IDMA state names, indexed by the raw hardware state value. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* T5 IDMA state names (different encoding from T4). */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* T6 IDMA state names (also used for T7 below). */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
	case CHELSIO_T7:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	/* States beyond the table are reported numerically. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
8316
8317 /**
8318 * t4_sge_ctxt_flush - flush the SGE context cache
8319 * @adap: the adapter
8320 * @mbox: mailbox to use for the FW command
8321 *
8322 * Issues a FW command through the given mailbox to flush the
8323 * SGE context cache.
8324 */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)8325 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
8326 {
8327 int ret;
8328 u32 ldst_addrspace;
8329 struct fw_ldst_cmd c;
8330
8331 memset(&c, 0, sizeof(c));
8332 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
8333 FW_LDST_ADDRSPC_SGE_EGRC :
8334 FW_LDST_ADDRSPC_SGE_INGC);
8335 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
8336 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8337 ldst_addrspace);
8338 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
8339 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
8340
8341 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8342 return ret;
8343 }
8344
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * MBMASTER is our own mailbox only when we insist on being master;
	 * otherwise the all-ones mask value means "no preference".
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	/* Report the device state to the caller: error takes precedence. */
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
8472
8473 /**
8474 * t4_fw_bye - end communication with FW
8475 * @adap: the adapter
8476 * @mbox: mailbox to use for the FW command
8477 *
8478 * Issues a command to terminate communication with FW.
8479 */
t4_fw_bye(struct adapter * adap,unsigned int mbox)8480 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
8481 {
8482 struct fw_bye_cmd c;
8483
8484 memset(&c, 0, sizeof(c));
8485 INIT_CMD(c, BYE, WRITE);
8486 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8487 }
8488
8489 /**
8490 * t4_fw_reset - issue a reset to FW
8491 * @adap: the adapter
8492 * @mbox: mailbox to use for the FW command
8493 * @reset: specifies the type of reset to perform
8494 *
8495 * Issues a reset command of the specified type to FW.
8496 */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)8497 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
8498 {
8499 struct fw_reset_cmd c;
8500
8501 memset(&c, 0, sizeof(c));
8502 INIT_CMD(c, RESET, WRITE);
8503 c.val = cpu_to_be32(reset);
8504 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8505 }
8506
8507 /**
8508 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
8509 * @adap: the adapter
8510 * @mbox: mailbox to use for the FW RESET command (if desired)
8511 * @force: force uP into RESET even if FW RESET command fails
8512 *
8513 * Issues a RESET command to firmware (if desired) with a HALT indication
8514 * and then puts the microprocessor into RESET state. The RESET command
8515 * will only be issued if a legitimate mailbox is provided (mbox <=
8516 * M_PCIE_FW_MASTER).
8517 *
8518 * This is generally used in order for the host to safely manipulate the
8519 * adapter without fear of conflicting with whatever the firmware might
8520 * be doing. The only way out of this state is to RESTART the firmware
8521 * ...
8522 */
t4_fw_halt(struct adapter * adap,unsigned int mbox,int force)8523 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
8524 {
8525 int ret = 0;
8526
8527 /*
8528 * If a legitimate mailbox is provided, issue a RESET command
8529 * with a HALT indication.
8530 */
8531 if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
8532 struct fw_reset_cmd c;
8533
8534 memset(&c, 0, sizeof(c));
8535 INIT_CMD(c, RESET, WRITE);
8536 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
8537 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
8538 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8539 }
8540
8541 /*
8542 * Normally we won't complete the operation if the firmware RESET
8543 * command fails but if our caller insists we'll go ahead and put the
8544 * uP into RESET. This can be useful if the firmware is hung or even
8545 * missing ... We'll have to take the risk of putting the uP into
8546 * RESET without the cooperation of firmware in that case.
8547 *
8548 * We also force the firmware's HALT flag to be on in case we bypassed
8549 * the firmware RESET command above or we're dealing with old firmware
8550 * which doesn't have the HALT capability. This will serve as a flag
8551 * for the incoming firmware to know that it's coming out of a HALT
8552 * rather than a RESET ... if it's new enough to understand that ...
8553 */
8554 if (ret == 0 || force) {
8555 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
8556 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
8557 F_PCIE_FW_HALT);
8558 }
8559
8560 /*
8561 * And we always return the result of the firmware RESET command
8562 * even when we force the uP into RESET ...
8563 */
8564 return ret;
8565 }
8566
8567 /**
8568 * t4_fw_restart - restart the firmware by taking the uP out of RESET
8569 * @adap: the adapter
8570 *
8571 * Restart firmware previously halted by t4_fw_halt(). On successful
8572 * return the previous PF Master remains as the new PF Master and there
8573 * is no need to issue a new HELLO command, etc.
8574 */
t4_fw_restart(struct adapter * adap,unsigned int mbox)8575 int t4_fw_restart(struct adapter *adap, unsigned int mbox)
8576 {
8577 int ms;
8578
8579 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
8580 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
8581 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
8582 return FW_SUCCESS;
8583 msleep(100);
8584 ms += 100;
8585 }
8586
8587 return -ETIMEDOUT;
8588 }
8589
8590 /**
8591 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
8592 * @adap: the adapter
8593 * @mbox: mailbox to use for the FW RESET command (if desired)
8594 * @fw_data: the firmware image to write
8595 * @size: image size
8596 * @force: force upgrade even if firmware doesn't cooperate
8597 *
8598 * Perform all of the steps necessary for upgrading an adapter's
8599 * firmware image. Normally this requires the cooperation of the
8600 * existing firmware in order to halt all existing activities
8601 * but if an invalid mailbox token is passed in we skip that step
8602 * (though we'll still put the adapter microprocessor into RESET in
8603 * that case).
8604 *
8605 * On successful return the new firmware will have been loaded and
8606 * the adapter will have been fully RESET losing all previous setup
8607 * state. On unsuccessful return the adapter may be completely hosed ...
8608 * positive errno indicates that the adapter is ~probably~ intact, a
8609 * negative errno indicates that things are looking bad ...
8610 */
t4_fw_upgrade(struct adapter * adap,unsigned int mbox,const u8 * fw_data,unsigned int size,int force)8611 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
8612 const u8 *fw_data, unsigned int size, int force)
8613 {
8614 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
8615 unsigned int bootstrap =
8616 be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
8617 int ret;
8618
8619 if (!t4_fw_matches_chip(adap, fw_hdr))
8620 return -EINVAL;
8621
8622 if (!bootstrap) {
8623 ret = t4_fw_halt(adap, mbox, force);
8624 if (ret < 0 && !force)
8625 return ret;
8626 }
8627
8628 ret = t4_load_fw(adap, fw_data, size);
8629 if (ret < 0 || bootstrap)
8630 return ret;
8631
8632 return t4_fw_restart(adap, mbox);
8633 }
8634
8635 /**
8636 * t4_fw_initialize - ask FW to initialize the device
8637 * @adap: the adapter
8638 * @mbox: mailbox to use for the FW command
8639 *
8640 * Issues a command to FW to partially initialize the device. This
8641 * performs initialization that generally doesn't depend on user input.
8642 */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)8643 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
8644 {
8645 struct fw_initialize_cmd c;
8646
8647 memset(&c, 0, sizeof(c));
8648 INIT_CMD(c, INITIALIZE, WRITE);
8649 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8650 }
8651
/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.  When @rw is set, the current contents of @val are
 *	sent to the firmware along with the parameter names.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	/* p walks the command's {mnem, val} pairs, one 32-bit word at a time. */
	__be32 *p = &c.param[0].mnem;

	/* The command layout only has room for 7 {mnem, val} slots. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Fill in the mnemonic (and optionally the seed value) per slot. */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);

	/*
	 * We always copy back the results, even if there's an error.  We'll
	 * get an error if any of the parameters was unknown to the Firmware,
	 * but there will be results for the others ...  (Older Firmware
	 * stopped at the first unknown parameter; newer Firmware processes
	 * them all and flags the unknown parameters with a return value of
	 * ~0UL.)
	 */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
		*val++ = be32_to_cpu(*p);

	return ret;
}
8706
/*
 * Read-only convenience wrapper around t4_query_params_rw() (rw == 0):
 * the current contents of @val are not sent to the firmware.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
8713
8714 /**
8715 * t4_set_params_timeout - sets FW or device parameters
8716 * @adap: the adapter
8717 * @mbox: mailbox to use for the FW command
8718 * @pf: the PF
8719 * @vf: the VF
8720 * @nparams: the number of parameters
8721 * @params: the parameter names
8722 * @val: the parameter values
8723 * @timeout: the timeout time
8724 *
8725 * Sets the value of FW or device parameters. Up to 7 parameters can be
8726 * specified at once.
8727 */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)8728 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
8729 unsigned int pf, unsigned int vf,
8730 unsigned int nparams, const u32 *params,
8731 const u32 *val, int timeout)
8732 {
8733 struct fw_params_cmd c;
8734 __be32 *p = &c.param[0].mnem;
8735
8736 if (nparams > 7)
8737 return -EINVAL;
8738
8739 memset(&c, 0, sizeof(c));
8740 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
8741 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8742 V_FW_PARAMS_CMD_PFN(pf) |
8743 V_FW_PARAMS_CMD_VFN(vf));
8744 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8745
8746 while (nparams--) {
8747 *p++ = cpu_to_be32(*params++);
8748 *p++ = cpu_to_be32(*val++);
8749 }
8750
8751 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
8752 }
8753
/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.  Convenience wrapper around
 *	t4_set_params_timeout() using the default command timeout.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
8774
8775 /**
8776 * t4_cfg_pfvf - configure PF/VF resource limits
8777 * @adap: the adapter
8778 * @mbox: mailbox to use for the FW command
8779 * @pf: the PF being configured
8780 * @vf: the VF being configured
8781 * @txq: the max number of egress queues
8782 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
8783 * @rxqi: the max number of interrupt-capable ingress queues
8784 * @rxq: the max number of interruptless ingress queues
8785 * @tc: the PCI traffic class
8786 * @vi: the max number of virtual interfaces
8787 * @cmask: the channel access rights mask for the PF/VF
8788 * @pmask: the port access rights mask for the PF/VF
8789 * @nexact: the maximum number of exact MPS filters
8790 * @rcaps: read capabilities
8791 * @wxcaps: write/execute capabilities
8792 *
8793 * Configures resource limits and capabilities for a physical or virtual
8794 * function.
8795 */
t4_cfg_pfvf(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int txq,unsigned int txq_eth_ctrl,unsigned int rxqi,unsigned int rxq,unsigned int tc,unsigned int vi,unsigned int cmask,unsigned int pmask,unsigned int nexact,unsigned int rcaps,unsigned int wxcaps)8796 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
8797 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
8798 unsigned int rxqi, unsigned int rxq, unsigned int tc,
8799 unsigned int vi, unsigned int cmask, unsigned int pmask,
8800 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
8801 {
8802 struct fw_pfvf_cmd c;
8803
8804 memset(&c, 0, sizeof(c));
8805 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
8806 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
8807 V_FW_PFVF_CMD_VFN(vf));
8808 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8809 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
8810 V_FW_PFVF_CMD_NIQ(rxq));
8811 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
8812 V_FW_PFVF_CMD_PMASK(pmask) |
8813 V_FW_PFVF_CMD_NEQ(txq));
8814 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
8815 V_FW_PFVF_CMD_NVI(vi) |
8816 V_FW_PFVF_CMD_NEXACTF(nexact));
8817 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
8818 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
8819 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
8820 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8821 }
8822
/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@vfvld: where to store the VF-valid bit of the VI (may be NULL)
 *	@vin: where to store the VI number (may be NULL)
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     uint8_t *vfvld, uint16_t *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* The firmware encodes the MAC count as "additional addresses". */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	/* On success the return value is the VI id decoded from the reply. */
	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Intentional fallthroughs: copy all requested addresses. */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/*
	 * Newer firmware reports VFVLD/VIN explicitly in the command reply;
	 * older firmware encodes both inside the VIID itself.
	 */
	if (vfvld) {
		*vfvld = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIVLD(ret);
	}
	if (vin) {
		*vin = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIN(ret);
	}

	return ret;
}
8897
/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@vfvld: where to store the VF-valid bit of the VI (may be NULL)
 *	@vin: where to store the VI number (may be NULL)
 *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				vfvld, vin, FW_VI_FUNC_ETH, 0);
}
8920
8921 /**
8922 * t4_free_vi - free a virtual interface
8923 * @adap: the adapter
8924 * @mbox: mailbox to use for the FW command
8925 * @pf: the PF owning the VI
8926 * @vf: the VF owning the VI
8927 * @viid: virtual interface identifiler
8928 *
8929 * Free a previously allocated virtual interface.
8930 */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)8931 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8932 unsigned int vf, unsigned int viid)
8933 {
8934 struct fw_vi_cmd c;
8935
8936 memset(&c, 0, sizeof(c));
8937 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8938 F_FW_CMD_REQUEST |
8939 F_FW_CMD_EXEC |
8940 V_FW_VI_CMD_PFN(pf) |
8941 V_FW_VI_CMD_VFN(vf));
8942 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8943 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8944
8945 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8946 }
8947
8948 /**
8949 * t4_set_rxmode - set Rx properties of a virtual interface
8950 * @adap: the adapter
8951 * @mbox: mailbox to use for the FW command
8952 * @viid: the VI id
8953 * @mtu: the new MTU or -1
8954 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8955 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8956 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8957 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8958 * @sleep_ok: if true we may sleep while awaiting command completion
8959 *
8960 * Sets Rx properties of a virtual interface.
8961 */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)8962 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8963 int mtu, int promisc, int all_multi, int bcast, int vlanex,
8964 bool sleep_ok)
8965 {
8966 struct fw_vi_rxmode_cmd c;
8967
8968 /* convert to FW values */
8969 if (mtu < 0)
8970 mtu = M_FW_VI_RXMODE_CMD_MTU;
8971 if (promisc < 0)
8972 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8973 if (all_multi < 0)
8974 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8975 if (bcast < 0)
8976 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8977 if (vlanex < 0)
8978 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8979
8980 memset(&c, 0, sizeof(c));
8981 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8982 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8983 V_FW_VI_RXMODE_CMD_VIID(viid));
8984 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8985 c.mtu_to_vlanexen =
8986 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8987 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8988 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8989 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8990 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8991 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8992 }
8993
8994 /**
8995 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
8996 * @adap: the adapter
8997 * @viid: the VI id
8998 * @mac: the MAC address
8999 * @mask: the mask
9000 * @vni: the VNI id for the tunnel protocol
9001 * @vni_mask: mask for the VNI id
9002 * @dip_hit: to enable DIP match for the MPS entry
9003 * @lookup_type: MAC address for inner (1) or outer (0) header
9004 * @sleep_ok: call is allowed to sleep
9005 *
9006 * Allocates an MPS entry with specified MAC address and VNI value.
9007 *
9008 * Returns a negative error number or the allocated index for this mac.
9009 */
t4_alloc_encap_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int vni,unsigned int vni_mask,u8 dip_hit,u8 lookup_type,bool sleep_ok)9010 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
9011 const u8 *addr, const u8 *mask, unsigned int vni,
9012 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
9013 bool sleep_ok)
9014 {
9015 struct fw_vi_mac_cmd c;
9016 struct fw_vi_mac_vni *p = c.u.exact_vni;
9017 int ret = 0;
9018 u32 val;
9019
9020 memset(&c, 0, sizeof(c));
9021 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9022 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9023 V_FW_VI_MAC_CMD_VIID(viid));
9024 val = V_FW_CMD_LEN16(1) |
9025 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
9026 c.freemacs_to_len16 = cpu_to_be32(val);
9027 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
9028 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
9029 memcpy(p->macaddr, addr, sizeof(p->macaddr));
9030 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
9031
9032 p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
9033 V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
9034 V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
9035 p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));
9036
9037 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9038 if (ret == 0)
9039 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
9040 return ret;
9041 }
9042
9043 /**
9044 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
9045 * @adap: the adapter
9046 * @viid: the VI id
9047 * @mac: the MAC address
9048 * @mask: the mask
9049 * @idx: index at which to add this entry
9050 * @port_id: the port index
9051 * @lookup_type: MAC address for inner (1) or outer (0) header
9052 * @sleep_ok: call is allowed to sleep
9053 *
9054 * Adds the mac entry at the specified index using raw mac interface.
9055 *
9056 * Returns a negative error number or the allocated index for this mac.
9057 */
t4_alloc_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)9058 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
9059 const u8 *addr, const u8 *mask, unsigned int idx,
9060 u8 lookup_type, u8 port_id, bool sleep_ok)
9061 {
9062 int ret = 0;
9063 struct fw_vi_mac_cmd c;
9064 struct fw_vi_mac_raw *p = &c.u.raw;
9065 u32 val;
9066
9067 memset(&c, 0, sizeof(c));
9068 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9069 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9070 V_FW_VI_MAC_CMD_VIID(viid));
9071 val = V_FW_CMD_LEN16(1) |
9072 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
9073 c.freemacs_to_len16 = cpu_to_be32(val);
9074
9075 /* Specify that this is an inner mac address */
9076 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
9077
9078 /* Lookup Type. Outer header: 0, Inner header: 1 */
9079 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
9080 V_DATAPORTNUM(port_id));
9081 /* Lookup mask and port mask */
9082 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
9083 V_DATAPORTNUM(M_DATAPORTNUM));
9084
9085 /* Copy the address and the mask */
9086 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
9087 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
9088
9089 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9090 if (ret == 0) {
9091 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
9092 if (ret != idx)
9093 ret = -ENOMEM;
9094 }
9095
9096 return ret;
9097 }
9098
9099 /**
9100 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
9101 * @adap: the adapter
9102 * @mbox: mailbox to use for the FW command
9103 * @viid: the VI id
9104 * @free: if true any existing filters for this VI id are first removed
9105 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
9106 * @addr: the MAC address(es)
9107 * @idx: where to store the index of each allocated filter
9108 * @hash: pointer to hash address filter bitmap
9109 * @sleep_ok: call is allowed to sleep
9110 *
9111 * Allocates an exact-match filter for each of the supplied addresses and
9112 * sets it to the corresponding address. If @idx is not %NULL it should
9113 * have at least @naddr entries, each of which will be set to the index of
9114 * the filter allocated for the corresponding MAC address. If a filter
9115 * could not be allocated for an address its index is set to 0xffff.
9116 * If @hash is not %NULL addresses that fail to allocate an exact filter
9117 * are hashed and update the hash filter bitmap pointed at by @hash.
9118 *
9119 * Returns a negative error number or the number of filters allocated.
9120 */
t4_alloc_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,bool free,unsigned int naddr,const u8 ** addr,u16 * idx,u64 * hash,bool sleep_ok)9121 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
9122 unsigned int viid, bool free, unsigned int naddr,
9123 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
9124 {
9125 int offset, ret = 0;
9126 struct fw_vi_mac_cmd c;
9127 unsigned int nfilters = 0;
9128 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
9129 unsigned int rem = naddr;
9130
9131 if (naddr > max_naddr)
9132 return -EINVAL;
9133
9134 for (offset = 0; offset < naddr ; /**/) {
9135 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
9136 ? rem
9137 : ARRAY_SIZE(c.u.exact));
9138 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
9139 u.exact[fw_naddr]), 16);
9140 struct fw_vi_mac_exact *p;
9141 int i;
9142
9143 memset(&c, 0, sizeof(c));
9144 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9145 F_FW_CMD_REQUEST |
9146 F_FW_CMD_WRITE |
9147 V_FW_CMD_EXEC(free) |
9148 V_FW_VI_MAC_CMD_VIID(viid));
9149 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
9150 V_FW_CMD_LEN16(len16));
9151
9152 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
9153 p->valid_to_idx =
9154 cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
9155 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
9156 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
9157 }
9158
9159 /*
9160 * It's okay if we run out of space in our MAC address arena.
9161 * Some of the addresses we submit may get stored so we need
9162 * to run through the reply to see what the results were ...
9163 */
9164 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
9165 if (ret && ret != -FW_ENOMEM)
9166 break;
9167
9168 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
9169 u16 index = G_FW_VI_MAC_CMD_IDX(
9170 be16_to_cpu(p->valid_to_idx));
9171
9172 if (idx)
9173 idx[offset+i] = (index >= max_naddr
9174 ? 0xffff
9175 : index);
9176 if (index < max_naddr)
9177 nfilters++;
9178 else if (hash)
9179 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
9180 }
9181
9182 free = false;
9183 offset += fw_naddr;
9184 rem -= fw_naddr;
9185 }
9186
9187 if (ret == 0 || ret == -FW_ENOMEM)
9188 ret = nfilters;
9189 return ret;
9190 }
9191
9192 /**
9193 * t4_free_encap_mac_filt - frees MPS entry at given index
9194 * @adap: the adapter
9195 * @viid: the VI id
9196 * @idx: index of MPS entry to be freed
9197 * @sleep_ok: call is allowed to sleep
9198 *
9199 * Frees the MPS entry at supplied index
9200 *
9201 * Returns a negative error number or zero on success
9202 */
t4_free_encap_mac_filt(struct adapter * adap,unsigned int viid,int idx,bool sleep_ok)9203 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
9204 int idx, bool sleep_ok)
9205 {
9206 struct fw_vi_mac_exact *p;
9207 struct fw_vi_mac_cmd c;
9208 u8 addr[] = {0,0,0,0,0,0};
9209 int ret = 0;
9210 u32 exact;
9211
9212 memset(&c, 0, sizeof(c));
9213 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9214 F_FW_CMD_REQUEST |
9215 F_FW_CMD_WRITE |
9216 V_FW_CMD_EXEC(0) |
9217 V_FW_VI_MAC_CMD_VIID(viid));
9218 exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
9219 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
9220 exact |
9221 V_FW_CMD_LEN16(1));
9222 p = c.u.exact;
9223 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
9224 V_FW_VI_MAC_CMD_IDX(idx));
9225 memcpy(p->macaddr, addr, sizeof(p->macaddr));
9226
9227 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9228 return ret;
9229 }
9230
9231 /**
9232 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
9233 * @adap: the adapter
9234 * @viid: the VI id
9235 * @addr: the MAC address
9236 * @mask: the mask
9237 * @idx: index of the entry in mps tcam
9238 * @lookup_type: MAC address for inner (1) or outer (0) header
9239 * @port_id: the port index
9240 * @sleep_ok: call is allowed to sleep
9241 *
9242 * Removes the mac entry at the specified index using raw mac interface.
9243 *
9244 * Returns a negative error number on failure.
9245 */
t4_free_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)9246 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
9247 const u8 *addr, const u8 *mask, unsigned int idx,
9248 u8 lookup_type, u8 port_id, bool sleep_ok)
9249 {
9250 struct fw_vi_mac_cmd c;
9251 struct fw_vi_mac_raw *p = &c.u.raw;
9252 u32 raw;
9253
9254 memset(&c, 0, sizeof(c));
9255 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9256 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
9257 V_FW_CMD_EXEC(0) |
9258 V_FW_VI_MAC_CMD_VIID(viid));
9259 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
9260 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
9261 raw |
9262 V_FW_CMD_LEN16(1));
9263
9264 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
9265 FW_VI_MAC_ID_BASED_FREE);
9266
9267 /* Lookup Type. Outer header: 0, Inner header: 1 */
9268 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
9269 V_DATAPORTNUM(port_id));
9270 /* Lookup mask and port mask */
9271 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
9272 V_DATAPORTNUM(M_DATAPORTNUM));
9273
9274 /* Copy the address and the mask */
9275 memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
9276 memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
9277
9278 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
9279 }
9280
9281 /**
9282 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
9283 * @adap: the adapter
9284 * @mbox: mailbox to use for the FW command
9285 * @viid: the VI id
9286 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
9287 * @addr: the MAC address(es)
9288 * @sleep_ok: call is allowed to sleep
9289 *
9290 * Frees the exact-match filter for each of the supplied addresses
9291 *
9292 * Returns a negative error number or the number of filters freed.
9293 */
t4_free_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int naddr,const u8 ** addr,bool sleep_ok)9294 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
9295 unsigned int viid, unsigned int naddr,
9296 const u8 **addr, bool sleep_ok)
9297 {
9298 int offset, ret = 0;
9299 struct fw_vi_mac_cmd c;
9300 unsigned int nfilters = 0;
9301 unsigned int max_naddr = adap->chip_params->mps_tcam_size;
9302 unsigned int rem = naddr;
9303
9304 if (naddr > max_naddr)
9305 return -EINVAL;
9306
9307 for (offset = 0; offset < (int)naddr ; /**/) {
9308 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
9309 ? rem
9310 : ARRAY_SIZE(c.u.exact));
9311 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
9312 u.exact[fw_naddr]), 16);
9313 struct fw_vi_mac_exact *p;
9314 int i;
9315
9316 memset(&c, 0, sizeof(c));
9317 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
9318 F_FW_CMD_REQUEST |
9319 F_FW_CMD_WRITE |
9320 V_FW_CMD_EXEC(0) |
9321 V_FW_VI_MAC_CMD_VIID(viid));
9322 c.freemacs_to_len16 =
9323 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
9324 V_FW_CMD_LEN16(len16));
9325
9326 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
9327 p->valid_to_idx = cpu_to_be16(
9328 F_FW_VI_MAC_CMD_VALID |
9329 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
9330 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
9331 }
9332
9333 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
9334 if (ret)
9335 break;
9336
9337 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
9338 u16 index = G_FW_VI_MAC_CMD_IDX(
9339 be16_to_cpu(p->valid_to_idx));
9340
9341 if (index < max_naddr)
9342 nfilters++;
9343 }
9344
9345 offset += fw_naddr;
9346 rem -= fw_naddr;
9347 }
9348
9349 if (ret == 0)
9350 ret = nfilters;
9351 return ret;
9352 }
9353
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @smt_idx: add MAC to SMT and return its index, or NULL
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0) /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Also program the SMT only when the caller wants the SMT index back. */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The reply carries the index the firmware actually used. */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* An index beyond the MPS TCAM size means allocation failed. */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				/* Legacy chips: derive the SMT index from the VI number. */
				if (chip_id(adap) <= CHELSIO_T5)
					*smt_idx = (viid & M_FW_VIID_VIN) << 1;
				else
					*smt_idx = viid & M_FW_VIID_VIN;
			}
		}
	}
	return ret;
}
9416
/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): this MAC command encodes the VI id with
	 * V_FW_VI_ENABLE_CMD_VIID rather than V_FW_VI_MAC_CMD_VIID as the
	 * other FW_VI_MAC_CMD users in this file do — presumably the two
	 * field layouts coincide; confirm against t4fw_interface.h.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
9444
9445 /**
9446 * t4_enable_vi_params - enable/disable a virtual interface
9447 * @adap: the adapter
9448 * @mbox: mailbox to use for the FW command
9449 * @viid: the VI id
9450 * @rx_en: 1=enable Rx, 0=disable Rx
9451 * @tx_en: 1=enable Tx, 0=disable Tx
9452 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
9453 *
9454 * Enables/disables a virtual interface. Note that setting DCB Enable
9455 * only makes sense when enabling a Virtual Interface ...
9456 */
t4_enable_vi_params(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en,bool dcb_en)9457 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
9458 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
9459 {
9460 struct fw_vi_enable_cmd c;
9461
9462 memset(&c, 0, sizeof(c));
9463 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
9464 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9465 V_FW_VI_ENABLE_CMD_VIID(viid));
9466 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
9467 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
9468 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
9469 FW_LEN16(c));
9470 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
9471 }
9472
9473 /**
9474 * t4_enable_vi - enable/disable a virtual interface
9475 * @adap: the adapter
9476 * @mbox: mailbox to use for the FW command
9477 * @viid: the VI id
9478 * @rx_en: 1=enable Rx, 0=disable Rx
9479 * @tx_en: 1=enable Tx, 0=disable Tx
9480 *
9481 * Enables/disables a virtual interface. Note that setting DCB Enable
9482 * only makes sense when enabling a Virtual Interface ...
9483 */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)9484 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
9485 bool rx_en, bool tx_en)
9486 {
9487 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
9488 }
9489
9490 /**
9491 * t4_identify_port - identify a VI's port by blinking its LED
9492 * @adap: the adapter
9493 * @mbox: mailbox to use for the FW command
9494 * @viid: the VI id
9495 * @nblinks: how many times to blink LED at 2.5 Hz
9496 *
9497 * Identifies a VI's port by blinking its LED.
9498 */
t4_identify_port(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int nblinks)9499 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
9500 unsigned int nblinks)
9501 {
9502 struct fw_vi_enable_cmd c;
9503
9504 memset(&c, 0, sizeof(c));
9505 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
9506 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9507 V_FW_VI_ENABLE_CMD_VIID(viid));
9508 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
9509 c.blinkdur = cpu_to_be16(nblinks);
9510 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9511 }
9512
9513 /**
9514 * t4_iq_stop - stop an ingress queue and its FLs
9515 * @adap: the adapter
9516 * @mbox: mailbox to use for the FW command
9517 * @pf: the PF owning the queues
9518 * @vf: the VF owning the queues
9519 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
9520 * @iqid: ingress queue id
9521 * @fl0id: FL0 queue id or 0xffff if no attached FL0
9522 * @fl1id: FL1 queue id or 0xffff if no attached FL1
9523 *
9524 * Stops an ingress queue and its associated FLs, if any. This causes
9525 * any current or future data/messages destined for these queues to be
9526 * tossed.
9527 */
t4_iq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)9528 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
9529 unsigned int vf, unsigned int iqtype, unsigned int iqid,
9530 unsigned int fl0id, unsigned int fl1id)
9531 {
9532 struct fw_iq_cmd c;
9533
9534 memset(&c, 0, sizeof(c));
9535 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
9536 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
9537 V_FW_IQ_CMD_VFN(vf));
9538 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
9539 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
9540 c.iqid = cpu_to_be16(iqid);
9541 c.fl0id = cpu_to_be16(fl0id);
9542 c.fl1id = cpu_to_be16(fl1id);
9543 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9544 }
9545
9546 /**
9547 * t4_iq_free - free an ingress queue and its FLs
9548 * @adap: the adapter
9549 * @mbox: mailbox to use for the FW command
9550 * @pf: the PF owning the queues
9551 * @vf: the VF owning the queues
9552 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
9553 * @iqid: ingress queue id
9554 * @fl0id: FL0 queue id or 0xffff if no attached FL0
9555 * @fl1id: FL1 queue id or 0xffff if no attached FL1
9556 *
9557 * Frees an ingress queue and its associated FLs, if any.
9558 */
t4_iq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)9559 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9560 unsigned int vf, unsigned int iqtype, unsigned int iqid,
9561 unsigned int fl0id, unsigned int fl1id)
9562 {
9563 struct fw_iq_cmd c;
9564
9565 memset(&c, 0, sizeof(c));
9566 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
9567 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
9568 V_FW_IQ_CMD_VFN(vf));
9569 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
9570 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
9571 c.iqid = cpu_to_be16(iqid);
9572 c.fl0id = cpu_to_be16(fl0id);
9573 c.fl1id = cpu_to_be16(fl1id);
9574 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9575 }
9576
9577 /**
9578 * t4_eth_eq_stop - stop an Ethernet egress queue
9579 * @adap: the adapter
9580 * @mbox: mailbox to use for the FW command
9581 * @pf: the PF owning the queues
9582 * @vf: the VF owning the queues
9583 * @eqid: egress queue id
9584 *
9585 * Stops an Ethernet egress queue. The queue can be reinitialized or
9586 * freed but is not otherwise functional after this call.
9587 */
t4_eth_eq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)9588 int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
9589 unsigned int vf, unsigned int eqid)
9590 {
9591 struct fw_eq_eth_cmd c;
9592
9593 memset(&c, 0, sizeof(c));
9594 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
9595 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9596 V_FW_EQ_ETH_CMD_PFN(pf) |
9597 V_FW_EQ_ETH_CMD_VFN(vf));
9598 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_EQSTOP | FW_LEN16(c));
9599 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
9600 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9601 }
9602
9603 /**
9604 * t4_eth_eq_free - free an Ethernet egress queue
9605 * @adap: the adapter
9606 * @mbox: mailbox to use for the FW command
9607 * @pf: the PF owning the queue
9608 * @vf: the VF owning the queue
9609 * @eqid: egress queue id
9610 *
9611 * Frees an Ethernet egress queue.
9612 */
t4_eth_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)9613 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9614 unsigned int vf, unsigned int eqid)
9615 {
9616 struct fw_eq_eth_cmd c;
9617
9618 memset(&c, 0, sizeof(c));
9619 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
9620 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9621 V_FW_EQ_ETH_CMD_PFN(pf) |
9622 V_FW_EQ_ETH_CMD_VFN(vf));
9623 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
9624 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
9625 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9626 }
9627
9628 /**
9629 * t4_ctrl_eq_free - free a control egress queue
9630 * @adap: the adapter
9631 * @mbox: mailbox to use for the FW command
9632 * @pf: the PF owning the queue
9633 * @vf: the VF owning the queue
9634 * @eqid: egress queue id
9635 *
9636 * Frees a control egress queue.
9637 */
t4_ctrl_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)9638 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9639 unsigned int vf, unsigned int eqid)
9640 {
9641 struct fw_eq_ctrl_cmd c;
9642
9643 memset(&c, 0, sizeof(c));
9644 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
9645 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9646 V_FW_EQ_CTRL_CMD_PFN(pf) |
9647 V_FW_EQ_CTRL_CMD_VFN(vf));
9648 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
9649 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
9650 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9651 }
9652
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
9677
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a human-readable description of the Link Down Reason Code,
 * or "Bad Reason Code" for values outside the known range.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char *rc_name[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	return (link_down_rc < sizeof(rc_name) / sizeof(rc_name[0])) ?
	    rc_name[link_down_rc] : "Bad Reason Code";
}
9702
9703 /*
9704 * Return the highest speed set in the port capabilities, in Mb/s.
9705 */
fwcap_to_speed(uint32_t caps)9706 unsigned int fwcap_to_speed(uint32_t caps)
9707 {
9708 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
9709 do { \
9710 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
9711 return __speed; \
9712 } while (0)
9713
9714 TEST_SPEED_RETURN(400G, 400000);
9715 TEST_SPEED_RETURN(200G, 200000);
9716 TEST_SPEED_RETURN(100G, 100000);
9717 TEST_SPEED_RETURN(50G, 50000);
9718 TEST_SPEED_RETURN(40G, 40000);
9719 TEST_SPEED_RETURN(25G, 25000);
9720 TEST_SPEED_RETURN(10G, 10000);
9721 TEST_SPEED_RETURN(1G, 1000);
9722 TEST_SPEED_RETURN(100M, 100);
9723
9724 #undef TEST_SPEED_RETURN
9725
9726 return 0;
9727 }
9728
9729 /*
9730 * Return the port capabilities bit for the given speed, which is in Mb/s.
9731 */
speed_to_fwcap(unsigned int speed)9732 uint32_t speed_to_fwcap(unsigned int speed)
9733 {
9734 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
9735 do { \
9736 if (speed == __speed) \
9737 return FW_PORT_CAP32_SPEED_##__caps_speed; \
9738 } while (0)
9739
9740 TEST_SPEED_RETURN(400G, 400000);
9741 TEST_SPEED_RETURN(200G, 200000);
9742 TEST_SPEED_RETURN(100G, 100000);
9743 TEST_SPEED_RETURN(50G, 50000);
9744 TEST_SPEED_RETURN(40G, 40000);
9745 TEST_SPEED_RETURN(25G, 25000);
9746 TEST_SPEED_RETURN(10G, 10000);
9747 TEST_SPEED_RETURN(1G, 1000);
9748 TEST_SPEED_RETURN(100M, 100);
9749
9750 #undef TEST_SPEED_RETURN
9751
9752 return 0;
9753 }
9754
9755 /*
9756 * Return the port capabilities bit for the highest speed in the capabilities.
9757 */
fwcap_top_speed(uint32_t caps)9758 uint32_t fwcap_top_speed(uint32_t caps)
9759 {
9760 #define TEST_SPEED_RETURN(__caps_speed) \
9761 do { \
9762 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
9763 return FW_PORT_CAP32_SPEED_##__caps_speed; \
9764 } while (0)
9765
9766 TEST_SPEED_RETURN(400G);
9767 TEST_SPEED_RETURN(200G);
9768 TEST_SPEED_RETURN(100G);
9769 TEST_SPEED_RETURN(50G);
9770 TEST_SPEED_RETURN(40G);
9771 TEST_SPEED_RETURN(25G);
9772 TEST_SPEED_RETURN(10G);
9773 TEST_SPEED_RETURN(1G);
9774 TEST_SPEED_RETURN(100M);
9775
9776 #undef TEST_SPEED_RETURN
9777
9778 return 0;
9779 }
9780
9781 /**
9782 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
9783 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
9784 *
9785 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
9786 * 32-bit Port Capabilities value.
9787 */
lstatus_to_fwcap(u32 lstatus)9788 static uint32_t lstatus_to_fwcap(u32 lstatus)
9789 {
9790 uint32_t linkattr = 0;
9791
9792 /*
9793 * Unfortunately the format of the Link Status in the old
9794 * 16-bit Port Information message isn't the same as the
9795 * 16-bit Port Capabilities bitfield used everywhere else ...
9796 */
9797 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
9798 linkattr |= FW_PORT_CAP32_FC_RX;
9799 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
9800 linkattr |= FW_PORT_CAP32_FC_TX;
9801 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
9802 linkattr |= FW_PORT_CAP32_SPEED_100M;
9803 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
9804 linkattr |= FW_PORT_CAP32_SPEED_1G;
9805 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
9806 linkattr |= FW_PORT_CAP32_SPEED_10G;
9807 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
9808 linkattr |= FW_PORT_CAP32_SPEED_25G;
9809 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
9810 linkattr |= FW_PORT_CAP32_SPEED_40G;
9811 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
9812 linkattr |= FW_PORT_CAP32_SPEED_100G;
9813
9814 return linkattr;
9815 }
9816
/*
 * Updates all fields owned by the common code in port_info and link_config
 * based on information provided by the firmware.  Does not touch any
 * requested_* field.
 *
 * @pi: the port whose state is being updated
 * @p: the firmware PORT command reply to decode
 * @action: which PORT_INFO format @p carries (old 16-bit or new 32-bit)
 * @mod_changed: if non-NULL, set to true when the transceiver module or
 *	port capabilities changed
 * @link_changed: if non-NULL, set to true when link up/down, speed, FEC,
 *	or pause settings changed
 */
static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
    enum fw_port_action action, bool *mod_changed, bool *link_changed)
{
	struct link_config old_lc, *lc = &pi->link_cfg;
	unsigned char fc;
	u32 stat, linkattr;
	int old_ptype, old_mtype;

	/* Snapshot the current state so we can report what changed below. */
	old_ptype = pi->port_type;
	old_mtype = pi->mod_type;
	old_lc = *lc;
	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* Old 16-bit Port Information message. */
		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);

		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
		/* -1 means no MDIO access to the PHY. */
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
		    G_FW_PORT_CMD_MDIOADDR(stat) : -1;

		/* 16-bit capability words are widened to the 32-bit format. */
		lc->pcaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
		lc->acaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
		lc->lpacaps = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

		/* The old lstatus layout must be translated to caps32. */
		linkattr = lstatus_to_fwcap(stat);
	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
		/* New 32-bit Port Information message. */
		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);

		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;

		lc->pcaps = be32_to_cpu(p->u.info32.pcaps32);
		lc->acaps = be32_to_cpu(p->u.info32.acaps32);
		lc->lpacaps = be32_to_cpu(p->u.info32.lpacaps32);
		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);

		linkattr = be32_to_cpu(p->u.info32.linkattr32);
	} else {
		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
		return;
	}

	/* Derive current speed/FEC/pause from the negotiated attributes. */
	lc->speed = fwcap_to_speed(linkattr);
	lc->fec = fwcap_to_fec(linkattr, true);

	fc = 0;
	if (linkattr & FW_PORT_CAP32_FC_RX)
		fc |= PAUSE_RX;
	if (linkattr & FW_PORT_CAP32_FC_TX)
		fc |= PAUSE_TX;
	lc->fc = fc;

	if (mod_changed != NULL)
		*mod_changed = false;
	if (link_changed != NULL)
		*link_changed = false;
	/* Module/port change: type or advertised port capabilities differ. */
	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
	    old_lc.pcaps != lc->pcaps) {
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE)
			lc->fec_hint = fwcap_to_fec(lc->acaps, true);
		if (mod_changed != NULL)
			*mod_changed = true;
	}
	/* Link change: up/down, speed, FEC, or pause settings differ. */
	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
		if (link_changed != NULL)
			*link_changed = true;
	}
}
9895
9896 /**
9897 * t4_update_port_info - retrieve and update port information if changed
9898 * @pi: the port_info
9899 *
9900 * We issue a Get Port Information Command to the Firmware and, if
9901 * successful, we check to see if anything is different from what we
9902 * last recorded and update things accordingly.
9903 */
t4_update_port_info(struct port_info * pi)9904 int t4_update_port_info(struct port_info *pi)
9905 {
9906 struct adapter *sc = pi->adapter;
9907 struct fw_port_cmd cmd;
9908 enum fw_port_action action;
9909 int ret;
9910
9911 memset(&cmd, 0, sizeof(cmd));
9912 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9913 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9914 V_FW_PORT_CMD_PORTID(pi->hw_port));
9915 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
9916 FW_PORT_ACTION_GET_PORT_INFO;
9917 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
9918 FW_LEN16(cmd));
9919 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
9920 if (ret)
9921 return ret;
9922
9923 handle_port_info(pi, &cmd, action, NULL, NULL);
9924 return 0;
9925 }
9926
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Returns 0 on success or -EINVAL for messages this handler does not
 *	understand.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	enum fw_port_action action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
	bool mod_changed, link_changed;

	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	    action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		/* link/module state change message */
		int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		/* Map the hardware port number to the driver's port index. */
		int port_id = adap->port_map[hw_port];
		struct port_info *pi;

		MPASS(port_id >= 0 && port_id < adap->params.nports);
		pi = adap->port[port_id];
		/* Port state is updated under the port lock. */
		PORT_LOCK(pi);
		handle_port_info(pi, p, action, &mod_changed, &link_changed);
		PORT_UNLOCK(pi);
		/* Module-change callout runs unlocked ... */
		if (mod_changed)
			t4_os_portmod_changed(pi);
		/* ... but the link-change callout expects the port lock. */
		if (link_changed) {
			PORT_LOCK(pi);
			t4_os_link_changed(pi);
			PORT_UNLOCK(pi);
		}
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
9968
9969 /**
9970 * get_pci_mode - determine a card's PCI mode
9971 * @adapter: the adapter
9972 * @p: where to store the PCI settings
9973 *
9974 * Determines a card's PCI mode and associated parameters, such as speed
9975 * and width.
9976 */
get_pci_mode(struct adapter * adapter,struct pci_params * p)9977 static void get_pci_mode(struct adapter *adapter,
9978 struct pci_params *p)
9979 {
9980 u16 val;
9981 u32 pcie_cap;
9982
9983 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9984 if (pcie_cap) {
9985 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9986 p->speed = val & PCI_EXP_LNKSTA_CLS;
9987 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9988 }
9989 }
9990
/* Entry in the table of known non-standard Flash parts. */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw result of the SPI Read ID command */
	u32 size_mb;	/* part size in bytes despite the name (e.g. 4 << 20) */
};
9995
/*
 * Identify the adapter's serial Flash part and record its size (and derived
 * sector count) in adapter->params.  Returns 0 on success or a negative
 * error if the Read ID command itself fails; an unrecognized part is not an
 * error (a conservative default size is assumed instead).
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
	 * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
	 * size, depending on the board.
	 */
	if (size == 0) {
		/* T7 and later boards carry at least a 16MB part. */
		size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
		CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
			flashid, size);
		size <<= 20;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
10134
/*
 * Program the PCIe Completion Timeout Value (low nibble of Device Control 2)
 * to @range.  Silently does nothing if the device has no PCIe capability.
 */
static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
{
	u32 cap;
	u16 devctl2;

	cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (cap == 0)
		return;

	t4_os_pci_read_cfg2(adapter, cap + PCI_EXP_DEVCTL2, &devctl2);
	devctl2 = (devctl2 & 0xfff0) | range;
	t4_os_pci_write_cfg2(adapter, cap + PCI_EXP_DEVCTL2, devctl2);
}
10149
/*
 * Return the invariant per-generation chip parameters for @chipid
 * (CHELSIO_T4 .. CHELSIO_T7), or NULL for an unknown chip.  The returned
 * pointer refers to static storage and must not be freed or modified.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	/* Indexed by chipid - CHELSIO_T4; keep entries in chip order. */
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ,
			.filter_opt_len = FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.filter_opt_len = T5_FILTER_OPT_LEN,
			.filter_num_opt = S_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
			.rss_nentries = T6_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
		{
			/* T7 */
			.nchan = NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_ibq = CIM_NUM_IBQ_T7,
			.cim_num_obq = CIM_NUM_OBQ_T7,
			.filter_opt_len = T7_FILTER_OPT_LEN,
			.filter_num_opt = S_T7_FT_LAST + 1,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.sge_ctxt_size = SGE_CTXT_SIZE_T7,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES * 3,
			.rss_nentries = T7_RSS_NENTRIES,
			.cim_la_size = CIMLA_SIZE_T6,
		},
	};

	/* Rebase so that CHELSIO_T4 maps to index 0, then bounds-check. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}
10233
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.  Returns 0 on success or a negative
 *	error on failure (e.g. unsupported chip, Flash or VPD read failure).
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Chip generation and revision come from PL_REV. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = adapter->chip_params->cim_la_size;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		/* FPGAs carry a double-sized CIM logic analyzer buffer. */
		adapter->params.cim_la_size = 2 * adapter->chip_params->cim_la_size;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
10303
/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;
	const bool bt = adapter->bt_map != 0;	/* any BASE-T ports? */

	t4_intr_disable(adapter);
	if (bt)
		t4_write_reg(adapter, A_DBG_GPIO_EN, 0xffff0000);
	for_each_port(adapter, port) {
		/* T4 uses the XGMAC register block; later chips use MAC. */
		u32 a_port_cfg = is_t4(adapter) ?
				 t4_port_reg(adapter, port, A_XGMAC_PORT_CFG) :
				 t4_port_reg(adapter, port, A_MAC_PORT_CFG);

		/* Drop signal detect to force the link down. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
		if (!bt) {
			/* Power down / bypass the SerDes PLLs as well. */
			u32 hss_cfg0 = is_t4(adapter) ?
				       t4_port_reg(adapter, port, A_XGMAC_PORT_HSS_CFG0) :
				       t4_port_reg(adapter, port, A_MAC_PORT_HSS_CFG0);
			t4_set_reg_field(adapter, hss_cfg0, F_HSSPDWNPLLB |
					 F_HSSPDWNPLLA | F_HSSPLLBYPB | F_HSSPLLBYPA,
					 F_HSSPDWNPLLB | F_HSSPDWNPLLA | F_HSSPLLBYPB |
					 F_HSSPLLBYPA);
		}
	}
	/* Finally stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}
10346
/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
10438
/**
 *	t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Fills in the Device Log location/size and the uP core count.  Prefers
 *	the register-exported parameters (usable even without firmware); falls
 *	back to a FW_DEVLOG_CMD mailbox query when @fw_attach allows it.
 */
int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	/* 0 means not exported; all-ones means the register read failed. */
	if (pf_dparams && pf_dparams != UINT32_MAX) {
		unsigned int nentries, nentries128, ncore_shift;

		/* Core count is encoded as log2, split across two fields. */
		ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
		    G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
		adap->params.ncores = 1 << ncore_shift;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
		/* Entry count is stored in units of 128 entries, minus one. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	adap->params.ncores = 1;
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
10506
/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure
 *	from the hardware registers: interrupt packing thresholds, holdoff
 *	timers, free-list starvation thresholds, queues-per-page, host page
 *	size, padding/packing boundaries, and free-list buffer sizes.
 *	Always returns 0.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt coalescing packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ may scale the SGE timers; TSCALE == 0 means no scaling. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Interrupt holdoff timers, converted from core ticks to us. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing-mode field moved
	 * between chip generations.
	 */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF, encoded as log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	/* The pad-boundary encoding base shifted on T6. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	/* T4 has no separate packing boundary; T5+ encode it in CONTROL2
	 * where 0 means 16 bytes.
	 */
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
10596
10597 /* Convert the LE's hardware hash mask to a shorter filter mask. */
10598 static inline uint16_t
hashmask_to_filtermask(struct adapter * adap,uint64_t hashmask,uint16_t filter_mode)10599 hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
10600 {
10601 int first, last, i;
10602 uint16_t filter_mask;
10603 uint64_t mask; /* field mask */
10604
10605
10606 if (chip_id(adap) >= CHELSIO_T7) {
10607 first = S_T7_FT_FIRST;
10608 last = S_T7_FT_LAST;
10609 } else {
10610 first = S_FT_FIRST;
10611 last = S_FT_LAST;
10612 }
10613
10614 for (filter_mask = 0, i = first; i <= last; i++) {
10615 if ((filter_mode & (1 << i)) == 0)
10616 continue;
10617 mask = (1 << t4_filter_field_width(adap, i)) - 1;
10618 if ((hashmask & mask) == mask)
10619 filter_mask |= 1 << i;
10620 hashmask >>= t4_filter_field_width(adap, i);
10621 }
10622
10623 return (filter_mask);
10624 }
10625
/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 *
 * Tries the firmware FILTER_MODE_MASK/VNIC_MODE params first; on old
 * firmware that lacks them, falls back to reading the mode/mask straight
 * from the hardware registers.  Also precomputes the per-field shift
 * positions within the Compressed Filter Tuple.
 */
static void
read_filter_mode_and_ingress_config(struct adapter *adap)
{
	int rc;
	uint32_t v, param[2], val[2];
	struct tp_params *tpp = &adap->params.tp;
	uint64_t hash_mask;

	param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
	param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
	rc = -t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val);
	if (rc == 0) {
		tpp->filter_mode = G_FW_PARAMS_PARAM_FILTER_MODE(val[0]);
		tpp->filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val[0]);
		tpp->vnic_mode = val[1];
	} else {
		/*
		 * Old firmware.  Read filter mode/mask and ingress config
		 * straight from the hardware.
		 */
		t4_tp_pio_read(adap, &v, 1, A_TP_VLAN_PRI_MAP, true);
		tpp->filter_mode = v & 0xffff;

		hash_mask = 0;
		if (chip_id(adap) > CHELSIO_T4) {
			/* 64-bit LE hash mask is split across two registers. */
			v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
			hash_mask = v;
			v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
			hash_mask |= (u64)v << 32;
		}
		if (chip_id(adap) >= CHELSIO_T7) {
			/*
			 * This param came before T7 so T7+ firmwares should
			 * always support this query.
			 */
			CH_WARN(adap, "query for filter mode/mask failed: %d\n",
			    rc);
		}
		tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
		    tpp->filter_mode);

		t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
		if (v & F_VNIC)
			tpp->vnic_mode = FW_VNIC_MODE_PF_VF;
		else
			tpp->vnic_mode = FW_VNIC_MODE_OUTER_VLAN;
	}

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	if (chip_id(adap) >= CHELSIO_T7) {
		tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
		tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
		tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
		tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
		tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
		tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
		tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
		tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
		tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
		tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
		tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
		tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
		tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
		tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
	} else {
		/* -1 marks fields that don't exist before T7. */
		tpp->ipsecidx_shift = -1;
		tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
		tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
		tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
		tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
		tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
		tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
		tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
		tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
		tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
		tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
		tpp->roce_shift = -1;
		tpp->synonly_shift = -1;
		tpp->tcpflags_shift = -1;
	}
}
10718
10719 /**
10720 * t4_init_tp_params - initialize adap->params.tp
10721 * @adap: the adapter
10722 *
10723 * Initialize various fields of the adapter's TP Parameters structure.
10724 */
t4_init_tp_params(struct adapter * adap)10725 int t4_init_tp_params(struct adapter *adap)
10726 {
10727 u32 tx_len, rx_len, r, v;
10728 struct tp_params *tpp = &adap->params.tp;
10729
10730 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
10731 tpp->tre = G_TIMERRESOLUTION(v);
10732 tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
10733
10734 read_filter_mode_and_ingress_config(adap);
10735
10736 tpp->rx_pkt_encap = false;
10737 tpp->lb_mode = 0;
10738 tpp->lb_nchan = 1;
10739 if (chip_id(adap) > CHELSIO_T5) {
10740 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
10741 tpp->rx_pkt_encap = v & F_CRXPKTENC;
10742 if (chip_id(adap) >= CHELSIO_T7) {
10743 t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
10744 tpp->lb_mode = G_T7_LB_MODE(v);
10745 if (tpp->lb_mode == 1)
10746 tpp->lb_nchan = 4;
10747 else if (tpp->lb_mode == 2)
10748 tpp->lb_nchan = 2;
10749 }
10750 }
10751
10752 rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
10753 tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
10754
10755 r = t4_read_reg(adap, A_TP_PARA_REG2);
10756 rx_len = min(rx_len, G_MAXRXDATA(r));
10757 tx_len = min(tx_len, G_MAXRXDATA(r));
10758
10759 r = t4_read_reg(adap, A_TP_PARA_REG7);
10760 v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
10761 rx_len = min(rx_len, v);
10762 tx_len = min(tx_len, v);
10763
10764 tpp->max_tx_pdu = tx_len;
10765 tpp->max_rx_pdu = rx_len;
10766
10767 return 0;
10768 }
10769
10770 /**
10771 * t4_filter_field_width - returns the width of a filter field
10772 * @adap: the adapter
10773 * @filter_field: the filter field whose width is being requested
10774 *
10775 * Return the shift position of a filter field within the Compressed
10776 * Filter Tuple. The filter field is specified via its selection bit
10777 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
10778 */
t4_filter_field_width(const struct adapter * adap,int filter_field)10779 int t4_filter_field_width(const struct adapter *adap, int filter_field)
10780 {
10781 const int nopt = adap->chip_params->filter_num_opt;
10782 static const uint8_t width_t7[] = {
10783 W_FT_IPSECIDX,
10784 W_FT_FCOE,
10785 W_FT_PORT,
10786 W_FT_VNIC_ID,
10787 W_FT_VLAN,
10788 W_FT_TOS,
10789 W_FT_PROTOCOL,
10790 W_FT_ETHERTYPE,
10791 W_FT_MACMATCH,
10792 W_FT_MPSHITTYPE,
10793 W_FT_FRAGMENTATION,
10794 W_FT_ROCE,
10795 W_FT_SYNONLY,
10796 W_FT_TCPFLAGS
10797 };
10798 static const uint8_t width_t4[] = {
10799 W_FT_FCOE,
10800 W_FT_PORT,
10801 W_FT_VNIC_ID,
10802 W_FT_VLAN,
10803 W_FT_TOS,
10804 W_FT_PROTOCOL,
10805 W_FT_ETHERTYPE,
10806 W_FT_MACMATCH,
10807 W_FT_MPSHITTYPE,
10808 W_FT_FRAGMENTATION
10809 };
10810 const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
10811
10812 if (filter_field < 0 || filter_field >= nopt)
10813 return (0);
10814 return (width[filter_field]);
10815 }
10816
10817 /**
10818 * t4_filter_field_shift - calculate filter field shift
10819 * @adap: the adapter
10820 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
10821 *
10822 * Return the shift position of a filter field within the Compressed
10823 * Filter Tuple. The filter field is specified via its selection bit
10824 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
10825 */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)10826 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
10827 {
10828 const unsigned int filter_mode = adap->params.tp.filter_mode;
10829 unsigned int sel;
10830 int field_shift;
10831
10832 if ((filter_mode & filter_sel) == 0)
10833 return -1;
10834
10835 if (chip_id(adap) >= CHELSIO_T7) {
10836 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10837 switch (filter_mode & sel) {
10838 case F_IPSECIDX:
10839 field_shift += W_FT_IPSECIDX;
10840 break;
10841 case F_T7_FCOE:
10842 field_shift += W_FT_FCOE;
10843 break;
10844 case F_T7_PORT:
10845 field_shift += W_FT_PORT;
10846 break;
10847 case F_T7_VNIC_ID:
10848 field_shift += W_FT_VNIC_ID;
10849 break;
10850 case F_T7_VLAN:
10851 field_shift += W_FT_VLAN;
10852 break;
10853 case F_T7_TOS:
10854 field_shift += W_FT_TOS;
10855 break;
10856 case F_T7_PROTOCOL:
10857 field_shift += W_FT_PROTOCOL;
10858 break;
10859 case F_T7_ETHERTYPE:
10860 field_shift += W_FT_ETHERTYPE;
10861 break;
10862 case F_T7_MACMATCH:
10863 field_shift += W_FT_MACMATCH;
10864 break;
10865 case F_T7_MPSHITTYPE:
10866 field_shift += W_FT_MPSHITTYPE;
10867 break;
10868 case F_T7_FRAGMENTATION:
10869 field_shift += W_FT_FRAGMENTATION;
10870 break;
10871 case F_ROCE:
10872 field_shift += W_FT_ROCE;
10873 break;
10874 case F_SYNONLY:
10875 field_shift += W_FT_SYNONLY;
10876 break;
10877 case F_TCPFLAGS:
10878 field_shift += W_FT_TCPFLAGS;
10879 break;
10880 }
10881 }
10882 return field_shift;
10883 }
10884
10885 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10886 switch (filter_mode & sel) {
10887 case F_FCOE:
10888 field_shift += W_FT_FCOE;
10889 break;
10890 case F_PORT:
10891 field_shift += W_FT_PORT;
10892 break;
10893 case F_VNIC_ID:
10894 field_shift += W_FT_VNIC_ID;
10895 break;
10896 case F_VLAN:
10897 field_shift += W_FT_VLAN;
10898 break;
10899 case F_TOS:
10900 field_shift += W_FT_TOS;
10901 break;
10902 case F_PROTOCOL:
10903 field_shift += W_FT_PROTOCOL;
10904 break;
10905 case F_ETHERTYPE:
10906 field_shift += W_FT_ETHERTYPE;
10907 break;
10908 case F_MACMATCH:
10909 field_shift += W_FT_MACMATCH;
10910 break;
10911 case F_MPSHITTYPE:
10912 field_shift += W_FT_MPSHITTYPE;
10913 break;
10914 case F_FRAGMENTATION:
10915 field_shift += W_FT_FRAGMENTATION;
10916 break;
10917 }
10918 }
10919 return field_shift;
10920 }
10921
/**
 *	t4_port_init - initialize a physical port's state
 *	@adap: the adapter
 *	@mbox: mailbox to use for firmware commands
 *	@pf: the PF that will own the port's first VI
 *	@vf: the VF that will own the port's first VI (0 if PF-owned)
 *	@port_id: the driver's index for the port
 *
 *	Resolves the port's hardware channel mappings, allocates its first
 *	virtual interface, stores the returned MAC address, and queries the
 *	firmware for the VI's RSS slice base.  Returns 0 on success or a
 *	negative error from VI allocation.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;
	struct vi_info *vi = &p->vi[0];

	/* Find the (port_id + 1)-th set bit in portvec: the HW port #. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	p->hw_port = j;
	p->tx_chan = t4_get_tx_c_chan(adap, j);
	p->rx_chan = t4_get_rx_c_chan(adap, j);
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);

	/* VFs may query port info only if firmware granted the capability. */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		t4_update_port_info(p);
	}

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
	    &vi->vfvld, &vi->vin);
	if (ret < 0)
		return ret;

	vi->viid = ret;	/* t4_alloc_vi returns the VI id on success */
	t4_os_set_hw_addr(p, addr);

	/* Ask firmware for the VI's RSS slice base. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		vi->rss_base = 0xffff;	/* sentinel: RSS info unavailable */
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	return 0;
}
10968
/*
 * Read the configuration of one CIM inbound queue on the given uP core.
 * Any of @base, @size (bytes), and @thres may be NULL if not wanted.
 */
static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
				      u16 *base, u16 *size, u16 *thres)
{
	unsigned int sel, cfg, unit;

	if (chip_id(adap) > CHELSIO_T6) {
		sel = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		unit = 512;	/* T7+: reported in 512-byte units */
	} else {
		sel = F_IBQSELECT | V_QUENUMSELECT(qid);
		unit = 256;	/* pre-T7: reported in 256-byte units */
	}

	/* Select the queue, then read back its configuration. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, sel);
	cfg = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	if (base != NULL)
		*base = G_CIMQBASE(cfg) * unit;
	if (size != NULL)
		*size = G_CIMQSIZE(cfg) * unit;
	if (thres != NULL)
		*thres = G_QUEFULLTHRSH(cfg) * 8;	/* 8-byte units */
}
10994
/*
 * Read the configuration of one CIM outbound queue on the given uP core.
 * Either of @base and @size (bytes) may be NULL if not wanted.
 */
static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
				      u16 *base, u16 *size)
{
	unsigned int sel, cfg, unit;

	if (chip_id(adap) > CHELSIO_T6) {
		sel = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
		    V_CORESELECT(coreid);
		unit = 512;	/* T7+: reported in 512-byte units */
	} else {
		sel = F_OBQSELECT | V_QUENUMSELECT(qid);
		unit = 256;	/* pre-T7: reported in 256-byte units */
	}

	/* Select the queue, then read back its configuration. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, sel);
	cfg = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	if (base != NULL)
		*base = G_CIMQBASE(cfg) * unit;
	if (size != NULL)
		*size = G_CIMQSIZE(cfg) * unit;
}
11018
11019 /**
11020 * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
11021 * @adap: the adapter
11022 * @coreid: the uP coreid
11023 * @base: holds the queue base addresses in bytes
11024 * @size: holds the queue sizes in bytes
11025 * @thres: holds the queue full thresholds in bytes
11026 *
11027 * Returns the current configuration of the CIM queues, starting with
11028 * the IBQs, then the OBQs, on a specific @coreid.
11029 */
t4_read_cimq_cfg_core(struct adapter * adap,u8 coreid,u16 * base,u16 * size,u16 * thres)11030 void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
11031 u16 *size, u16 *thres)
11032 {
11033 unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
11034 unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
11035 unsigned int i;
11036
11037 for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
11038 t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
11039
11040 for (i = 0; i < cim_num_obq; i++, base++, size++)
11041 t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
11042 }
11043
/*
 * Read one 32-bit word of an IBQ via the CIM debug interface on the given
 * uP core.  Returns 0 on success, negative on timeout.
 */
static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
				     u32 *data)
{
	unsigned int cfg;
	int err;

	/*
	 * IBQ debug read access may be locked out for 3-10ms; poll for up
	 * to 1 second in 1us steps.
	 */
	const int attempts = 1000000;

	if (chip_id(adap) > CHELSIO_T6)
		cfg = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
	else
		cfg = V_IBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, cfg | F_IBQDBGEN);
	err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
	    attempts, 1);
	if (err != 0)
		return err;

	*data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	return 0;
}
11069
11070 /**
11071 * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
11072 * specific core
11073 * @adap: the adapter
11074 * @coreid: the uP coreid
11075 * @qid: the queue index
11076 * @data: where to store the queue contents
11077 * @n: capacity of @data in 32-bit words
11078 *
11079 * Reads the contents of the selected CIM queue starting at address 0 up
11080 * to the capacity of @data on a specific @coreid. @n must be a multiple
11081 * of 4. Returns < 0 on error and the number of 32-bit words actually
11082 * read on success.
11083 */
t4_read_cim_ibq_core(struct adapter * adap,u8 coreid,u32 qid,u32 * data,size_t n)11084 int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
11085 size_t n)
11086 {
11087 unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
11088 u16 i, addr, nwords;
11089 int ret;
11090
11091 if (qid > (cim_num_ibq - 1) || (n & 3))
11092 return -EINVAL;
11093
11094 t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
11095 addr >>= sizeof(u16);
11096 nwords >>= sizeof(u16);
11097 if (n > nwords)
11098 n = nwords;
11099
11100 for (i = 0; i < n; i++, addr++, data++) {
11101 ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
11102 if (ret < 0)
11103 return ret;
11104 }
11105
11106 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
11107 return i;
11108 }
11109
/*
 * Read one 32-bit word of an OBQ via the CIM debug interface on the given
 * uP core.  Returns 0 on success, negative on timeout.
 */
static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
				     u32 *data)
{
	unsigned int cfg;
	int err;

	if (chip_id(adap) > CHELSIO_T6)
		cfg = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
	else
		cfg = V_OBQDBGADDR(addr);

	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, cfg | F_OBQDBGEN);
	err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
	if (err != 0)
		return err;

	*data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	return 0;
}
11129
11130 /**
11131 * t4_read_cim_obq_core - read the contents of a CIM outbound queue on
11132 * specific core
11133 * @adap: the adapter
11134 * @coreid: the uP coreid
11135 * @qid: the queue index
11136 * @data: where to store the queue contents
11137 * @n: capacity of @data in 32-bit words
11138 *
11139 * Reads the contents of the selected CIM queue starting at address 0 up
11140 * to the capacity of @data on specific @coreid. @n must be a multiple
11141 * of 4. Returns < 0 on error and the number of 32-bit words actually
11142 * read on success.
11143 */
t4_read_cim_obq_core(struct adapter * adap,u8 coreid,u32 qid,u32 * data,size_t n)11144 int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
11145 size_t n)
11146 {
11147 unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
11148 u16 i, addr, nwords;
11149 int ret;
11150
11151 if ((qid > (cim_num_obq - 1)) || (n & 3))
11152 return -EINVAL;
11153
11154 t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
11155 addr >>= sizeof(u16);
11156 nwords >>= sizeof(u16);
11157 if (n > nwords)
11158 n = nwords;
11159
11160 for (i = 0; i < n; i++, addr++, data++) {
11161 ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
11162 if (ret < 0)
11163 return ret;
11164 }
11165
11166 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
11167 return i;
11168 }
11169
11170 /**
11171 * t4_cim_read_core - read a block from CIM internal address space
11172 * of a control register group on specific core.
11173 * @adap: the adapter
11174 * @group: the control register group to select for read
11175 * @coreid: the uP coreid
11176 * @addr: the start address within the CIM address space
11177 * @n: number of words to read
11178 * @valp: where to store the result
11179 *
11180 * Reads a block of 4-byte words from the CIM intenal address space
11181 * of a control register @group on a specific @coreid.
11182 */
t4_cim_read_core(struct adapter * adap,u8 group,u8 coreid,unsigned int addr,unsigned int n,unsigned int * valp)11183 int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
11184 unsigned int addr, unsigned int n,
11185 unsigned int *valp)
11186 {
11187 unsigned int hostbusy, v = 0;
11188 int ret = 0;
11189
11190 if (chip_id(adap) > CHELSIO_T6) {
11191 hostbusy = F_T7_HOSTBUSY;
11192 v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
11193 } else {
11194 hostbusy = F_HOSTBUSY;
11195 }
11196
11197 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
11198 return -EBUSY;
11199
11200 for ( ; !ret && n--; addr += 4) {
11201 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
11202 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
11203 0, 5, 2);
11204 if (!ret)
11205 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
11206 }
11207
11208 return ret;
11209 }
11210
11211 /**
11212 * t4_cim_write_core - write a block into CIM internal address space
11213 * of a control register group on specific core.
11214 * @adap: the adapter
11215 * @group: the control register group to select for write
11216 * @coreid: the uP coreid
11217 * @addr: the start address within the CIM address space
11218 * @n: number of words to write
11219 * @valp: set of values to write
11220 *
11221 * Writes a block of 4-byte words into the CIM intenal address space
11222 * of a control register @group on a specific @coreid.
11223 */
t4_cim_write_core(struct adapter * adap,u8 group,u8 coreid,unsigned int addr,unsigned int n,const unsigned int * valp)11224 int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
11225 unsigned int addr, unsigned int n,
11226 const unsigned int *valp)
11227 {
11228 unsigned int hostbusy, v;
11229 int ret = 0;
11230
11231 if (chip_id(adap) > CHELSIO_T6) {
11232 hostbusy = F_T7_HOSTBUSY;
11233 v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
11234 V_HOSTCORESEL(coreid);
11235 } else {
11236 hostbusy = F_HOSTBUSY;
11237 v = F_HOSTWRITE;
11238 }
11239
11240 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
11241 return -EBUSY;
11242
11243 for ( ; !ret && n--; addr += 4) {
11244 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
11245 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
11246 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
11247 0, 5, 2);
11248 }
11249
11250 return ret;
11251 }
11252
11253 /**
11254 * t4_cim_read_la_core - read CIM LA capture buffer on specific core
11255 * @adap: the adapter
11256 * @coreid: uP coreid
11257 * @la_buf: where to store the LA data
11258 * @wrptr: the HW write pointer within the capture buffer
11259 *
11260 * Reads the contents of the CIM LA buffer on a specific @coreid
11261 * with the most recent entry at the end of the returned data
11262 * and with the entry at @wrptr first. We try to leave the LA
11263 * in the running state we find it in.
11264 */
t4_cim_read_la_core(struct adapter * adap,u8 coreid,u32 * la_buf,u32 * wrptr)11265 int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
11266 u32 *wrptr)
11267 {
11268 unsigned int cfg, val, idx;
11269 int i, ret;
11270
11271 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
11272 if (ret)
11273 return ret;
11274
11275 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
11276 val = 0;
11277 ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
11278 &val);
11279 if (ret)
11280 return ret;
11281 }
11282
11283 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
11284 if (ret)
11285 goto restart;
11286
11287 idx = G_UPDBGLAWRPTR(val);
11288 if (wrptr)
11289 *wrptr = idx;
11290
11291 for (i = 0; i < adap->params.cim_la_size; i++) {
11292 val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
11293 ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
11294 &val);
11295 if (ret)
11296 break;
11297 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
11298 &val);
11299 if (ret)
11300 break;
11301 if (val & F_UPDBGLARDEN) {
11302 ret = -ETIMEDOUT;
11303 break;
11304 }
11305 ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
11306 &la_buf[i]);
11307 if (ret)
11308 break;
11309
11310 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
11311 * identify the 32-bit portion of the full 312-bit data
11312 */
11313 if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
11314 idx = (idx & 0xff0) + 0x10;
11315 else
11316 idx++;
11317 /* address can't exceed 0xfff */
11318 idx &= M_UPDBGLARDPTR;
11319 }
11320 restart:
11321 if (cfg & F_UPDBGLAEN) {
11322 int r;
11323
11324 val = cfg & ~F_UPDBGLARDEN;
11325 r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
11326 &val);
11327 if (!ret)
11328 ret = r;
11329 }
11330
11331 return ret;
11332 }
11333
11334 /**
11335 * t4_tp_read_la - read TP LA capture buffer
11336 * @adap: the adapter
11337 * @la_buf: where to store the LA data
11338 * @wrptr: the HW write pointer within the capture buffer
11339 *
11340 * Reads the contents of the TP LA buffer with the most recent entry at
11341 * the end of the returned data and with the entry at @wrptr first.
11342 * We leave the LA in the running state we find it in.
11343 */
t4_tp_read_la(struct adapter * adap,u64 * la_buf,unsigned int * wrptr)11344 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
11345 {
11346 bool last_incomplete;
11347 unsigned int i, cfg, val, idx;
11348
11349 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
11350 if (cfg & F_DBGLAENABLE) /* freeze LA */
11351 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11352 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
11353
11354 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
11355 idx = G_DBGLAWPTR(val);
11356 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
11357 if (last_incomplete)
11358 idx = (idx + 1) & M_DBGLARPTR;
11359 if (wrptr)
11360 *wrptr = idx;
11361
11362 val &= 0xffff;
11363 val &= ~V_DBGLARPTR(M_DBGLARPTR);
11364 val |= adap->params.tp.la_mask;
11365
11366 for (i = 0; i < TPLA_SIZE; i++) {
11367 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
11368 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
11369 idx = (idx + 1) & M_DBGLARPTR;
11370 }
11371
11372 /* Wipe out last entry if it isn't valid */
11373 if (last_incomplete)
11374 la_buf[TPLA_SIZE - 1] = ~0ULL;
11375
11376 if (cfg & F_DBGLAENABLE) /* restore running state */
11377 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11378 cfg | adap->params.tp.la_mask);
11379 }
11380
11381 /*
11382 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
11383 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
11384 * state for more than the Warning Threshold then we'll issue a warning about
11385 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
11386 * appears to be hung every Warning Repeat second till the situation clears.
11387 * If the situation clears, we'll note that as well.
11388 */
11389 #define SGE_IDMA_WARN_THRESH 1
11390 #define SGE_IDMA_WARN_REPEAT 300
11391
11392 /**
11393 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
11394 * @adapter: the adapter
11395 * @idma: the adapter IDMA Monitor state
11396 *
11397 * Initialize the state of an SGE Ingress DMA Monitor.
11398 */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)11399 void t4_idma_monitor_init(struct adapter *adapter,
11400 struct sge_idma_monitor_state *idma)
11401 {
11402 /* Initialize the state variables for detecting an SGE Ingress DMA
11403 * hang. The SGE has internal counters which count up on each clock
11404 * tick whenever the SGE finds its Ingress DMA State Engines in the
11405 * same state they were on the previous clock tick. The clock used is
11406 * the Core Clock so we have a limit on the maximum "time" they can
11407 * record; typically a very small number of seconds. For instance,
11408 * with a 600MHz Core Clock, we can only count up to a bit more than
11409 * 7s. So we'll synthesize a larger counter in order to not run the
11410 * risk of having the "timers" overflow and give us the flexibility to
11411 * maintain a Hung SGE State Machine of our own which operates across
11412 * a longer time frame.
11413 */
11414 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
11415 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
11416 }
11417
11418 /**
11419 * t4_idma_monitor - monitor SGE Ingress DMA state
11420 * @adapter: the adapter
11421 * @idma: the adapter IDMA Monitor state
11422 * @hz: number of ticks/second
11423 * @ticks: number of ticks since the last IDMA Monitor call
11424 */
t4_idma_monitor(struct adapter * adapter,struct sge_idma_monitor_state * idma,int hz,int ticks)11425 void t4_idma_monitor(struct adapter *adapter,
11426 struct sge_idma_monitor_state *idma,
11427 int hz, int ticks)
11428 {
11429 int i, idma_same_state_cnt[2];
11430
11431 /* Read the SGE Debug Ingress DMA Same State Count registers. These
11432 * are counters inside the SGE which count up on each clock when the
11433 * SGE finds its Ingress DMA State Engines in the same states they
11434 * were in the previous clock. The counters will peg out at
11435 * 0xffffffff without wrapping around so once they pass the 1s
11436 * threshold they'll stay above that till the IDMA state changes.
11437 */
11438 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
11439 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
11440 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11441
11442 for (i = 0; i < 2; i++) {
11443 u32 debug0, debug11;
11444
11445 /* If the Ingress DMA Same State Counter ("timer") is less
11446 * than 1s, then we can reset our synthesized Stall Timer and
11447 * continue. If we have previously emitted warnings about a
11448 * potential stalled Ingress Queue, issue a note indicating
11449 * that the Ingress Queue has resumed forward progress.
11450 */
11451 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
11452 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
11453 CH_WARN(adapter, "SGE idma%d, queue %u, "
11454 "resumed after %d seconds\n",
11455 i, idma->idma_qid[i],
11456 idma->idma_stalled[i]/hz);
11457 idma->idma_stalled[i] = 0;
11458 continue;
11459 }
11460
11461 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
11462 * domain. The first time we get here it'll be because we
11463 * passed the 1s Threshold; each additional time it'll be
11464 * because the RX Timer Callback is being fired on its regular
11465 * schedule.
11466 *
11467 * If the stall is below our Potential Hung Ingress Queue
11468 * Warning Threshold, continue.
11469 */
11470 if (idma->idma_stalled[i] == 0) {
11471 idma->idma_stalled[i] = hz;
11472 idma->idma_warn[i] = 0;
11473 } else {
11474 idma->idma_stalled[i] += ticks;
11475 idma->idma_warn[i] -= ticks;
11476 }
11477
11478 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
11479 continue;
11480
11481 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
11482 */
11483 if (idma->idma_warn[i] > 0)
11484 continue;
11485 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
11486
11487 /* Read and save the SGE IDMA State and Queue ID information.
11488 * We do this every time in case it changes across time ...
11489 * can't be too careful ...
11490 */
11491 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
11492 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11493 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
11494
11495 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
11496 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11497 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
11498
11499 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
11500 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
11501 i, idma->idma_qid[i], idma->idma_state[i],
11502 idma->idma_stalled[i]/hz,
11503 debug0, debug11);
11504 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
11505 }
11506 }
11507
11508 /**
11509 * t4_set_vf_mac - Set MAC address for the specified VF
11510 * @adapter: The adapter
11511 * @pf: the PF used to instantiate the VFs
11512 * @vf: one of the VFs instantiated by the specified PF
11513 * @naddr: the number of MAC addresses
11514 * @addr: the MAC address(es) to be set to the specified VF
11515 */
t4_set_vf_mac(struct adapter * adapter,unsigned int pf,unsigned int vf,unsigned int naddr,u8 * addr)11516 int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
11517 unsigned int naddr, u8 *addr)
11518 {
11519 struct fw_acl_mac_cmd cmd;
11520
11521 memset(&cmd, 0, sizeof(cmd));
11522 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
11523 F_FW_CMD_REQUEST |
11524 F_FW_CMD_WRITE |
11525 V_FW_ACL_MAC_CMD_PFN(pf) |
11526 V_FW_ACL_MAC_CMD_VFN(vf));
11527
11528 /* Note: Do not enable the ACL */
11529 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
11530 cmd.nmac = naddr;
11531
11532 switch (pf) {
11533 case 3:
11534 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
11535 break;
11536 case 2:
11537 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
11538 break;
11539 case 1:
11540 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
11541 break;
11542 case 0:
11543 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
11544 break;
11545 }
11546
11547 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
11548 }
11549
11550 /**
11551 * t4_read_pace_tbl - read the pace table
11552 * @adap: the adapter
11553 * @pace_vals: holds the returned values
11554 *
11555 * Returns the values of TP's pace table in microseconds.
11556 */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])11557 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
11558 {
11559 unsigned int i, v;
11560
11561 for (i = 0; i < NTX_SCHED; i++) {
11562 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
11563 v = t4_read_reg(adap, A_TP_PACE_TABLE);
11564 pace_vals[i] = dack_ticks_to_usec(adap, v);
11565 }
11566 }
11567
11568 /**
11569 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
11570 * @adap: the adapter
11571 * @sched: the scheduler index
11572 * @kbps: the byte rate in Kbps
11573 * @ipg: the interpacket delay in tenths of nanoseconds
11574 *
11575 * Return the current configuration of a HW Tx scheduler.
11576 */
t4_get_tx_sched(struct adapter * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg,bool sleep_ok)11577 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
11578 unsigned int *ipg, bool sleep_ok)
11579 {
11580 unsigned int v, addr, bpt, cpt;
11581
11582 if (kbps) {
11583 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
11584 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11585 if (sched & 1)
11586 v >>= 16;
11587 bpt = (v >> 8) & 0xff;
11588 cpt = v & 0xff;
11589 if (!cpt)
11590 *kbps = 0; /* scheduler disabled */
11591 else {
11592 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
11593 *kbps = (v * bpt) / 125;
11594 }
11595 }
11596 if (ipg) {
11597 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
11598 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11599 if (sched & 1)
11600 v >>= 16;
11601 v &= 0xffff;
11602 *ipg = (10000 * v) / core_ticks_per_usec(adap);
11603 }
11604 }
11605
11606 /**
11607 * t4_load_cfg - download config file
11608 * @adap: the adapter
11609 * @cfg_data: the cfg text file to write
11610 * @size: text file size
11611 *
11612 * Write the supplied config text file to the card's serial flash.
11613 */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11614 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
11615 {
11616 int ret, i, n, cfg_addr;
11617 unsigned int addr, len;
11618 unsigned int flash_cfg_start_sec;
11619
11620 cfg_addr = t4_flash_cfg_addr(adap, &len);
11621 if (cfg_addr < 0)
11622 return cfg_addr;
11623
11624 if (size > len) {
11625 CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
11626 return -EFBIG;
11627 }
11628
11629 flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
11630 i = DIV_ROUND_UP(len, SF_SEC_SIZE);
11631 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11632 flash_cfg_start_sec + i - 1);
11633 /*
11634 * If size == 0 then we're simply erasing the FLASH sectors associated
11635 * with the on-adapter Firmware Configuration File.
11636 */
11637 if (ret || size == 0)
11638 goto out;
11639
11640 /* this will write to the flash up to SF_PAGE_SIZE at a time */
11641 addr = cfg_addr;
11642 for (i = 0; i < size; i += SF_PAGE_SIZE) {
11643 n = min(size - i, SF_PAGE_SIZE);
11644 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
11645 if (ret)
11646 goto out;
11647 addr += SF_PAGE_SIZE;
11648 cfg_data += SF_PAGE_SIZE;
11649 }
11650
11651 out:
11652 if (ret)
11653 CH_ERR(adap, "config file %s failed %d\n",
11654 (size == 0 ? "clear" : "download"), ret);
11655 return ret;
11656 }
11657
11658 /**
11659 * t5_fw_init_extern_mem - initialize the external memory
11660 * @adap: the adapter
11661 *
11662 * Initializes the external memory on T5.
11663 */
t5_fw_init_extern_mem(struct adapter * adap)11664 int t5_fw_init_extern_mem(struct adapter *adap)
11665 {
11666 u32 params[1], val[1];
11667 int ret;
11668
11669 if (!is_t5(adap))
11670 return 0;
11671
11672 val[0] = 0xff; /* Initialize all MCs */
11673 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11674 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
11675 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
11676 FW_CMD_MAX_TIMEOUT);
11677
11678 return ret;
11679 }
11680
/* BIOS boot headers.  All multi-byte fields are stored little-endian in the
 * ROM image, hence the byte-array declarations. */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */
11687
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];	/* Initialization entry point */
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */
11697
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature.  The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512.  Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
	/*
	 * Compression type definition
	 * 0x0: uncompressed
	 * 0x1: Compressed
	 * 0x2-0xFFFF: Reserved
	 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;		/* EFI PCI Expansion ROM Header */
11716
/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature.  The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length.  Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
	/*
	 * PCI Expansion ROM Code Types
	 * 0x00: Intel IA-32, PC-AT compatible.  Legacy
	 * 0x01: Open Firmware standard for PCI.  FCODE
	 * 0x02: Hewlett-Packard PA RISC.  HP reserved
	 * 0x03: EFI Image.  EFI
	 * 0x04-0xFF: Reserved.
	 */
	u8	indicator;	/* Indicator.  Bit 7 set on the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI__DATA_STRUCTURE */
11740
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 chunks * 512B = 512KB max */
	VENDOR_ID = 0x1425,        /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" as a little-endian u32 */
};
11751
/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID into every chained image of the boot BIOS
 * ROM whose code type allows it, re-computing the legacy image checksum
 * where required.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's.
	 * NOTE(review): assumes a well-formed image chain (non-zero size512
	 * and a valid pcir_offset in every header); a malformed image would
	 * make this loop spin forever -- confirm callers validate first.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID for code types that allow it:
		 * 0x00: Legacy image.  Okay to modify (checksum fixed below)
		 * 0x01: FCODE.  Do not modify
		 * 0x03: EFI image.  Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): plain 16-bit store, no cpu_to_le16()
			 * -- appears to assume a little-endian host; confirm
			 * for big-endian platforms.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0 so it does not feed
			 * into the recomputation below.
			 */
			header->cksum = 0x0;

			/*
			 * Sum every byte of this image; size512 is the image
			 * length in 512-byte units.
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * The checksum byte must make the whole image sum to
			 * zero mod 256; write the negated sum directly into
			 * the cksum field (offset 7 of the legacy header).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter.  No
			 * checksum recomputation is done for EFI images here.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM (bit 7 of the PCIR indicator byte).
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
11830
11831 /*
11832 * t4_load_boot - download boot flash
11833 * @adapter: the adapter
11834 * @boot_data: the boot image to write
11835 * @boot_addr: offset in flash to write boot_data
11836 * @size: image size
11837 *
11838 * Write the supplied boot image to the card's serial flash.
11839 * The boot image has the following sections: a 28-byte header and the
11840 * boot image.
11841 */
t4_load_boot(struct adapter * adap,u8 * boot_data,unsigned int boot_addr,unsigned int size)11842 int t4_load_boot(struct adapter *adap, u8 *boot_data,
11843 unsigned int boot_addr, unsigned int size)
11844 {
11845 pci_exp_rom_header_t *header;
11846 int pcir_offset ;
11847 pcir_data_t *pcir_header;
11848 int ret, addr;
11849 uint16_t device_id;
11850 unsigned int i, start, len;
11851 unsigned int boot_sector = boot_addr * 1024;
11852
11853 /*
11854 * Make sure the boot image does not exceed its available space.
11855 */
11856 len = 0;
11857 start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
11858 if (boot_sector + size > start + len) {
11859 CH_ERR(adap, "boot data is larger than available BOOT area\n");
11860 return -EFBIG;
11861 }
11862
11863 /*
11864 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
11865 * and Boot configuration data sections. These 3 boot sections span
11866 * the entire FLASH_LOC_BOOT_AREA.
11867 */
11868 i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
11869 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
11870 (boot_sector >> 16) + i - 1);
11871
11872 /*
11873 * If size == 0 then we're simply erasing the FLASH sectors associated
11874 * with the on-adapter option ROM file
11875 */
11876 if (ret || (size == 0))
11877 goto out;
11878
11879 /* Get boot header */
11880 header = (pci_exp_rom_header_t *)boot_data;
11881 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
11882 /* PCIR Data Structure */
11883 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
11884
11885 /*
11886 * Perform some primitive sanity testing to avoid accidentally
11887 * writing garbage over the boot sectors. We ought to check for
11888 * more but it's not worth it for now ...
11889 */
11890 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
11891 CH_ERR(adap, "boot image too small/large\n");
11892 return -EFBIG;
11893 }
11894
11895 #ifndef CHELSIO_T4_DIAGS
11896 /*
11897 * Check BOOT ROM header signature
11898 */
11899 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
11900 CH_ERR(adap, "Boot image missing signature\n");
11901 return -EINVAL;
11902 }
11903
11904 /*
11905 * Check PCI header signature
11906 */
11907 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
11908 CH_ERR(adap, "PCI header missing signature\n");
11909 return -EINVAL;
11910 }
11911
11912 /*
11913 * Check Vendor ID matches Chelsio ID
11914 */
11915 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
11916 CH_ERR(adap, "Vendor ID missing signature\n");
11917 return -EINVAL;
11918 }
11919 #endif
11920
11921 /*
11922 * Retrieve adapter's device ID
11923 */
11924 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
11925 /* Want to deal with PF 0 so I strip off PF 4 indicator */
11926 device_id = device_id & 0xf0ff;
11927
11928 /*
11929 * Check PCIE Device ID
11930 */
11931 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
11932 /*
11933 * Change the device ID in the Boot BIOS image to match
11934 * the Device ID of the current adapter.
11935 */
11936 modify_device_id(device_id, boot_data);
11937 }
11938
11939 /*
11940 * Skip over the first SF_PAGE_SIZE worth of data and write it after
11941 * we finish copying the rest of the boot image. This will ensure
11942 * that the BIOS boot header will only be written if the boot image
11943 * was written in full.
11944 */
11945 addr = boot_sector;
11946 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
11947 addr += SF_PAGE_SIZE;
11948 boot_data += SF_PAGE_SIZE;
11949 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
11950 if (ret)
11951 goto out;
11952 }
11953
11954 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
11955 (const u8 *)header, 0);
11956
11957 out:
11958 if (ret)
11959 CH_ERR(adap, "boot image download failed, error %d\n", ret);
11960 return ret;
11961 }
11962
11963 /*
11964 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
11965 * @adapter: the adapter
11966 *
11967 * Return the address within the flash where the OptionROM Configuration
11968 * is stored, or an error if the device FLASH is too small to contain
11969 * a OptionROM Configuration.
11970 */
t4_flash_bootcfg_addr(struct adapter * adapter,unsigned int * lenp)11971 static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
11972 {
11973 unsigned int len = 0;
11974 const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
11975
11976 /*
11977 * If the device FLASH isn't large enough to hold a Firmware
11978 * Configuration File, return an error.
11979 */
11980 if (adapter->params.sf_size < start + len)
11981 return -ENOSPC;
11982 if (lenp != NULL)
11983 *lenp = len;
11984 return (start);
11985 }
11986
t4_load_bootcfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11987 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
11988 {
11989 int ret, i, n, cfg_addr;
11990 unsigned int addr, len;
11991 unsigned int flash_cfg_start_sec;
11992
11993 cfg_addr = t4_flash_bootcfg_addr(adap, &len);
11994 if (cfg_addr < 0)
11995 return cfg_addr;
11996
11997 if (size > len) {
11998 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
11999 return -EFBIG;
12000 }
12001
12002 flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
12003 i = DIV_ROUND_UP(len, SF_SEC_SIZE);
12004 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
12005 flash_cfg_start_sec + i - 1);
12006
12007 /*
12008 * If size == 0 then we're simply erasing the FLASH sectors associated
12009 * with the on-adapter OptionROM Configuration File.
12010 */
12011 if (ret || size == 0)
12012 goto out;
12013
12014 /* this will write to the flash up to SF_PAGE_SIZE at a time */
12015 addr = cfg_addr;
12016 for (i = 0; i < size; i += SF_PAGE_SIZE) {
12017 n = min(size - i, SF_PAGE_SIZE);
12018 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
12019 if (ret)
12020 goto out;
12021 addr += SF_PAGE_SIZE;
12022 cfg_data += SF_PAGE_SIZE;
12023 }
12024
12025 out:
12026 if (ret)
12027 CH_ERR(adap, "boot config data %s failed %d\n",
12028 (size == 0 ? "clear" : "download"), ret);
12029 return ret;
12030 }
12031
12032 /**
12033 * t4_set_filter_cfg - set up filter mode/mask and ingress config.
12034 * @adap: the adapter
12035 * @mode: a bitmap selecting which optional filter components to enable
12036 * @mask: a bitmap selecting which components to enable in filter mask
12037 * @vnic_mode: the ingress config/vnic mode setting
12038 *
12039 * Sets the filter mode and mask by selecting the optional components to
12040 * enable in filter tuples. Returns 0 on success and a negative error if
12041 * the requested mode needs more bits than are available for optional
12042 * components. The filter mask must be a subset of the filter mode.
12043 */
t4_set_filter_cfg(struct adapter * adap,int mode,int mask,int vnic_mode)12044 int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
12045 {
12046 int i, nbits, rc;
12047 uint32_t param, val;
12048 uint16_t fmode, fmask;
12049 const int maxbits = adap->chip_params->filter_opt_len;
12050 const int nopt = adap->chip_params->filter_num_opt;
12051 int width;
12052
12053 if (mode != -1 || mask != -1) {
12054 if (mode != -1) {
12055 fmode = mode;
12056 nbits = 0;
12057 for (i = 0; i < nopt; i++) {
12058 if (fmode & (1 << i))
12059 nbits += t4_filter_field_width(adap, i);
12060 }
12061 if (nbits > maxbits) {
12062 CH_ERR(adap, "optional fields in the filter "
12063 "mode (0x%x) add up to %d bits "
12064 "(must be <= %db). Remove some fields and "
12065 "try again.\n", fmode, nbits, maxbits);
12066 return -E2BIG;
12067 }
12068
12069 /*
12070 * Hardware < T7 wants the bits to be maxed out. Keep
12071 * setting them until there's no room for more.
12072 */
12073 if (chip_id(adap) < CHELSIO_T7) {
12074 for (i = 0; i < nopt; i++) {
12075 if (fmode & (1 << i))
12076 continue;
12077 width = t4_filter_field_width(adap, i);
12078 if (nbits + width <= maxbits) {
12079 fmode |= 1 << i;
12080 nbits += width;
12081 if (nbits == maxbits)
12082 break;
12083 }
12084 }
12085 }
12086
12087 fmask = fmode & adap->params.tp.filter_mask;
12088 if (fmask != adap->params.tp.filter_mask) {
12089 CH_WARN(adap,
12090 "filter mask will be changed from 0x%x to "
12091 "0x%x to comply with the filter mode (0x%x).\n",
12092 adap->params.tp.filter_mask, fmask, fmode);
12093 }
12094 } else {
12095 fmode = adap->params.tp.filter_mode;
12096 fmask = mask;
12097 if ((fmode | fmask) != fmode) {
12098 CH_ERR(adap,
12099 "filter mask (0x%x) must be a subset of "
12100 "the filter mode (0x%x).\n", fmask, fmode);
12101 return -EINVAL;
12102 }
12103 }
12104
12105 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12106 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
12107 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
12108 val = V_FW_PARAMS_PARAM_FILTER_MODE(fmode) |
12109 V_FW_PARAMS_PARAM_FILTER_MASK(fmask);
12110 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m,
12111 &val);
12112 if (rc < 0)
12113 return rc;
12114 }
12115
12116 if (vnic_mode != -1) {
12117 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12118 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
12119 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
12120 val = vnic_mode;
12121 rc = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m,
12122 &val);
12123 if (rc < 0)
12124 return rc;
12125 }
12126
12127 /* Refresh. */
12128 read_filter_mode_and_ingress_config(adap);
12129
12130 return 0;
12131 }
12132
/**
 *	t4_clr_port_stats - clear port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Clear HW statistics for the given port: the MPS per-port TX/RX
 *	statistics registers of every channel backing the port, plus the
 *	per-buffer-group MAC drop/truncate counters of each buffer group
 *	the port uses.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	struct port_info *pi;
	int i, port_id, tx_chan;
	u32 bgmap, port_base_addr;

	/* Map the caller's port index to the driver's port structure. */
	port_id = adap->port_map[idx];
	MPASS(port_id >= 0 && port_id <= adap->params.nports);
	pi = adap->port[port_id];

	/* Walk every TX channel assigned to this port. */
	for (tx_chan = pi->tx_chan;
	    tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
		port_base_addr = t4_port_reg(adap, tx_chan, 0);

		/*
		 * Counters are laid out as lo/hi 32-bit register pairs,
		 * hence the stride of 8.  NOTE(review): only the _L half of
		 * each pair is written; presumably that clears the whole
		 * 64-bit counter -- confirm against the MPS register docs.
		 */
		for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
		    i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
			t4_write_reg(adap, port_base_addr + i, 0);
		for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
		    i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
			t4_write_reg(adap, port_base_addr + i, 0);
	}

	/* Clear the MAC drop/truncate counters of each buffer group in use. */
	bgmap = pi->mps_bg_map;
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
			    A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
			t4_write_reg(adap,
			    A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
		}
}
12170
12171 /**
12172 * t4_i2c_io - read/write I2C data from adapter
12173 * @adap: the adapter
12174 * @port: Port number if per-port device; <0 if not
12175 * @devid: per-port device ID or absolute device ID
12176 * @offset: byte offset into device I2C space
12177 * @len: byte length of I2C space data
12178 * @buf: buffer in which to return I2C data for read
12179 * buffer which holds the I2C data for write
12180 * @write: if true, do a write; else do a read
12181 * Reads/Writes the I2C data from/to the indicated device and location.
12182 */
t4_i2c_io(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf,bool write)12183 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
12184 int port, unsigned int devid,
12185 unsigned int offset, unsigned int len,
12186 u8 *buf, bool write)
12187 {
12188 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
12189 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
12190 int ret = 0;
12191
12192 if (len > I2C_PAGE_SIZE)
12193 return -EINVAL;
12194
12195 /* Dont allow reads that spans multiple pages */
12196 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
12197 return -EINVAL;
12198
12199 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
12200 ldst_cmd.op_to_addrspace =
12201 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
12202 F_FW_CMD_REQUEST |
12203 (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
12204 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
12205 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
12206 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
12207 ldst_cmd.u.i2c.did = devid;
12208
12209 while (len > 0) {
12210 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
12211
12212 ldst_cmd.u.i2c.boffset = offset;
12213 ldst_cmd.u.i2c.blen = i2c_len;
12214
12215 if (write)
12216 memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
12217
12218 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
12219 write ? NULL : &ldst_rpl);
12220 if (ret)
12221 break;
12222
12223 if (!write)
12224 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
12225 offset += i2c_len;
12226 buf += i2c_len;
12227 len -= i2c_len;
12228 }
12229
12230 return ret;
12231 }
12232
/* Read @len bytes from an I2C device; thin wrapper around t4_i2c_io(). */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return (t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false));
}
12240
/* Write @len bytes to an I2C device; thin wrapper around t4_i2c_io(). */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return (t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true));
}
12248
12249 /**
12250 * t4_sge_ctxt_rd - read an SGE context through FW
12251 * @adap: the adapter
12252 * @mbox: mailbox to use for the FW command
12253 * @cid: the context id
12254 * @ctype: the context type
12255 * @data: where to store the context data
12256 *
12257 * Issues a FW command through the given mailbox to read an SGE context.
12258 */
t4_sge_ctxt_rd(struct adapter * adap,unsigned int mbox,unsigned int cid,enum ctxt_type ctype,u32 * data)12259 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
12260 enum ctxt_type ctype, u32 *data)
12261 {
12262 int ret;
12263 struct fw_ldst_cmd c;
12264
12265 if (ctype == CTXT_EGRESS)
12266 ret = FW_LDST_ADDRSPC_SGE_EGRC;
12267 else if (ctype == CTXT_INGRESS)
12268 ret = FW_LDST_ADDRSPC_SGE_INGC;
12269 else if (ctype == CTXT_FLM)
12270 ret = FW_LDST_ADDRSPC_SGE_FLMC;
12271 else
12272 ret = FW_LDST_ADDRSPC_SGE_CONMC;
12273
12274 memset(&c, 0, sizeof(c));
12275 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
12276 F_FW_CMD_REQUEST | F_FW_CMD_READ |
12277 V_FW_LDST_CMD_ADDRSPACE(ret));
12278 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
12279 c.u.idctxt.physid = cpu_to_be32(cid);
12280
12281 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12282 if (ret == 0) {
12283 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
12284 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
12285 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
12286 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
12287 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
12288 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
12289 if (chip_id(adap) > CHELSIO_T6)
12290 data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
12291 }
12292 return ret;
12293 }
12294
/**
 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 *	@adap: the adapter
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Reads an SGE context directly, bypassing FW.  This is only for
 *	debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	/* Kick off the indirect context read, then poll BUSY until done. */
	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret) {
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
		/*
		 * After the loop, i == A_SGE_CTXT_DATA5 + 4; chips newer
		 * than T6 expose one more data word at that address
		 * (presumably SGE_CTXT_DATA6 -- confirm in the register map).
		 */
		if (chip_id(adap) > CHELSIO_T6)
			*data++ = t4_read_reg(adap, i);
	}
	return ret;
}
12320
t4_sched_config(struct adapter * adapter,int type,int minmaxen,int sleep_ok)12321 int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
12322 int sleep_ok)
12323 {
12324 struct fw_sched_cmd cmd;
12325
12326 memset(&cmd, 0, sizeof(cmd));
12327 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12328 F_FW_CMD_REQUEST |
12329 F_FW_CMD_WRITE);
12330 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12331
12332 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
12333 cmd.u.config.type = type;
12334 cmd.u.config.minmaxen = minmaxen;
12335
12336 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12337 NULL, sleep_ok);
12338 }
12339
t4_sched_params(struct adapter * adapter,int type,int level,int mode,int rateunit,int ratemode,int channel,int cl,int minrate,int maxrate,int weight,int pktsize,int burstsize,int sleep_ok)12340 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
12341 int rateunit, int ratemode, int channel, int cl,
12342 int minrate, int maxrate, int weight, int pktsize,
12343 int burstsize, int sleep_ok)
12344 {
12345 struct fw_sched_cmd cmd;
12346
12347 memset(&cmd, 0, sizeof(cmd));
12348 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12349 F_FW_CMD_REQUEST |
12350 F_FW_CMD_WRITE);
12351 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12352
12353 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12354 cmd.u.params.type = type;
12355 cmd.u.params.level = level;
12356 cmd.u.params.mode = mode;
12357 cmd.u.params.ch = channel;
12358 cmd.u.params.cl = cl;
12359 cmd.u.params.unit = rateunit;
12360 cmd.u.params.rate = ratemode;
12361 cmd.u.params.min = cpu_to_be32(minrate);
12362 cmd.u.params.max = cpu_to_be32(maxrate);
12363 cmd.u.params.weight = cpu_to_be16(weight);
12364 cmd.u.params.pktsize = cpu_to_be16(pktsize);
12365 cmd.u.params.burstsize = cpu_to_be16(burstsize);
12366
12367 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12368 NULL, sleep_ok);
12369 }
12370
t4_sched_params_ch_rl(struct adapter * adapter,int channel,int ratemode,unsigned int maxrate,int sleep_ok)12371 int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
12372 unsigned int maxrate, int sleep_ok)
12373 {
12374 struct fw_sched_cmd cmd;
12375
12376 memset(&cmd, 0, sizeof(cmd));
12377 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12378 F_FW_CMD_REQUEST |
12379 F_FW_CMD_WRITE);
12380 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12381
12382 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12383 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
12384 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
12385 cmd.u.params.ch = channel;
12386 cmd.u.params.rate = ratemode; /* REL or ABS */
12387 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */
12388
12389 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12390 NULL, sleep_ok);
12391 }
12392
t4_sched_params_cl_wrr(struct adapter * adapter,int channel,int cl,int weight,int sleep_ok)12393 int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
12394 int weight, int sleep_ok)
12395 {
12396 struct fw_sched_cmd cmd;
12397
12398 if (weight < 0 || weight > 100)
12399 return -EINVAL;
12400
12401 memset(&cmd, 0, sizeof(cmd));
12402 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12403 F_FW_CMD_REQUEST |
12404 F_FW_CMD_WRITE);
12405 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12406
12407 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12408 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
12409 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
12410 cmd.u.params.ch = channel;
12411 cmd.u.params.cl = cl;
12412 cmd.u.params.weight = cpu_to_be16(weight);
12413
12414 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12415 NULL, sleep_ok);
12416 }
12417
t4_sched_params_cl_rl_kbps(struct adapter * adapter,int channel,int cl,int mode,unsigned int maxrate,int pktsize,int sleep_ok)12418 int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
12419 int mode, unsigned int maxrate, int pktsize, int sleep_ok)
12420 {
12421 struct fw_sched_cmd cmd;
12422
12423 memset(&cmd, 0, sizeof(cmd));
12424 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12425 F_FW_CMD_REQUEST |
12426 F_FW_CMD_WRITE);
12427 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12428
12429 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12430 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
12431 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
12432 cmd.u.params.mode = mode;
12433 cmd.u.params.ch = channel;
12434 cmd.u.params.cl = cl;
12435 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
12436 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
12437 cmd.u.params.max = cpu_to_be32(maxrate);
12438 cmd.u.params.pktsize = cpu_to_be16(pktsize);
12439
12440 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12441 NULL, sleep_ok);
12442 }
12443
12444 /*
12445 * t4_config_watchdog - configure (enable/disable) a watchdog timer
12446 * @adapter: the adapter
12447 * @mbox: mailbox to use for the FW command
12448 * @pf: the PF owning the queue
12449 * @vf: the VF owning the queue
12450 * @timeout: watchdog timeout in ms
12451 * @action: watchdog timer / action
12452 *
12453 * There are separate watchdog timers for each possible watchdog
12454 * action. Configure one of the watchdog timers by setting a non-zero
12455 * timeout. Disable a watchdog timer by using a timeout of zero.
12456 */
t4_config_watchdog(struct adapter * adapter,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int timeout,unsigned int action)12457 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
12458 unsigned int pf, unsigned int vf,
12459 unsigned int timeout, unsigned int action)
12460 {
12461 struct fw_watchdog_cmd wdog;
12462 unsigned int ticks;
12463
12464 /*
12465 * The watchdog command expects a timeout in units of 10ms so we need
12466 * to convert it here (via rounding) and force a minimum of one 10ms
12467 * "tick" if the timeout is non-zero but the conversion results in 0
12468 * ticks.
12469 */
12470 ticks = (timeout + 5)/10;
12471 if (timeout && !ticks)
12472 ticks = 1;
12473
12474 memset(&wdog, 0, sizeof wdog);
12475 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
12476 F_FW_CMD_REQUEST |
12477 F_FW_CMD_WRITE |
12478 V_FW_PARAMS_CMD_PFN(pf) |
12479 V_FW_PARAMS_CMD_VFN(vf));
12480 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
12481 wdog.timeout = cpu_to_be32(ticks);
12482 wdog.action = cpu_to_be32(action);
12483
12484 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
12485 }
12486
t4_get_devlog_level(struct adapter * adapter,unsigned int * level)12487 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
12488 {
12489 struct fw_devlog_cmd devlog_cmd;
12490 int ret;
12491
12492 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12493 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12494 F_FW_CMD_REQUEST | F_FW_CMD_READ);
12495 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12496 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12497 sizeof(devlog_cmd), &devlog_cmd);
12498 if (ret)
12499 return ret;
12500
12501 *level = devlog_cmd.level;
12502 return 0;
12503 }
12504
t4_set_devlog_level(struct adapter * adapter,unsigned int level)12505 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
12506 {
12507 struct fw_devlog_cmd devlog_cmd;
12508
12509 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12510 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12511 F_FW_CMD_REQUEST |
12512 F_FW_CMD_WRITE);
12513 devlog_cmd.level = level;
12514 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12515 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12516 sizeof(devlog_cmd), &devlog_cmd);
12517 }
12518
t4_configure_add_smac(struct adapter * adap)12519 int t4_configure_add_smac(struct adapter *adap)
12520 {
12521 unsigned int param, val;
12522 int ret = 0;
12523
12524 adap->params.smac_add_support = 0;
12525 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12526 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
12527 /* Query FW to check if FW supports adding source mac address
12528 * to TCAM feature or not.
12529 * If FW returns 1, driver can use this feature and driver need to send
12530 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
12531 * enable adding smac to TCAM.
12532 */
12533 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12534 if (ret)
12535 return ret;
12536
12537 if (val == 1) {
12538 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
12539 ¶m, &val);
12540 if (!ret)
12541 /* Firmware allows adding explicit TCAM entries.
12542 * Save this internally.
12543 */
12544 adap->params.smac_add_support = 1;
12545 }
12546
12547 return ret;
12548 }
12549
t4_configure_ringbb(struct adapter * adap)12550 int t4_configure_ringbb(struct adapter *adap)
12551 {
12552 unsigned int param, val;
12553 int ret = 0;
12554
12555 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12556 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
12557 /* Query FW to check if FW supports ring switch feature or not.
12558 * If FW returns 1, driver can use this feature and driver need to send
12559 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
12560 * enable the ring backbone configuration.
12561 */
12562 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12563 if (ret < 0) {
12564 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
12565 ret);
12566 goto out;
12567 }
12568
12569 if (val != 1) {
12570 CH_ERR(adap, "FW doesnot support ringbackbone features\n");
12571 goto out;
12572 }
12573
12574 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12575 if (ret < 0) {
12576 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
12577 ret);
12578 goto out;
12579 }
12580
12581 out:
12582 return ret;
12583 }
12584
12585 /*
12586 * t4_set_vlan_acl - Set a VLAN id for the specified VF
12587 * @adapter: the adapter
12588 * @mbox: mailbox to use for the FW command
12589 * @vf: one of the VFs instantiated by the specified PF
12590 * @vlan: The vlanid to be set
12591 *
12592 */
t4_set_vlan_acl(struct adapter * adap,unsigned int pf,unsigned int vf,u16 vlan)12593 int t4_set_vlan_acl(struct adapter *adap, unsigned int pf, unsigned int vf,
12594 u16 vlan)
12595 {
12596 struct fw_acl_vlan_cmd vlan_cmd;
12597 unsigned int enable;
12598
12599 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
12600 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
12601 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
12602 F_FW_CMD_REQUEST |
12603 F_FW_CMD_WRITE |
12604 F_FW_CMD_EXEC |
12605 V_FW_ACL_VLAN_CMD_PFN(pf) |
12606 V_FW_ACL_VLAN_CMD_VFN(vf));
12607 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd) |
12608 V_FW_ACL_VLAN_CMD_PMASK(1 << pf));
12609 /* Drop all packets that donot match vlan id */
12610 vlan_cmd.dropnovlan_fm = (enable
12611 ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
12612 F_FW_ACL_VLAN_CMD_FM)
12613 : 0);
12614 if (enable != 0) {
12615 vlan_cmd.nvlan = 1;
12616 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
12617 }
12618
12619 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
12620 }
12621
12622 /**
12623 * t4_del_mac - Removes the exact-match filter for a MAC address
12624 * @adap: the adapter
12625 * @mbox: mailbox to use for the FW command
12626 * @viid: the VI id
12627 * @addr: the MAC address value
12628 * @smac: if true, delete from only the smac region of MPS
12629 *
12630 * Modifies an exact-match filter and sets it to the new MAC address if
12631 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12632 * latter case the address is added persistently if @persist is %true.
12633 *
12634 * Returns a negative error number or the index of the filter with the new
12635 * MAC value. Note that this index may differ from @idx.
12636 */
t4_del_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,const u8 * addr,bool smac)12637 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12638 const u8 *addr, bool smac)
12639 {
12640 int ret;
12641 struct fw_vi_mac_cmd c;
12642 struct fw_vi_mac_exact *p = c.u.exact;
12643 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
12644
12645 memset(&c, 0, sizeof(c));
12646 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12647 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12648 V_FW_VI_MAC_CMD_VIID(viid));
12649 c.freemacs_to_len16 = cpu_to_be32(
12650 V_FW_CMD_LEN16(1) |
12651 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12652
12653 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12654 p->valid_to_idx = cpu_to_be16(
12655 F_FW_VI_MAC_CMD_VALID |
12656 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
12657
12658 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12659 if (ret == 0) {
12660 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12661 if (ret < max_mac_addr)
12662 return -ENOMEM;
12663 }
12664
12665 return ret;
12666 }
12667
12668 /**
12669 * t4_add_mac - Adds an exact-match filter for a MAC address
12670 * @adap: the adapter
12671 * @mbox: mailbox to use for the FW command
12672 * @viid: the VI id
12673 * @idx: index of existing filter for old value of MAC address, or -1
12674 * @addr: the new MAC address value
12675 * @persist: whether a new MAC allocation should be persistent
12676 * @add_smt: if true also add the address to the HW SMT
12677 * @smac: if true, update only the smac region of MPS
12678 *
12679 * Modifies an exact-match filter and sets it to the new MAC address if
12680 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12681 * latter case the address is added persistently if @persist is %true.
12682 *
12683 * Returns a negative error number or the index of the filter with the new
12684 * MAC value. Note that this index may differ from @idx.
12685 */
t4_add_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,u8 * smt_idx,bool smac)12686 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12687 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
12688 {
12689 int ret, mode;
12690 struct fw_vi_mac_cmd c;
12691 struct fw_vi_mac_exact *p = c.u.exact;
12692 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
12693
12694 if (idx < 0) /* new allocation */
12695 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
12696 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
12697
12698 memset(&c, 0, sizeof(c));
12699 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12700 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12701 V_FW_VI_MAC_CMD_VIID(viid));
12702 c.freemacs_to_len16 = cpu_to_be32(
12703 V_FW_CMD_LEN16(1) |
12704 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12705 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
12706 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
12707 V_FW_VI_MAC_CMD_IDX(idx));
12708 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12709
12710 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12711 if (ret == 0) {
12712 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12713 if (ret >= max_mac_addr)
12714 return -ENOMEM;
12715 if (smt_idx) {
12716 /* Does fw supports returning smt_idx? */
12717 if (adap->params.viid_smt_extn_support)
12718 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
12719 else {
12720 /* In T4/T5, SMT contains 256 SMAC entries
12721 * organized in 128 rows of 2 entries each.
12722 * In T6, SMT contains 256 SMAC entries in
12723 * 256 rows.
12724 */
12725 if (chip_id(adap) <= CHELSIO_T5)
12726 *smt_idx = ((viid & M_FW_VIID_VIN) << 1);
12727 else
12728 *smt_idx = (viid & M_FW_VIID_VIN);
12729 }
12730 }
12731 }
12732
12733 return ret;
12734 }
12735