1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14 *
15 * Copyright (C) 2003-2019 Chelsio Communications. All rights reserved.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 * release for licensing terms and conditions.
21 */
22
23 /*
24 * Copyright 2020 RackTop Systems, Inc.
25 */
26
27 #include "common.h"
28 #include "t4_regs.h"
29 #include "t4_regs_values.h"
30 #include "t4fw_interface.h"
31
32 /**
33 * t4_wait_op_done_val - wait until an operation is completed
34 * @adapter: the adapter performing the operation
35 * @reg: the register to check for completion
36 * @mask: a single-bit field within @reg that indicates completion
37 * @polarity: the value of the field when the operation is completed
38 * @attempts: number of check iterations
39 * @delay: delay in usecs between iterations
40 * @valp: where to store the value of the register at completion time
41 *
42 * Wait until an operation is completed by checking a bit in a register
43 * up to @attempts times. If @valp is not NULL the value of the register
44 * at the time it indicated completion is stored there. Returns 0 if the
45 * operation completes and -EAGAIN otherwise.
46 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	u32 val;

	/*
	 * Poll @reg until the @mask field reaches @polarity, giving up
	 * after @attempts reads with an optional @delay usec pause
	 * between them.
	 */
	for (;;) {
		val = t4_read_reg(adapter, reg);
		if (((val & mask) != 0) == polarity)
			break;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}

	/* Completed: hand back the final register value if requested. */
	if (valp != NULL)
		*valp = val;
	return 0;
}
64
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that don't
 * need the final register value: poll @reg until the @mask field matches
 * @polarity, up to @attempts iterations with @delay usecs between them.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
71
72 /**
73 * t4_set_reg_field - set a register field to a value
74 * @adapter: the adapter to program
75 * @addr: the register address
76 * @mask: specifies the portion of the register to modify
77 * @val: the new value for the register field
78 *
79 * Sets a register field specified by the supplied mask to the
80 * given value.
81 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	/* Read-modify-write: clear the @mask field, then OR in @val. */
	u32 cur = t4_read_reg(adapter, addr);

	t4_write_reg(adapter, addr, (cur & ~mask) | val);
	(void) t4_read_reg(adapter, addr);	/* flush the write */
}
90
91 /**
92 * t4_read_indirect - read indirectly addressed registers
93 * @adap: the adapter
94 * @addr_reg: register holding the indirect address
95 * @data_reg: register holding the value of the indirect register
96 * @vals: where the read register values are stored
97 * @nregs: how many indirect registers to read
98 * @start_idx: index of first indirect register to read
99 *
100 * Reads registers that are accessed indirectly through an address/data
101 * register pair.
102 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	/* Select each indirect register in turn, then read its value. */
	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		vals[i] = t4_read_reg(adap, data_reg);
	}
}
113
114 /**
115 * t4_write_indirect - write indirectly addressed registers
116 * @adap: the adapter
117 * @addr_reg: register holding the indirect addresses
118 * @data_reg: register holding the value for the indirect registers
119 * @vals: values to write
120 * @nregs: how many indirect registers to write
121 * @start_idx: address of first indirect register to write
122 *
123 * Writes a sequential block of registers that are accessed indirectly
124 * through an address/data register pair.
125 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	/* Select each indirect register in turn, then store its value. */
	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		t4_write_reg(adap, data_reg, vals[i]);
	}
}
135
136 /*
137 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
138 * mechanism. This guarantees that we get the real value even if we're
139 * operating within a Virtual Machine and the Hypervisor is trapping our
140 * Configuration Space accesses.
141 *
142 * N.B. This routine should only be used as a last resort: the firmware uses
143 * the backdoor registers on a regular basis and we can end up
 * conflicting with its uses!
145 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);

	/* The enable bit moved on T6; pick the right one for this chip. */
	req |= (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) ?
	    F_ENABLE : F_T6_ENABLE;

	/* T4 additionally wants the local-config indication. */
	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	/* Issue the backdoor request and pick up the result. */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
}
168
169 /*
170 * t4_report_fw_error - report firmware error
171 * @adap: the adapter
172 *
173 * The adapter firmware can indicate error conditions to the host.
174 * If the firmware has indicated an error, print out the reason for
175 * the firmware error.
176 */
t4_report_fw_error(struct adapter * adap)177 static void t4_report_fw_error(struct adapter *adap)
178 {
179 static const char *const reason[] = {
180 "Crash", /* PCIE_FW_EVAL_CRASH */
181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
187 "Reserved", /* reserved */
188 };
189 u32 pcie_fw;
190
191 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
192 if (pcie_fw & F_PCIE_FW_ERR) {
193 CH_ERR(adap, "Firmware reports adapter error: %s\n",
194 reason[G_PCIE_FW_EVAL(pcie_fw)]);
195 adap->flags &= ~FW_OK;
196 }
197 }
198
199 /*
200 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
201 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	int i;

	/* Each flit is one 64-bit register; keep firmware byte order. */
	for (i = 0; i < nflit; i++)
		rpl[i] = cpu_to_be64(t4_read_reg64(adap, mbox_addr + 8 * i));
}
208
209 /*
210 * Handle a FW assertion reported in a mailbox.
211 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * All fields arrive in firmware (big-endian) byte order.  The
	 * filename may not be NUL-terminated, hence the bounded %.16s
	 * (assumes the adjacent filename fields in the FW struct are
	 * contiguous — NOTE(review): confirm against t4fw_interface.h).
	 */
	CH_ALERT(adap,
		 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt->u.assert.filename_0_7,
		 be32_to_cpu(asrt->u.assert.line),
		 be32_to_cpu(asrt->u.assert.x),
		 be32_to_cpu(asrt->u.assert.y));
}
221
222 #define X_CIM_PF_NOACCESS 0xeeeeeeee
223
224 /*
225 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
226 * busy loops which don't sleep.
227 */
228 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
229 #define T4_OS_TOUCH_NMI_WATCHDOG() t4_os_touch_nmi_watchdog()
230 #else
231 #define T4_OS_TOUCH_NMI_WATCHDOG()
232 #endif
233
234 #ifdef T4_OS_LOG_MBOX_CMDS
235 /**
236 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
237 * @adapter: the adapter
238 * @cmd: the Firmware Mailbox Command or Reply
239 * @size: command length in bytes
240 * @access: the time (ms) needed to access the Firmware Mailbox
241 * @execute: the time (ms) the command spent being executed
242 */
t4_record_mbox(struct adapter * adapter,const __be64 * cmd,unsigned int size,int access,int execute)243 static void t4_record_mbox(struct adapter *adapter,
244 const __be64 *cmd, unsigned int size,
245 int access, int execute)
246 {
247 struct mbox_cmd_log *log = adapter->mbox_log;
248 struct mbox_cmd *entry;
249 int i;
250
251 entry = mbox_cmd_log_entry(log, log->cursor++);
252 if (log->cursor == log->size)
253 log->cursor = 0;
254
255 for (i = 0; i < size/8; i++)
256 entry->cmd[i] = be64_to_cpu(cmd[i]);
257 while (i < MBOX_LEN/8)
258 entry->cmd[i++] = 0;
259 entry->timestamp = t4_os_timestamp();
260 entry->seqno = log->seqno++;
261 entry->access = access;
262 entry->execute = execute;
263 }
264
265 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
266 t4_record_mbox(__adapter, __cmd, __size, __access, __execute)
267
268 #else /* !T4_OS_LOG_MBOX_CMDS */
269
270 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
271 /* nothing */
272
273 #endif /* !T4_OS_LOG_MBOX_CMDS */
274
275 /**
276 * t4_record_mbox_marker - record a marker in the mailbox log
277 * @adapter: the adapter
278 * @marker: byte array marker
279 * @size: marker size in bytes
280 *
281 * We inject a "fake mailbox command" into the Firmware Mailbox Log
282 * using a known command token and then the bytes of the specified
283 * marker. This lets debugging code inject markers into the log to
284 * help identify which commands are in response to higher level code.
285 */
void t4_record_mbox_marker(struct adapter *adapter,
			   const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	/* Marker bytes follow the one-flit token, so MBOX_LEN - 8 max. */
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* Clip oversized markers rather than overflowing the fake command. */
	if (size > max_marker)
		size = max_marker;

	/* First flit is an all-ones token no real FW command would carry. */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	/* Zero-pad the remainder so no stale stack bytes end up logged. */
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	/* Log the token flit plus the marker rounded up to whole flits. */
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}
305
306 /*
307 * Delay time in microseconds to wait for mailbox access/fw reply
308 * to mailbox command
309 */
310 #define MIN_MBOX_CMD_DELAY 900
311 #define MBOX_CMD_DELAY 1000
312
313 /**
314 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
315 * @adap: the adapter
316 * @mbox: index of the mailbox to use
317 * @cmd: the command to write
318 * @size: command length in bytes
319 * @rpl: where to optionally store the reply
320 * @sleep_ok: if true we may sleep while awaiting command completion
321 * @timeout: time to wait for command to finish before timing out
322 * (negative implies @sleep_ok=false)
323 *
324 * Sends the given command to FW through the selected mailbox and waits
325 * for the FW to execute the command. If @rpl is not %NULL it is used to
326 * store the FW's reply to the command. The command and its optional
327 * reply are of the same length. Some FW commands like RESET and
328 * INITIALIZE can take a considerable amount of time to execute.
329 * @sleep_ok determines whether we may sleep while awaiting the response.
330 * If sleeping is allowed we use progressive backoff otherwise we spin.
331 * Note that passing in a negative @timeout is an alternate mechanism
332 * for specifying @sleep_ok=false. This is useful when a higher level
333 * interface allows for specification of @timeout but not @sleep_ok ...
334 *
335 * The return value is 0 on success or a negative errno on failure. A
336 * failure can happen either because we are not able to execute the
337 * command or FW executes it but signals an error. In the latter case
338 * the return value is the error code indicated by FW (negated).
339 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	u16 access = 0;	/* iterations spent waiting for mailbox ownership */
#endif /* T4_OS_LOG_MBOX_CMDS */
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];	/* local copy of the FW reply */
	struct t4_mbox_list entry;	/* our slot on the mbox access list */
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_mbox_list_add(adap, &entry);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_mbox_list_del(adap, &entry);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_mbox_list_first_entry(adap) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
#ifdef T4_OS_LOG_MBOX_CMDS
	access = i;
#endif /* T4_OS_LOG_MBOX_CMDS */

	/*
	 * Attempt to gain access to the mailbox.  A few reads may be needed
	 * before the ownership field leaves the "none" state.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_mbox_list_del(adap, &entry);
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand ownership to the firmware to kick off command execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    i < timeout;
	    i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* We own the mailbox again but no valid message yet:
			 * release it and keep polling.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			t4_mbox_list_del(adap, &entry);

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			/*
			 * An unsolicited FW_DEBUG_CMD reply signals a firmware
			 * assertion; log it and turn it into an EIO for the
			 * caller.  Otherwise hand the reply back (if wanted)
			 * and return the FW's (negated) return value.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	t4_mbox_list_del(adap, &entry);

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command 0x%x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
541
542 #ifdef CONFIG_CUDBG
543 /*
544 * The maximum number of times to iterate for FW reply before
545 * issuing a mailbox timeout
546 */
547 #define FW_REPLY_WAIT_LOOP 6000000
548
549 /**
550 * t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
551 * mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
552 * and is only invoked during a kernel crash. Since this function is
 * called through an atomic notifier chain, we cannot sleep awaiting a
554 * response from FW, hence repeatedly loop until we get a reply.
555 *
556 * @adap: the adapter
557 * @mbox: index of the mailbox to use
558 * @cmd: the command to write
559 * @size: command length in bytes
560 * @rpl: where to optionally store the reply
561 */
562
t4_wr_mbox_meat_timeout_panic(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl)563 static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
564 const void *cmd, int size, void *rpl)
565 {
566 u32 v;
567 u64 res;
568 int i, ret;
569 u64 cnt;
570 const __be64 *p = cmd;
571 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
572 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
573 u32 ctl;
574 __be64 cmd_rpl[MBOX_LEN/8];
575 u32 pcie_fw;
576
577 if ((size & 15) || size > MBOX_LEN)
578 return -EINVAL;
579
580 /*
581 * Check for a firmware error which we'll report as a
582 * device error.
583 */
584 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
585 if (pcie_fw & F_PCIE_FW_ERR) {
586 t4_report_fw_error(adap);
587 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
588 return ret;
589 }
590
591 /*
592 * Attempt to gain access to the mailbox.
593 */
594 for (i = 0; i < 4; i++) {
595 ctl = t4_read_reg(adap, ctl_reg);
596 v = G_MBOWNER(ctl);
597 if (v != X_MBOWNER_NONE)
598 break;
599 }
600
601 /*
602 * If we were unable to gain access, report the error to our caller.
603 */
604 if (v != X_MBOWNER_PL) {
605 t4_report_fw_error(adap);
606 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
607 return ret;
608 }
609
610 /*
611 * If we gain ownership of the mailbox and there's a "valid" message
612 * in it, this is likely an asynchronous error message from the
613 * firmware. So we'll report that and then proceed on with attempting
614 * to issue our own command ... which may well fail if the error
615 * presaged the firmware crashing ...
616 */
617 if (ctl & F_MBMSGVALID) {
618 CH_ERR(adap, "found VALID command in mbox %u: "
619 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
620 (unsigned long long)t4_read_reg64(adap, data_reg),
621 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
622 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
623 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
624 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
625 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
626 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
627 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
628 }
629
630 /*
631 * Copy in the new mailbox command and send it on its way ...
632 */
633 for (i = 0; i < size; i += 8, p++)
634 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
635
636 CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
637
638 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
639 t4_read_reg(adap, ctl_reg); /* flush write */
640
641 /*
642 * Loop waiting for the reply; bail out if we time out or the firmware
643 * reports an error.
644 */
645 for (cnt = 0;
646 !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
647 cnt < FW_REPLY_WAIT_LOOP;
648 cnt++) {
649 v = t4_read_reg(adap, ctl_reg);
650 if (v == X_CIM_PF_NOACCESS)
651 continue;
652 if (G_MBOWNER(v) == X_MBOWNER_PL) {
653 if (!(v & F_MBMSGVALID)) {
654 t4_write_reg(adap, ctl_reg,
655 V_MBOWNER(X_MBOWNER_NONE));
656 continue;
657 }
658
659 /*
660 * Retrieve the command reply and release the mailbox.
661 */
662 get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
663 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
664
665 CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
666
667 res = be64_to_cpu(cmd_rpl[0]);
668 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
669 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
670 res = V_FW_CMD_RETVAL(EIO);
671 } else if (rpl)
672 memcpy(rpl, cmd_rpl, size);
673 return -G_FW_CMD_RETVAL((int)res);
674 }
675 }
676
677 /*
678 * We timed out waiting for a reply to our mailbox command. Report
679 * the error and also check to see if the firmware reported any
680 * errors ...
681 */
682 ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
683 CH_ERR(adap, "command %#x in mailbox %d timed out\n",
684 *(const u8 *)cmd, mbox);
685
686 t4_report_fw_error(adap);
687 t4_fatal_err(adap);
688 return ret;
689 }
690 #endif
691
/*
 * t4_wr_mbox_meat - send a FW command using the default command timeout
 *
 * Thin wrapper around t4_wr_mbox_meat_timeout() using FW_CMD_MAX_TIMEOUT.
 * When built with CUDBG support and the adapter is flagged as being in a
 * kernel-crash context (K_CRASH), the minimal panic-safe variant which
 * never sleeps is used instead.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
						     rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
					       sleep_ok, FW_CMD_MAX_TIMEOUT);

}
705
t4_edc_err_read(struct adapter * adap,int idx)706 static int t4_edc_err_read(struct adapter *adap, int idx)
707 {
708 u32 edc_ecc_err_addr_reg;
709 u32 edc_bist_status_rdata_reg;
710
711 if (is_t4(adap->params.chip)) {
712 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
713 return 0;
714 }
715 if (idx != MEM_EDC0 && idx != MEM_EDC1) {
716 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
717 return 0;
718 }
719
720 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
721 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
722
723 CH_WARN(adap,
724 "edc%d err addr 0x%x: 0x%x.\n",
725 idx, edc_ecc_err_addr_reg,
726 t4_read_reg(adap, edc_ecc_err_addr_reg));
727 CH_WARN(adap,
728 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
729 edc_bist_status_rdata_reg,
730 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
731 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
732 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
733 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
734 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
735 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
736 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
737 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
738 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
739
740 return 0;
741 }
742
743 /**
744 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
745 * @adap: the adapter
746 * @win: PCI-E Memory Window to use
747 * @addr: address within adapter memory
748 * @len: amount of memory to transfer
749 * @hbuf: host memory buffer
750 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
751 *
752 * Reads/writes an [almost] arbitrary memory region in the firmware: the
753 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.
755 *
756 * NOTES:
757 * 1. The memory is transferred as a raw byte sequence from/to the
758 * firmware's memory. If this memory contains data structures which
759 * contain multi-byte integers, it's the caller's responsibility to
760 * perform appropriate byte order conversions.
761 *
762 * 2. It is the Caller's responsibility to ensure that no other code
763 * uses the specified PCI-E Memory Window while this routine is
764 * using it. This is typically done via the use of OS-specific
765 * locks, etc.
766 */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks: both the adapter address and the host
	 * buffer must be 32-bit aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						  win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (mem_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
			win);
		return -ENXIO;
	}

	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	(void) t4_read_reg(adap,
			   PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31               0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			(void) t4_read_reg(adap,
					   PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							       win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			/* Fetch the final partial word and copy only its
			 * first @resid bytes into the host buffer.  (The
			 * previous code copied bytes [resid, 4) instead,
			 * which both returned the wrong bytes and wrote
			 * past the caller-requested length.)
			 */
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			for (bp = (unsigned char *)buf, i = 0;
			     i < (int)resid; i++)
				bp[i] = last.byte[i];
		} else {
			/* Build the final word from the first @resid host
			 * bytes, zero-padded, without reading past the end
			 * of the caller's buffer.
			 */
			bp = (unsigned char *)buf;
			for (i = 0; i < 4; i++)
				last.byte[i] = (i < (int)resid) ? bp[i] : 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
921
922 /**
923 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
924 * @adap: the adapter
925 * @win: PCI-E Memory Window to use
926 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
927 * @maddr: address within indicated memory type
928 * @len: amount of memory to transfer
929 * @hbuf: host memory buffer
930 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
931 *
932 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
933 * provides an (memory type, address withing memory type) interface.
934 */
t4_memory_rw_mtype(struct adapter * adap,int win,int mtype,u32 maddr,u32 len,void * hbuf,int dir)935 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
936 u32 len, void *hbuf, int dir)
937 {
938 u32 mtype_offset;
939 u32 edc_size, mc_size;
940
941 /* Offset into the region of memory which is being accessed
942 * MEM_EDC0 = 0
943 * MEM_EDC1 = 1
944 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
945 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
946 * MEM_HMA = 4
947 */
948 edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
949 if (mtype == MEM_HMA) {
950 mtype_offset = 2 * (edc_size * 1024 * 1024);
951 } else if (mtype != MEM_MC1)
952 mtype_offset = (mtype * (edc_size * 1024 * 1024));
953 else {
954 mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
955 A_MA_EXT_MEMORY0_BAR));
956 mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
957 }
958
959 return t4_memory_rw_addr(adap, win,
960 mtype_offset + maddr, len,
961 hbuf, dir);
962 }
963
964 /*
965 * Return the specified PCI-E Configuration Space register from our Physical
966 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
967 * since we prefer to let the firmware own all of these registers, but if that
968 * fails we go for it directly ourselves.
969 */
t4_read_pcie_cfg4(struct adapter * adap,int reg,int drv_fw_attach)970 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
971 {
972 u32 val;
973
974 /*
975 * If fw_attach != 0, construct and send the Firmware LDST Command to
976 * retrieve the specified PCI-E Configuration Space register.
977 */
978 if (drv_fw_attach != 0) {
979 struct fw_ldst_cmd ldst_cmd;
980 int ret;
981
982 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
983 ldst_cmd.op_to_addrspace =
984 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
985 F_FW_CMD_REQUEST |
986 F_FW_CMD_READ |
987 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
988 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
989 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
990 ldst_cmd.u.pcie.ctrl_to_fn =
991 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
992 ldst_cmd.u.pcie.r = reg;
993
994 /*
995 * If the LDST Command succeeds, return the result, otherwise
996 * fall through to reading it directly ourselves ...
997 */
998 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
999 &ldst_cmd);
1000 if (ret == 0)
1001 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1002
1003 CH_WARN(adap, "Firmware failed to return "
1004 "Configuration Space register %d, err = %d\n",
1005 reg, -ret);
1006 }
1007
1008 /*
1009 * Read the desired Configuration Space register via the PCI-E
1010 * Backdoor mechanism.
1011 */
1012 t4_hw_pci_read_cfg4(adap, reg, &val);
1013 return val;
1014 }
1015
1016 /*
1017 * Get the window based on base passed to it.
1018 * Window aperture is currently unhandled, but there is no use case for it
1019 * right now
1020 */
t4_get_window(struct adapter * adap,u64 pci_base,u64 pci_mask,u64 memwin_base,int drv_fw_attach)1021 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1022 {
1023 if (is_t4(adap->params.chip)) {
1024 u32 bar0;
1025
1026 /*
1027 * Truncation intentional: we only read the bottom 32-bits of
1028 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
1029 * mechanism to read BAR0 instead of using
1030 * pci_resource_start() because we could be operating from
1031 * within a Virtual Machine which is trapping our accesses to
1032 * our Configuration Space and we need to set up the PCI-E
1033 * Memory Window decoders with the actual addresses which will
1034 * be coming across the PCI-E link.
1035 */
1036 bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1037 bar0 &= pci_mask;
1038 adap->t4_bar0 = bar0;
1039
1040 return bar0 + memwin_base;
1041 } else {
1042 /* For T5, only relative offset inside the PCIe BAR is passed */
1043 return memwin_base;
1044 }
1045 }
1046
1047 /* Get the default utility window (win0) used by everyone */
t4_get_util_window(struct adapter * adap,int drv_fw_attach)1048 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1049 {
1050 return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1051 }
1052
1053 /*
1054 * Set up memory window for accessing adapter memory ranges. (Read
1055 * back MA register to ensure that changes propagate before we attempt
1056 * to use the new values.)
1057 */
t4_setup_memwin(struct adapter * adap,u32 memwin_base,u32 window)1058 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1059 {
1060 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1061 memwin_base | V_BIR(0) |
1062 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1063 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1064 }
1065
1066 /**
1067 * t4_get_regs_len - return the size of the chips register set
1068 * @adapter: the adapter
1069 *
1070 * Returns the size of the chip's BAR0 register space.
1071 */
t4_get_regs_len(struct adapter * adapter)1072 unsigned int t4_get_regs_len(struct adapter *adapter)
1073 {
1074 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1075
1076 switch (chip_version) {
1077 case CHELSIO_T4:
1078 return T4_REGMAP_SIZE;
1079
1080 case CHELSIO_T5:
1081 case CHELSIO_T6:
1082 return T5_REGMAP_SIZE;
1083 }
1084
1085 CH_ERR(adapter,
1086 "Unsupported chip version %d\n", chip_version);
1087 return 0;
1088 }
1089
1090 /**
1091 * t4_get_regs - read chip registers into provided buffer
1092 * @adap: the adapter
1093 * @buf: register buffer
1094 * @buf_size: size (in bytes) of register buffer
1095 *
1096 * If the provided register buffer isn't large enough for the chip's
1097 * full register range, the register dump will be truncated to the
1098 * register buffer's size.
1099 */
t4_get_regs(struct adapter * adap,void * buf,size_t buf_size)1100 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1101 {
1102 static const unsigned int t4_reg_ranges[] = {
1103 0x1008, 0x1108,
1104 0x1180, 0x1184,
1105 0x1190, 0x1194,
1106 0x11a0, 0x11a4,
1107 0x11b0, 0x11b4,
1108 0x11fc, 0x123c,
1109 0x1300, 0x173c,
1110 0x1800, 0x18fc,
1111 0x3000, 0x30d8,
1112 0x30e0, 0x30e4,
1113 0x30ec, 0x5910,
1114 0x5920, 0x5924,
1115 0x5960, 0x5960,
1116 0x5968, 0x5968,
1117 0x5970, 0x5970,
1118 0x5978, 0x5978,
1119 0x5980, 0x5980,
1120 0x5988, 0x5988,
1121 0x5990, 0x5990,
1122 0x5998, 0x5998,
1123 0x59a0, 0x59d4,
1124 0x5a00, 0x5ae0,
1125 0x5ae8, 0x5ae8,
1126 0x5af0, 0x5af0,
1127 0x5af8, 0x5af8,
1128 0x6000, 0x6098,
1129 0x6100, 0x6150,
1130 0x6200, 0x6208,
1131 0x6240, 0x6248,
1132 0x6280, 0x62b0,
1133 0x62c0, 0x6338,
1134 0x6370, 0x638c,
1135 0x6400, 0x643c,
1136 0x6500, 0x6524,
1137 0x6a00, 0x6a04,
1138 0x6a14, 0x6a38,
1139 0x6a60, 0x6a70,
1140 0x6a78, 0x6a78,
1141 0x6b00, 0x6b0c,
1142 0x6b1c, 0x6b84,
1143 0x6bf0, 0x6bf8,
1144 0x6c00, 0x6c0c,
1145 0x6c1c, 0x6c84,
1146 0x6cf0, 0x6cf8,
1147 0x6d00, 0x6d0c,
1148 0x6d1c, 0x6d84,
1149 0x6df0, 0x6df8,
1150 0x6e00, 0x6e0c,
1151 0x6e1c, 0x6e84,
1152 0x6ef0, 0x6ef8,
1153 0x6f00, 0x6f0c,
1154 0x6f1c, 0x6f84,
1155 0x6ff0, 0x6ff8,
1156 0x7000, 0x700c,
1157 0x701c, 0x7084,
1158 0x70f0, 0x70f8,
1159 0x7100, 0x710c,
1160 0x711c, 0x7184,
1161 0x71f0, 0x71f8,
1162 0x7200, 0x720c,
1163 0x721c, 0x7284,
1164 0x72f0, 0x72f8,
1165 0x7300, 0x730c,
1166 0x731c, 0x7384,
1167 0x73f0, 0x73f8,
1168 0x7400, 0x7450,
1169 0x7500, 0x7530,
1170 0x7600, 0x760c,
1171 0x7614, 0x761c,
1172 0x7680, 0x76cc,
1173 0x7700, 0x7798,
1174 0x77c0, 0x77fc,
1175 0x7900, 0x79fc,
1176 0x7b00, 0x7b58,
1177 0x7b60, 0x7b84,
1178 0x7b8c, 0x7c38,
1179 0x7d00, 0x7d38,
1180 0x7d40, 0x7d80,
1181 0x7d8c, 0x7ddc,
1182 0x7de4, 0x7e04,
1183 0x7e10, 0x7e1c,
1184 0x7e24, 0x7e38,
1185 0x7e40, 0x7e44,
1186 0x7e4c, 0x7e78,
1187 0x7e80, 0x7ea4,
1188 0x7eac, 0x7edc,
1189 0x7ee8, 0x7efc,
1190 0x8dc0, 0x8e04,
1191 0x8e10, 0x8e1c,
1192 0x8e30, 0x8e78,
1193 0x8ea0, 0x8eb8,
1194 0x8ec0, 0x8f6c,
1195 0x8fc0, 0x9008,
1196 0x9010, 0x9058,
1197 0x9060, 0x9060,
1198 0x9068, 0x9074,
1199 0x90fc, 0x90fc,
1200 0x9400, 0x9408,
1201 0x9410, 0x9458,
1202 0x9600, 0x9600,
1203 0x9608, 0x9638,
1204 0x9640, 0x96bc,
1205 0x9800, 0x9808,
1206 0x9820, 0x983c,
1207 0x9850, 0x9864,
1208 0x9c00, 0x9c6c,
1209 0x9c80, 0x9cec,
1210 0x9d00, 0x9d6c,
1211 0x9d80, 0x9dec,
1212 0x9e00, 0x9e6c,
1213 0x9e80, 0x9eec,
1214 0x9f00, 0x9f6c,
1215 0x9f80, 0x9fec,
1216 0xd004, 0xd004,
1217 0xd010, 0xd03c,
1218 0xdfc0, 0xdfe0,
1219 0xe000, 0xea7c,
1220 0xf000, 0x11110,
1221 0x11118, 0x11190,
1222 0x19040, 0x1906c,
1223 0x19078, 0x19080,
1224 0x1908c, 0x190e4,
1225 0x190f0, 0x190f8,
1226 0x19100, 0x19110,
1227 0x19120, 0x19124,
1228 0x19150, 0x19194,
1229 0x1919c, 0x191b0,
1230 0x191d0, 0x191e8,
1231 0x19238, 0x1924c,
1232 0x193f8, 0x1943c,
1233 0x1944c, 0x19474,
1234 0x19490, 0x194e0,
1235 0x194f0, 0x194f8,
1236 0x19800, 0x19c08,
1237 0x19c10, 0x19c90,
1238 0x19ca0, 0x19ce4,
1239 0x19cf0, 0x19d40,
1240 0x19d50, 0x19d94,
1241 0x19da0, 0x19de8,
1242 0x19df0, 0x19e40,
1243 0x19e50, 0x19e90,
1244 0x19ea0, 0x19f4c,
1245 0x1a000, 0x1a004,
1246 0x1a010, 0x1a06c,
1247 0x1a0b0, 0x1a0e4,
1248 0x1a0ec, 0x1a0f4,
1249 0x1a100, 0x1a108,
1250 0x1a114, 0x1a120,
1251 0x1a128, 0x1a130,
1252 0x1a138, 0x1a138,
1253 0x1a190, 0x1a1c4,
1254 0x1a1fc, 0x1a1fc,
1255 0x1e040, 0x1e04c,
1256 0x1e284, 0x1e28c,
1257 0x1e2c0, 0x1e2c0,
1258 0x1e2e0, 0x1e2e0,
1259 0x1e300, 0x1e384,
1260 0x1e3c0, 0x1e3c8,
1261 0x1e440, 0x1e44c,
1262 0x1e684, 0x1e68c,
1263 0x1e6c0, 0x1e6c0,
1264 0x1e6e0, 0x1e6e0,
1265 0x1e700, 0x1e784,
1266 0x1e7c0, 0x1e7c8,
1267 0x1e840, 0x1e84c,
1268 0x1ea84, 0x1ea8c,
1269 0x1eac0, 0x1eac0,
1270 0x1eae0, 0x1eae0,
1271 0x1eb00, 0x1eb84,
1272 0x1ebc0, 0x1ebc8,
1273 0x1ec40, 0x1ec4c,
1274 0x1ee84, 0x1ee8c,
1275 0x1eec0, 0x1eec0,
1276 0x1eee0, 0x1eee0,
1277 0x1ef00, 0x1ef84,
1278 0x1efc0, 0x1efc8,
1279 0x1f040, 0x1f04c,
1280 0x1f284, 0x1f28c,
1281 0x1f2c0, 0x1f2c0,
1282 0x1f2e0, 0x1f2e0,
1283 0x1f300, 0x1f384,
1284 0x1f3c0, 0x1f3c8,
1285 0x1f440, 0x1f44c,
1286 0x1f684, 0x1f68c,
1287 0x1f6c0, 0x1f6c0,
1288 0x1f6e0, 0x1f6e0,
1289 0x1f700, 0x1f784,
1290 0x1f7c0, 0x1f7c8,
1291 0x1f840, 0x1f84c,
1292 0x1fa84, 0x1fa8c,
1293 0x1fac0, 0x1fac0,
1294 0x1fae0, 0x1fae0,
1295 0x1fb00, 0x1fb84,
1296 0x1fbc0, 0x1fbc8,
1297 0x1fc40, 0x1fc4c,
1298 0x1fe84, 0x1fe8c,
1299 0x1fec0, 0x1fec0,
1300 0x1fee0, 0x1fee0,
1301 0x1ff00, 0x1ff84,
1302 0x1ffc0, 0x1ffc8,
1303 0x20000, 0x2002c,
1304 0x20100, 0x2013c,
1305 0x20190, 0x201a0,
1306 0x201a8, 0x201b8,
1307 0x201c4, 0x201c8,
1308 0x20200, 0x20318,
1309 0x20400, 0x204b4,
1310 0x204c0, 0x20528,
1311 0x20540, 0x20614,
1312 0x21000, 0x21040,
1313 0x2104c, 0x21060,
1314 0x210c0, 0x210ec,
1315 0x21200, 0x21268,
1316 0x21270, 0x21284,
1317 0x212fc, 0x21388,
1318 0x21400, 0x21404,
1319 0x21500, 0x21500,
1320 0x21510, 0x21518,
1321 0x2152c, 0x21530,
1322 0x2153c, 0x2153c,
1323 0x21550, 0x21554,
1324 0x21600, 0x21600,
1325 0x21608, 0x2161c,
1326 0x21624, 0x21628,
1327 0x21630, 0x21634,
1328 0x2163c, 0x2163c,
1329 0x21700, 0x2171c,
1330 0x21780, 0x2178c,
1331 0x21800, 0x21818,
1332 0x21820, 0x21828,
1333 0x21830, 0x21848,
1334 0x21850, 0x21854,
1335 0x21860, 0x21868,
1336 0x21870, 0x21870,
1337 0x21878, 0x21898,
1338 0x218a0, 0x218a8,
1339 0x218b0, 0x218c8,
1340 0x218d0, 0x218d4,
1341 0x218e0, 0x218e8,
1342 0x218f0, 0x218f0,
1343 0x218f8, 0x21a18,
1344 0x21a20, 0x21a28,
1345 0x21a30, 0x21a48,
1346 0x21a50, 0x21a54,
1347 0x21a60, 0x21a68,
1348 0x21a70, 0x21a70,
1349 0x21a78, 0x21a98,
1350 0x21aa0, 0x21aa8,
1351 0x21ab0, 0x21ac8,
1352 0x21ad0, 0x21ad4,
1353 0x21ae0, 0x21ae8,
1354 0x21af0, 0x21af0,
1355 0x21af8, 0x21c18,
1356 0x21c20, 0x21c20,
1357 0x21c28, 0x21c30,
1358 0x21c38, 0x21c38,
1359 0x21c80, 0x21c98,
1360 0x21ca0, 0x21ca8,
1361 0x21cb0, 0x21cc8,
1362 0x21cd0, 0x21cd4,
1363 0x21ce0, 0x21ce8,
1364 0x21cf0, 0x21cf0,
1365 0x21cf8, 0x21d7c,
1366 0x21e00, 0x21e04,
1367 0x22000, 0x2202c,
1368 0x22100, 0x2213c,
1369 0x22190, 0x221a0,
1370 0x221a8, 0x221b8,
1371 0x221c4, 0x221c8,
1372 0x22200, 0x22318,
1373 0x22400, 0x224b4,
1374 0x224c0, 0x22528,
1375 0x22540, 0x22614,
1376 0x23000, 0x23040,
1377 0x2304c, 0x23060,
1378 0x230c0, 0x230ec,
1379 0x23200, 0x23268,
1380 0x23270, 0x23284,
1381 0x232fc, 0x23388,
1382 0x23400, 0x23404,
1383 0x23500, 0x23500,
1384 0x23510, 0x23518,
1385 0x2352c, 0x23530,
1386 0x2353c, 0x2353c,
1387 0x23550, 0x23554,
1388 0x23600, 0x23600,
1389 0x23608, 0x2361c,
1390 0x23624, 0x23628,
1391 0x23630, 0x23634,
1392 0x2363c, 0x2363c,
1393 0x23700, 0x2371c,
1394 0x23780, 0x2378c,
1395 0x23800, 0x23818,
1396 0x23820, 0x23828,
1397 0x23830, 0x23848,
1398 0x23850, 0x23854,
1399 0x23860, 0x23868,
1400 0x23870, 0x23870,
1401 0x23878, 0x23898,
1402 0x238a0, 0x238a8,
1403 0x238b0, 0x238c8,
1404 0x238d0, 0x238d4,
1405 0x238e0, 0x238e8,
1406 0x238f0, 0x238f0,
1407 0x238f8, 0x23a18,
1408 0x23a20, 0x23a28,
1409 0x23a30, 0x23a48,
1410 0x23a50, 0x23a54,
1411 0x23a60, 0x23a68,
1412 0x23a70, 0x23a70,
1413 0x23a78, 0x23a98,
1414 0x23aa0, 0x23aa8,
1415 0x23ab0, 0x23ac8,
1416 0x23ad0, 0x23ad4,
1417 0x23ae0, 0x23ae8,
1418 0x23af0, 0x23af0,
1419 0x23af8, 0x23c18,
1420 0x23c20, 0x23c20,
1421 0x23c28, 0x23c30,
1422 0x23c38, 0x23c38,
1423 0x23c80, 0x23c98,
1424 0x23ca0, 0x23ca8,
1425 0x23cb0, 0x23cc8,
1426 0x23cd0, 0x23cd4,
1427 0x23ce0, 0x23ce8,
1428 0x23cf0, 0x23cf0,
1429 0x23cf8, 0x23d7c,
1430 0x23e00, 0x23e04,
1431 0x24000, 0x2402c,
1432 0x24100, 0x2413c,
1433 0x24190, 0x241a0,
1434 0x241a8, 0x241b8,
1435 0x241c4, 0x241c8,
1436 0x24200, 0x24318,
1437 0x24400, 0x244b4,
1438 0x244c0, 0x24528,
1439 0x24540, 0x24614,
1440 0x25000, 0x25040,
1441 0x2504c, 0x25060,
1442 0x250c0, 0x250ec,
1443 0x25200, 0x25268,
1444 0x25270, 0x25284,
1445 0x252fc, 0x25388,
1446 0x25400, 0x25404,
1447 0x25500, 0x25500,
1448 0x25510, 0x25518,
1449 0x2552c, 0x25530,
1450 0x2553c, 0x2553c,
1451 0x25550, 0x25554,
1452 0x25600, 0x25600,
1453 0x25608, 0x2561c,
1454 0x25624, 0x25628,
1455 0x25630, 0x25634,
1456 0x2563c, 0x2563c,
1457 0x25700, 0x2571c,
1458 0x25780, 0x2578c,
1459 0x25800, 0x25818,
1460 0x25820, 0x25828,
1461 0x25830, 0x25848,
1462 0x25850, 0x25854,
1463 0x25860, 0x25868,
1464 0x25870, 0x25870,
1465 0x25878, 0x25898,
1466 0x258a0, 0x258a8,
1467 0x258b0, 0x258c8,
1468 0x258d0, 0x258d4,
1469 0x258e0, 0x258e8,
1470 0x258f0, 0x258f0,
1471 0x258f8, 0x25a18,
1472 0x25a20, 0x25a28,
1473 0x25a30, 0x25a48,
1474 0x25a50, 0x25a54,
1475 0x25a60, 0x25a68,
1476 0x25a70, 0x25a70,
1477 0x25a78, 0x25a98,
1478 0x25aa0, 0x25aa8,
1479 0x25ab0, 0x25ac8,
1480 0x25ad0, 0x25ad4,
1481 0x25ae0, 0x25ae8,
1482 0x25af0, 0x25af0,
1483 0x25af8, 0x25c18,
1484 0x25c20, 0x25c20,
1485 0x25c28, 0x25c30,
1486 0x25c38, 0x25c38,
1487 0x25c80, 0x25c98,
1488 0x25ca0, 0x25ca8,
1489 0x25cb0, 0x25cc8,
1490 0x25cd0, 0x25cd4,
1491 0x25ce0, 0x25ce8,
1492 0x25cf0, 0x25cf0,
1493 0x25cf8, 0x25d7c,
1494 0x25e00, 0x25e04,
1495 0x26000, 0x2602c,
1496 0x26100, 0x2613c,
1497 0x26190, 0x261a0,
1498 0x261a8, 0x261b8,
1499 0x261c4, 0x261c8,
1500 0x26200, 0x26318,
1501 0x26400, 0x264b4,
1502 0x264c0, 0x26528,
1503 0x26540, 0x26614,
1504 0x27000, 0x27040,
1505 0x2704c, 0x27060,
1506 0x270c0, 0x270ec,
1507 0x27200, 0x27268,
1508 0x27270, 0x27284,
1509 0x272fc, 0x27388,
1510 0x27400, 0x27404,
1511 0x27500, 0x27500,
1512 0x27510, 0x27518,
1513 0x2752c, 0x27530,
1514 0x2753c, 0x2753c,
1515 0x27550, 0x27554,
1516 0x27600, 0x27600,
1517 0x27608, 0x2761c,
1518 0x27624, 0x27628,
1519 0x27630, 0x27634,
1520 0x2763c, 0x2763c,
1521 0x27700, 0x2771c,
1522 0x27780, 0x2778c,
1523 0x27800, 0x27818,
1524 0x27820, 0x27828,
1525 0x27830, 0x27848,
1526 0x27850, 0x27854,
1527 0x27860, 0x27868,
1528 0x27870, 0x27870,
1529 0x27878, 0x27898,
1530 0x278a0, 0x278a8,
1531 0x278b0, 0x278c8,
1532 0x278d0, 0x278d4,
1533 0x278e0, 0x278e8,
1534 0x278f0, 0x278f0,
1535 0x278f8, 0x27a18,
1536 0x27a20, 0x27a28,
1537 0x27a30, 0x27a48,
1538 0x27a50, 0x27a54,
1539 0x27a60, 0x27a68,
1540 0x27a70, 0x27a70,
1541 0x27a78, 0x27a98,
1542 0x27aa0, 0x27aa8,
1543 0x27ab0, 0x27ac8,
1544 0x27ad0, 0x27ad4,
1545 0x27ae0, 0x27ae8,
1546 0x27af0, 0x27af0,
1547 0x27af8, 0x27c18,
1548 0x27c20, 0x27c20,
1549 0x27c28, 0x27c30,
1550 0x27c38, 0x27c38,
1551 0x27c80, 0x27c98,
1552 0x27ca0, 0x27ca8,
1553 0x27cb0, 0x27cc8,
1554 0x27cd0, 0x27cd4,
1555 0x27ce0, 0x27ce8,
1556 0x27cf0, 0x27cf0,
1557 0x27cf8, 0x27d7c,
1558 0x27e00, 0x27e04,
1559 };
1560
1561 static const unsigned int t5_reg_ranges[] = {
1562 0x1008, 0x10c0,
1563 0x10cc, 0x10f8,
1564 0x1100, 0x1100,
1565 0x110c, 0x1148,
1566 0x1180, 0x1184,
1567 0x1190, 0x1194,
1568 0x11a0, 0x11a4,
1569 0x11b0, 0x11b4,
1570 0x11fc, 0x123c,
1571 0x1280, 0x173c,
1572 0x1800, 0x18fc,
1573 0x3000, 0x3028,
1574 0x3060, 0x30b0,
1575 0x30b8, 0x30d8,
1576 0x30e0, 0x30fc,
1577 0x3140, 0x357c,
1578 0x35a8, 0x35cc,
1579 0x35ec, 0x35ec,
1580 0x3600, 0x5624,
1581 0x56cc, 0x56ec,
1582 0x56f4, 0x5720,
1583 0x5728, 0x575c,
1584 0x580c, 0x5814,
1585 0x5890, 0x589c,
1586 0x58a4, 0x58ac,
1587 0x58b8, 0x58bc,
1588 0x5940, 0x59c8,
1589 0x59d0, 0x59dc,
1590 0x59fc, 0x5a18,
1591 0x5a60, 0x5a70,
1592 0x5a80, 0x5a9c,
1593 0x5b94, 0x5bfc,
1594 0x6000, 0x6020,
1595 0x6028, 0x6040,
1596 0x6058, 0x609c,
1597 0x60a8, 0x614c,
1598 0x7700, 0x7798,
1599 0x77c0, 0x78fc,
1600 0x7b00, 0x7b58,
1601 0x7b60, 0x7b84,
1602 0x7b8c, 0x7c54,
1603 0x7d00, 0x7d38,
1604 0x7d40, 0x7d80,
1605 0x7d8c, 0x7ddc,
1606 0x7de4, 0x7e04,
1607 0x7e10, 0x7e1c,
1608 0x7e24, 0x7e38,
1609 0x7e40, 0x7e44,
1610 0x7e4c, 0x7e78,
1611 0x7e80, 0x7edc,
1612 0x7ee8, 0x7efc,
1613 0x8dc0, 0x8de0,
1614 0x8df8, 0x8e04,
1615 0x8e10, 0x8e84,
1616 0x8ea0, 0x8f84,
1617 0x8fc0, 0x9058,
1618 0x9060, 0x9060,
1619 0x9068, 0x90f8,
1620 0x9400, 0x9408,
1621 0x9410, 0x9470,
1622 0x9600, 0x9600,
1623 0x9608, 0x9638,
1624 0x9640, 0x96f4,
1625 0x9800, 0x9808,
1626 0x9820, 0x983c,
1627 0x9850, 0x9864,
1628 0x9c00, 0x9c6c,
1629 0x9c80, 0x9cec,
1630 0x9d00, 0x9d6c,
1631 0x9d80, 0x9dec,
1632 0x9e00, 0x9e6c,
1633 0x9e80, 0x9eec,
1634 0x9f00, 0x9f6c,
1635 0x9f80, 0xa020,
1636 0xd004, 0xd004,
1637 0xd010, 0xd03c,
1638 0xdfc0, 0xdfe0,
1639 0xe000, 0x1106c,
1640 0x11074, 0x11088,
1641 0x1109c, 0x1117c,
1642 0x11190, 0x11204,
1643 0x19040, 0x1906c,
1644 0x19078, 0x19080,
1645 0x1908c, 0x190e8,
1646 0x190f0, 0x190f8,
1647 0x19100, 0x19110,
1648 0x19120, 0x19124,
1649 0x19150, 0x19194,
1650 0x1919c, 0x191b0,
1651 0x191d0, 0x191e8,
1652 0x19238, 0x19290,
1653 0x193f8, 0x19428,
1654 0x19430, 0x19444,
1655 0x1944c, 0x1946c,
1656 0x19474, 0x19474,
1657 0x19490, 0x194cc,
1658 0x194f0, 0x194f8,
1659 0x19c00, 0x19c08,
1660 0x19c10, 0x19c60,
1661 0x19c94, 0x19ce4,
1662 0x19cf0, 0x19d40,
1663 0x19d50, 0x19d94,
1664 0x19da0, 0x19de8,
1665 0x19df0, 0x19e10,
1666 0x19e50, 0x19e90,
1667 0x19ea0, 0x19f24,
1668 0x19f34, 0x19f34,
1669 0x19f40, 0x19f50,
1670 0x19f90, 0x19fb4,
1671 0x19fc4, 0x19fe4,
1672 0x1a000, 0x1a004,
1673 0x1a010, 0x1a06c,
1674 0x1a0b0, 0x1a0e4,
1675 0x1a0ec, 0x1a0f8,
1676 0x1a100, 0x1a108,
1677 0x1a114, 0x1a120,
1678 0x1a128, 0x1a130,
1679 0x1a138, 0x1a138,
1680 0x1a190, 0x1a1c4,
1681 0x1a1fc, 0x1a1fc,
1682 0x1e008, 0x1e00c,
1683 0x1e040, 0x1e044,
1684 0x1e04c, 0x1e04c,
1685 0x1e284, 0x1e290,
1686 0x1e2c0, 0x1e2c0,
1687 0x1e2e0, 0x1e2e0,
1688 0x1e300, 0x1e384,
1689 0x1e3c0, 0x1e3c8,
1690 0x1e408, 0x1e40c,
1691 0x1e440, 0x1e444,
1692 0x1e44c, 0x1e44c,
1693 0x1e684, 0x1e690,
1694 0x1e6c0, 0x1e6c0,
1695 0x1e6e0, 0x1e6e0,
1696 0x1e700, 0x1e784,
1697 0x1e7c0, 0x1e7c8,
1698 0x1e808, 0x1e80c,
1699 0x1e840, 0x1e844,
1700 0x1e84c, 0x1e84c,
1701 0x1ea84, 0x1ea90,
1702 0x1eac0, 0x1eac0,
1703 0x1eae0, 0x1eae0,
1704 0x1eb00, 0x1eb84,
1705 0x1ebc0, 0x1ebc8,
1706 0x1ec08, 0x1ec0c,
1707 0x1ec40, 0x1ec44,
1708 0x1ec4c, 0x1ec4c,
1709 0x1ee84, 0x1ee90,
1710 0x1eec0, 0x1eec0,
1711 0x1eee0, 0x1eee0,
1712 0x1ef00, 0x1ef84,
1713 0x1efc0, 0x1efc8,
1714 0x1f008, 0x1f00c,
1715 0x1f040, 0x1f044,
1716 0x1f04c, 0x1f04c,
1717 0x1f284, 0x1f290,
1718 0x1f2c0, 0x1f2c0,
1719 0x1f2e0, 0x1f2e0,
1720 0x1f300, 0x1f384,
1721 0x1f3c0, 0x1f3c8,
1722 0x1f408, 0x1f40c,
1723 0x1f440, 0x1f444,
1724 0x1f44c, 0x1f44c,
1725 0x1f684, 0x1f690,
1726 0x1f6c0, 0x1f6c0,
1727 0x1f6e0, 0x1f6e0,
1728 0x1f700, 0x1f784,
1729 0x1f7c0, 0x1f7c8,
1730 0x1f808, 0x1f80c,
1731 0x1f840, 0x1f844,
1732 0x1f84c, 0x1f84c,
1733 0x1fa84, 0x1fa90,
1734 0x1fac0, 0x1fac0,
1735 0x1fae0, 0x1fae0,
1736 0x1fb00, 0x1fb84,
1737 0x1fbc0, 0x1fbc8,
1738 0x1fc08, 0x1fc0c,
1739 0x1fc40, 0x1fc44,
1740 0x1fc4c, 0x1fc4c,
1741 0x1fe84, 0x1fe90,
1742 0x1fec0, 0x1fec0,
1743 0x1fee0, 0x1fee0,
1744 0x1ff00, 0x1ff84,
1745 0x1ffc0, 0x1ffc8,
1746 0x30000, 0x30030,
1747 0x30100, 0x30144,
1748 0x30190, 0x301a0,
1749 0x301a8, 0x301b8,
1750 0x301c4, 0x301c8,
1751 0x301d0, 0x301d0,
1752 0x30200, 0x30318,
1753 0x30400, 0x304b4,
1754 0x304c0, 0x3052c,
1755 0x30540, 0x3061c,
1756 0x30800, 0x30828,
1757 0x30834, 0x30834,
1758 0x308c0, 0x30908,
1759 0x30910, 0x309ac,
1760 0x30a00, 0x30a14,
1761 0x30a1c, 0x30a2c,
1762 0x30a44, 0x30a50,
1763 0x30a74, 0x30a74,
1764 0x30a7c, 0x30afc,
1765 0x30b08, 0x30c24,
1766 0x30d00, 0x30d00,
1767 0x30d08, 0x30d14,
1768 0x30d1c, 0x30d20,
1769 0x30d3c, 0x30d3c,
1770 0x30d48, 0x30d50,
1771 0x31200, 0x3120c,
1772 0x31220, 0x31220,
1773 0x31240, 0x31240,
1774 0x31600, 0x3160c,
1775 0x31a00, 0x31a1c,
1776 0x31e00, 0x31e20,
1777 0x31e38, 0x31e3c,
1778 0x31e80, 0x31e80,
1779 0x31e88, 0x31ea8,
1780 0x31eb0, 0x31eb4,
1781 0x31ec8, 0x31ed4,
1782 0x31fb8, 0x32004,
1783 0x32200, 0x32200,
1784 0x32208, 0x32240,
1785 0x32248, 0x32280,
1786 0x32288, 0x322c0,
1787 0x322c8, 0x322fc,
1788 0x32600, 0x32630,
1789 0x32a00, 0x32abc,
1790 0x32b00, 0x32b10,
1791 0x32b20, 0x32b30,
1792 0x32b40, 0x32b50,
1793 0x32b60, 0x32b70,
1794 0x33000, 0x33028,
1795 0x33030, 0x33048,
1796 0x33060, 0x33068,
1797 0x33070, 0x3309c,
1798 0x330f0, 0x33128,
1799 0x33130, 0x33148,
1800 0x33160, 0x33168,
1801 0x33170, 0x3319c,
1802 0x331f0, 0x33238,
1803 0x33240, 0x33240,
1804 0x33248, 0x33250,
1805 0x3325c, 0x33264,
1806 0x33270, 0x332b8,
1807 0x332c0, 0x332e4,
1808 0x332f8, 0x33338,
1809 0x33340, 0x33340,
1810 0x33348, 0x33350,
1811 0x3335c, 0x33364,
1812 0x33370, 0x333b8,
1813 0x333c0, 0x333e4,
1814 0x333f8, 0x33428,
1815 0x33430, 0x33448,
1816 0x33460, 0x33468,
1817 0x33470, 0x3349c,
1818 0x334f0, 0x33528,
1819 0x33530, 0x33548,
1820 0x33560, 0x33568,
1821 0x33570, 0x3359c,
1822 0x335f0, 0x33638,
1823 0x33640, 0x33640,
1824 0x33648, 0x33650,
1825 0x3365c, 0x33664,
1826 0x33670, 0x336b8,
1827 0x336c0, 0x336e4,
1828 0x336f8, 0x33738,
1829 0x33740, 0x33740,
1830 0x33748, 0x33750,
1831 0x3375c, 0x33764,
1832 0x33770, 0x337b8,
1833 0x337c0, 0x337e4,
1834 0x337f8, 0x337fc,
1835 0x33814, 0x33814,
1836 0x3382c, 0x3382c,
1837 0x33880, 0x3388c,
1838 0x338e8, 0x338ec,
1839 0x33900, 0x33928,
1840 0x33930, 0x33948,
1841 0x33960, 0x33968,
1842 0x33970, 0x3399c,
1843 0x339f0, 0x33a38,
1844 0x33a40, 0x33a40,
1845 0x33a48, 0x33a50,
1846 0x33a5c, 0x33a64,
1847 0x33a70, 0x33ab8,
1848 0x33ac0, 0x33ae4,
1849 0x33af8, 0x33b10,
1850 0x33b28, 0x33b28,
1851 0x33b3c, 0x33b50,
1852 0x33bf0, 0x33c10,
1853 0x33c28, 0x33c28,
1854 0x33c3c, 0x33c50,
1855 0x33cf0, 0x33cfc,
1856 0x34000, 0x34030,
1857 0x34100, 0x34144,
1858 0x34190, 0x341a0,
1859 0x341a8, 0x341b8,
1860 0x341c4, 0x341c8,
1861 0x341d0, 0x341d0,
1862 0x34200, 0x34318,
1863 0x34400, 0x344b4,
1864 0x344c0, 0x3452c,
1865 0x34540, 0x3461c,
1866 0x34800, 0x34828,
1867 0x34834, 0x34834,
1868 0x348c0, 0x34908,
1869 0x34910, 0x349ac,
1870 0x34a00, 0x34a14,
1871 0x34a1c, 0x34a2c,
1872 0x34a44, 0x34a50,
1873 0x34a74, 0x34a74,
1874 0x34a7c, 0x34afc,
1875 0x34b08, 0x34c24,
1876 0x34d00, 0x34d00,
1877 0x34d08, 0x34d14,
1878 0x34d1c, 0x34d20,
1879 0x34d3c, 0x34d3c,
1880 0x34d48, 0x34d50,
1881 0x35200, 0x3520c,
1882 0x35220, 0x35220,
1883 0x35240, 0x35240,
1884 0x35600, 0x3560c,
1885 0x35a00, 0x35a1c,
1886 0x35e00, 0x35e20,
1887 0x35e38, 0x35e3c,
1888 0x35e80, 0x35e80,
1889 0x35e88, 0x35ea8,
1890 0x35eb0, 0x35eb4,
1891 0x35ec8, 0x35ed4,
1892 0x35fb8, 0x36004,
1893 0x36200, 0x36200,
1894 0x36208, 0x36240,
1895 0x36248, 0x36280,
1896 0x36288, 0x362c0,
1897 0x362c8, 0x362fc,
1898 0x36600, 0x36630,
1899 0x36a00, 0x36abc,
1900 0x36b00, 0x36b10,
1901 0x36b20, 0x36b30,
1902 0x36b40, 0x36b50,
1903 0x36b60, 0x36b70,
1904 0x37000, 0x37028,
1905 0x37030, 0x37048,
1906 0x37060, 0x37068,
1907 0x37070, 0x3709c,
1908 0x370f0, 0x37128,
1909 0x37130, 0x37148,
1910 0x37160, 0x37168,
1911 0x37170, 0x3719c,
1912 0x371f0, 0x37238,
1913 0x37240, 0x37240,
1914 0x37248, 0x37250,
1915 0x3725c, 0x37264,
1916 0x37270, 0x372b8,
1917 0x372c0, 0x372e4,
1918 0x372f8, 0x37338,
1919 0x37340, 0x37340,
1920 0x37348, 0x37350,
1921 0x3735c, 0x37364,
1922 0x37370, 0x373b8,
1923 0x373c0, 0x373e4,
1924 0x373f8, 0x37428,
1925 0x37430, 0x37448,
1926 0x37460, 0x37468,
1927 0x37470, 0x3749c,
1928 0x374f0, 0x37528,
1929 0x37530, 0x37548,
1930 0x37560, 0x37568,
1931 0x37570, 0x3759c,
1932 0x375f0, 0x37638,
1933 0x37640, 0x37640,
1934 0x37648, 0x37650,
1935 0x3765c, 0x37664,
1936 0x37670, 0x376b8,
1937 0x376c0, 0x376e4,
1938 0x376f8, 0x37738,
1939 0x37740, 0x37740,
1940 0x37748, 0x37750,
1941 0x3775c, 0x37764,
1942 0x37770, 0x377b8,
1943 0x377c0, 0x377e4,
1944 0x377f8, 0x377fc,
1945 0x37814, 0x37814,
1946 0x3782c, 0x3782c,
1947 0x37880, 0x3788c,
1948 0x378e8, 0x378ec,
1949 0x37900, 0x37928,
1950 0x37930, 0x37948,
1951 0x37960, 0x37968,
1952 0x37970, 0x3799c,
1953 0x379f0, 0x37a38,
1954 0x37a40, 0x37a40,
1955 0x37a48, 0x37a50,
1956 0x37a5c, 0x37a64,
1957 0x37a70, 0x37ab8,
1958 0x37ac0, 0x37ae4,
1959 0x37af8, 0x37b10,
1960 0x37b28, 0x37b28,
1961 0x37b3c, 0x37b50,
1962 0x37bf0, 0x37c10,
1963 0x37c28, 0x37c28,
1964 0x37c3c, 0x37c50,
1965 0x37cf0, 0x37cfc,
1966 0x38000, 0x38030,
1967 0x38100, 0x38144,
1968 0x38190, 0x381a0,
1969 0x381a8, 0x381b8,
1970 0x381c4, 0x381c8,
1971 0x381d0, 0x381d0,
1972 0x38200, 0x38318,
1973 0x38400, 0x384b4,
1974 0x384c0, 0x3852c,
1975 0x38540, 0x3861c,
1976 0x38800, 0x38828,
1977 0x38834, 0x38834,
1978 0x388c0, 0x38908,
1979 0x38910, 0x389ac,
1980 0x38a00, 0x38a14,
1981 0x38a1c, 0x38a2c,
1982 0x38a44, 0x38a50,
1983 0x38a74, 0x38a74,
1984 0x38a7c, 0x38afc,
1985 0x38b08, 0x38c24,
1986 0x38d00, 0x38d00,
1987 0x38d08, 0x38d14,
1988 0x38d1c, 0x38d20,
1989 0x38d3c, 0x38d3c,
1990 0x38d48, 0x38d50,
1991 0x39200, 0x3920c,
1992 0x39220, 0x39220,
1993 0x39240, 0x39240,
1994 0x39600, 0x3960c,
1995 0x39a00, 0x39a1c,
1996 0x39e00, 0x39e20,
1997 0x39e38, 0x39e3c,
1998 0x39e80, 0x39e80,
1999 0x39e88, 0x39ea8,
2000 0x39eb0, 0x39eb4,
2001 0x39ec8, 0x39ed4,
2002 0x39fb8, 0x3a004,
2003 0x3a200, 0x3a200,
2004 0x3a208, 0x3a240,
2005 0x3a248, 0x3a280,
2006 0x3a288, 0x3a2c0,
2007 0x3a2c8, 0x3a2fc,
2008 0x3a600, 0x3a630,
2009 0x3aa00, 0x3aabc,
2010 0x3ab00, 0x3ab10,
2011 0x3ab20, 0x3ab30,
2012 0x3ab40, 0x3ab50,
2013 0x3ab60, 0x3ab70,
2014 0x3b000, 0x3b028,
2015 0x3b030, 0x3b048,
2016 0x3b060, 0x3b068,
2017 0x3b070, 0x3b09c,
2018 0x3b0f0, 0x3b128,
2019 0x3b130, 0x3b148,
2020 0x3b160, 0x3b168,
2021 0x3b170, 0x3b19c,
2022 0x3b1f0, 0x3b238,
2023 0x3b240, 0x3b240,
2024 0x3b248, 0x3b250,
2025 0x3b25c, 0x3b264,
2026 0x3b270, 0x3b2b8,
2027 0x3b2c0, 0x3b2e4,
2028 0x3b2f8, 0x3b338,
2029 0x3b340, 0x3b340,
2030 0x3b348, 0x3b350,
2031 0x3b35c, 0x3b364,
2032 0x3b370, 0x3b3b8,
2033 0x3b3c0, 0x3b3e4,
2034 0x3b3f8, 0x3b428,
2035 0x3b430, 0x3b448,
2036 0x3b460, 0x3b468,
2037 0x3b470, 0x3b49c,
2038 0x3b4f0, 0x3b528,
2039 0x3b530, 0x3b548,
2040 0x3b560, 0x3b568,
2041 0x3b570, 0x3b59c,
2042 0x3b5f0, 0x3b638,
2043 0x3b640, 0x3b640,
2044 0x3b648, 0x3b650,
2045 0x3b65c, 0x3b664,
2046 0x3b670, 0x3b6b8,
2047 0x3b6c0, 0x3b6e4,
2048 0x3b6f8, 0x3b738,
2049 0x3b740, 0x3b740,
2050 0x3b748, 0x3b750,
2051 0x3b75c, 0x3b764,
2052 0x3b770, 0x3b7b8,
2053 0x3b7c0, 0x3b7e4,
2054 0x3b7f8, 0x3b7fc,
2055 0x3b814, 0x3b814,
2056 0x3b82c, 0x3b82c,
2057 0x3b880, 0x3b88c,
2058 0x3b8e8, 0x3b8ec,
2059 0x3b900, 0x3b928,
2060 0x3b930, 0x3b948,
2061 0x3b960, 0x3b968,
2062 0x3b970, 0x3b99c,
2063 0x3b9f0, 0x3ba38,
2064 0x3ba40, 0x3ba40,
2065 0x3ba48, 0x3ba50,
2066 0x3ba5c, 0x3ba64,
2067 0x3ba70, 0x3bab8,
2068 0x3bac0, 0x3bae4,
2069 0x3baf8, 0x3bb10,
2070 0x3bb28, 0x3bb28,
2071 0x3bb3c, 0x3bb50,
2072 0x3bbf0, 0x3bc10,
2073 0x3bc28, 0x3bc28,
2074 0x3bc3c, 0x3bc50,
2075 0x3bcf0, 0x3bcfc,
2076 0x3c000, 0x3c030,
2077 0x3c100, 0x3c144,
2078 0x3c190, 0x3c1a0,
2079 0x3c1a8, 0x3c1b8,
2080 0x3c1c4, 0x3c1c8,
2081 0x3c1d0, 0x3c1d0,
2082 0x3c200, 0x3c318,
2083 0x3c400, 0x3c4b4,
2084 0x3c4c0, 0x3c52c,
2085 0x3c540, 0x3c61c,
2086 0x3c800, 0x3c828,
2087 0x3c834, 0x3c834,
2088 0x3c8c0, 0x3c908,
2089 0x3c910, 0x3c9ac,
2090 0x3ca00, 0x3ca14,
2091 0x3ca1c, 0x3ca2c,
2092 0x3ca44, 0x3ca50,
2093 0x3ca74, 0x3ca74,
2094 0x3ca7c, 0x3cafc,
2095 0x3cb08, 0x3cc24,
2096 0x3cd00, 0x3cd00,
2097 0x3cd08, 0x3cd14,
2098 0x3cd1c, 0x3cd20,
2099 0x3cd3c, 0x3cd3c,
2100 0x3cd48, 0x3cd50,
2101 0x3d200, 0x3d20c,
2102 0x3d220, 0x3d220,
2103 0x3d240, 0x3d240,
2104 0x3d600, 0x3d60c,
2105 0x3da00, 0x3da1c,
2106 0x3de00, 0x3de20,
2107 0x3de38, 0x3de3c,
2108 0x3de80, 0x3de80,
2109 0x3de88, 0x3dea8,
2110 0x3deb0, 0x3deb4,
2111 0x3dec8, 0x3ded4,
2112 0x3dfb8, 0x3e004,
2113 0x3e200, 0x3e200,
2114 0x3e208, 0x3e240,
2115 0x3e248, 0x3e280,
2116 0x3e288, 0x3e2c0,
2117 0x3e2c8, 0x3e2fc,
2118 0x3e600, 0x3e630,
2119 0x3ea00, 0x3eabc,
2120 0x3eb00, 0x3eb10,
2121 0x3eb20, 0x3eb30,
2122 0x3eb40, 0x3eb50,
2123 0x3eb60, 0x3eb70,
2124 0x3f000, 0x3f028,
2125 0x3f030, 0x3f048,
2126 0x3f060, 0x3f068,
2127 0x3f070, 0x3f09c,
2128 0x3f0f0, 0x3f128,
2129 0x3f130, 0x3f148,
2130 0x3f160, 0x3f168,
2131 0x3f170, 0x3f19c,
2132 0x3f1f0, 0x3f238,
2133 0x3f240, 0x3f240,
2134 0x3f248, 0x3f250,
2135 0x3f25c, 0x3f264,
2136 0x3f270, 0x3f2b8,
2137 0x3f2c0, 0x3f2e4,
2138 0x3f2f8, 0x3f338,
2139 0x3f340, 0x3f340,
2140 0x3f348, 0x3f350,
2141 0x3f35c, 0x3f364,
2142 0x3f370, 0x3f3b8,
2143 0x3f3c0, 0x3f3e4,
2144 0x3f3f8, 0x3f428,
2145 0x3f430, 0x3f448,
2146 0x3f460, 0x3f468,
2147 0x3f470, 0x3f49c,
2148 0x3f4f0, 0x3f528,
2149 0x3f530, 0x3f548,
2150 0x3f560, 0x3f568,
2151 0x3f570, 0x3f59c,
2152 0x3f5f0, 0x3f638,
2153 0x3f640, 0x3f640,
2154 0x3f648, 0x3f650,
2155 0x3f65c, 0x3f664,
2156 0x3f670, 0x3f6b8,
2157 0x3f6c0, 0x3f6e4,
2158 0x3f6f8, 0x3f738,
2159 0x3f740, 0x3f740,
2160 0x3f748, 0x3f750,
2161 0x3f75c, 0x3f764,
2162 0x3f770, 0x3f7b8,
2163 0x3f7c0, 0x3f7e4,
2164 0x3f7f8, 0x3f7fc,
2165 0x3f814, 0x3f814,
2166 0x3f82c, 0x3f82c,
2167 0x3f880, 0x3f88c,
2168 0x3f8e8, 0x3f8ec,
2169 0x3f900, 0x3f928,
2170 0x3f930, 0x3f948,
2171 0x3f960, 0x3f968,
2172 0x3f970, 0x3f99c,
2173 0x3f9f0, 0x3fa38,
2174 0x3fa40, 0x3fa40,
2175 0x3fa48, 0x3fa50,
2176 0x3fa5c, 0x3fa64,
2177 0x3fa70, 0x3fab8,
2178 0x3fac0, 0x3fae4,
2179 0x3faf8, 0x3fb10,
2180 0x3fb28, 0x3fb28,
2181 0x3fb3c, 0x3fb50,
2182 0x3fbf0, 0x3fc10,
2183 0x3fc28, 0x3fc28,
2184 0x3fc3c, 0x3fc50,
2185 0x3fcf0, 0x3fcfc,
2186 0x40000, 0x4000c,
2187 0x40040, 0x40050,
2188 0x40060, 0x40068,
2189 0x4007c, 0x4008c,
2190 0x40094, 0x400b0,
2191 0x400c0, 0x40144,
2192 0x40180, 0x4018c,
2193 0x40200, 0x40254,
2194 0x40260, 0x40264,
2195 0x40270, 0x40288,
2196 0x40290, 0x40298,
2197 0x402ac, 0x402c8,
2198 0x402d0, 0x402e0,
2199 0x402f0, 0x402f0,
2200 0x40300, 0x4033c,
2201 0x403f8, 0x403fc,
2202 0x41304, 0x413c4,
2203 0x41400, 0x4140c,
2204 0x41414, 0x4141c,
2205 0x41480, 0x414d0,
2206 0x44000, 0x44054,
2207 0x4405c, 0x44078,
2208 0x440c0, 0x44174,
2209 0x44180, 0x441ac,
2210 0x441b4, 0x441b8,
2211 0x441c0, 0x44254,
2212 0x4425c, 0x44278,
2213 0x442c0, 0x44374,
2214 0x44380, 0x443ac,
2215 0x443b4, 0x443b8,
2216 0x443c0, 0x44454,
2217 0x4445c, 0x44478,
2218 0x444c0, 0x44574,
2219 0x44580, 0x445ac,
2220 0x445b4, 0x445b8,
2221 0x445c0, 0x44654,
2222 0x4465c, 0x44678,
2223 0x446c0, 0x44774,
2224 0x44780, 0x447ac,
2225 0x447b4, 0x447b8,
2226 0x447c0, 0x44854,
2227 0x4485c, 0x44878,
2228 0x448c0, 0x44974,
2229 0x44980, 0x449ac,
2230 0x449b4, 0x449b8,
2231 0x449c0, 0x449fc,
2232 0x45000, 0x45004,
2233 0x45010, 0x45030,
2234 0x45040, 0x45060,
2235 0x45068, 0x45068,
2236 0x45080, 0x45084,
2237 0x450a0, 0x450b0,
2238 0x45200, 0x45204,
2239 0x45210, 0x45230,
2240 0x45240, 0x45260,
2241 0x45268, 0x45268,
2242 0x45280, 0x45284,
2243 0x452a0, 0x452b0,
2244 0x460c0, 0x460e4,
2245 0x47000, 0x4703c,
2246 0x47044, 0x4708c,
2247 0x47200, 0x47250,
2248 0x47400, 0x47408,
2249 0x47414, 0x47420,
2250 0x47600, 0x47618,
2251 0x47800, 0x47814,
2252 0x48000, 0x4800c,
2253 0x48040, 0x48050,
2254 0x48060, 0x48068,
2255 0x4807c, 0x4808c,
2256 0x48094, 0x480b0,
2257 0x480c0, 0x48144,
2258 0x48180, 0x4818c,
2259 0x48200, 0x48254,
2260 0x48260, 0x48264,
2261 0x48270, 0x48288,
2262 0x48290, 0x48298,
2263 0x482ac, 0x482c8,
2264 0x482d0, 0x482e0,
2265 0x482f0, 0x482f0,
2266 0x48300, 0x4833c,
2267 0x483f8, 0x483fc,
2268 0x49304, 0x493c4,
2269 0x49400, 0x4940c,
2270 0x49414, 0x4941c,
2271 0x49480, 0x494d0,
2272 0x4c000, 0x4c054,
2273 0x4c05c, 0x4c078,
2274 0x4c0c0, 0x4c174,
2275 0x4c180, 0x4c1ac,
2276 0x4c1b4, 0x4c1b8,
2277 0x4c1c0, 0x4c254,
2278 0x4c25c, 0x4c278,
2279 0x4c2c0, 0x4c374,
2280 0x4c380, 0x4c3ac,
2281 0x4c3b4, 0x4c3b8,
2282 0x4c3c0, 0x4c454,
2283 0x4c45c, 0x4c478,
2284 0x4c4c0, 0x4c574,
2285 0x4c580, 0x4c5ac,
2286 0x4c5b4, 0x4c5b8,
2287 0x4c5c0, 0x4c654,
2288 0x4c65c, 0x4c678,
2289 0x4c6c0, 0x4c774,
2290 0x4c780, 0x4c7ac,
2291 0x4c7b4, 0x4c7b8,
2292 0x4c7c0, 0x4c854,
2293 0x4c85c, 0x4c878,
2294 0x4c8c0, 0x4c974,
2295 0x4c980, 0x4c9ac,
2296 0x4c9b4, 0x4c9b8,
2297 0x4c9c0, 0x4c9fc,
2298 0x4d000, 0x4d004,
2299 0x4d010, 0x4d030,
2300 0x4d040, 0x4d060,
2301 0x4d068, 0x4d068,
2302 0x4d080, 0x4d084,
2303 0x4d0a0, 0x4d0b0,
2304 0x4d200, 0x4d204,
2305 0x4d210, 0x4d230,
2306 0x4d240, 0x4d260,
2307 0x4d268, 0x4d268,
2308 0x4d280, 0x4d284,
2309 0x4d2a0, 0x4d2b0,
2310 0x4e0c0, 0x4e0e4,
2311 0x4f000, 0x4f03c,
2312 0x4f044, 0x4f08c,
2313 0x4f200, 0x4f250,
2314 0x4f400, 0x4f408,
2315 0x4f414, 0x4f420,
2316 0x4f600, 0x4f618,
2317 0x4f800, 0x4f814,
2318 0x50000, 0x50084,
2319 0x50090, 0x500cc,
2320 0x50400, 0x50400,
2321 0x50800, 0x50884,
2322 0x50890, 0x508cc,
2323 0x50c00, 0x50c00,
2324 0x51000, 0x5101c,
2325 0x51300, 0x51308,
2326 };
2327
2328 static const unsigned int t6_reg_ranges[] = {
2329 0x1008, 0x101c,
2330 0x1024, 0x10a8,
2331 0x10b4, 0x10f8,
2332 0x1100, 0x1114,
2333 0x111c, 0x112c,
2334 0x1138, 0x113c,
2335 0x1144, 0x114c,
2336 0x1180, 0x1184,
2337 0x1190, 0x1194,
2338 0x11a0, 0x11a4,
2339 0x11b0, 0x11c4,
2340 0x11fc, 0x1274,
2341 0x1280, 0x133c,
2342 0x1800, 0x18fc,
2343 0x3000, 0x302c,
2344 0x3060, 0x30b0,
2345 0x30b8, 0x30d8,
2346 0x30e0, 0x30fc,
2347 0x3140, 0x357c,
2348 0x35a8, 0x35cc,
2349 0x35ec, 0x35ec,
2350 0x3600, 0x5624,
2351 0x56cc, 0x56ec,
2352 0x56f4, 0x5720,
2353 0x5728, 0x575c,
2354 0x580c, 0x5814,
2355 0x5890, 0x589c,
2356 0x58a4, 0x58ac,
2357 0x58b8, 0x58bc,
2358 0x5940, 0x595c,
2359 0x5980, 0x598c,
2360 0x59b0, 0x59c8,
2361 0x59d0, 0x59dc,
2362 0x59fc, 0x5a18,
2363 0x5a60, 0x5a6c,
2364 0x5a80, 0x5a8c,
2365 0x5a94, 0x5a9c,
2366 0x5b94, 0x5bfc,
2367 0x5c10, 0x5e48,
2368 0x5e50, 0x5e94,
2369 0x5ea0, 0x5eb0,
2370 0x5ec0, 0x5ec0,
2371 0x5ec8, 0x5ed0,
2372 0x5ee0, 0x5ee0,
2373 0x5ef0, 0x5ef0,
2374 0x5f00, 0x5f00,
2375 0x6000, 0x6020,
2376 0x6028, 0x6040,
2377 0x6058, 0x609c,
2378 0x60a8, 0x619c,
2379 0x7700, 0x7798,
2380 0x77c0, 0x7880,
2381 0x78cc, 0x78fc,
2382 0x7b00, 0x7b58,
2383 0x7b60, 0x7b84,
2384 0x7b8c, 0x7c54,
2385 0x7d00, 0x7d38,
2386 0x7d40, 0x7d84,
2387 0x7d8c, 0x7ddc,
2388 0x7de4, 0x7e04,
2389 0x7e10, 0x7e1c,
2390 0x7e24, 0x7e38,
2391 0x7e40, 0x7e44,
2392 0x7e4c, 0x7e78,
2393 0x7e80, 0x7edc,
2394 0x7ee8, 0x7efc,
2395 0x8dc0, 0x8de0,
2396 0x8df8, 0x8e04,
2397 0x8e10, 0x8e84,
2398 0x8ea0, 0x8f88,
2399 0x8fb8, 0x9058,
2400 0x9060, 0x9060,
2401 0x9068, 0x90f8,
2402 0x9100, 0x9124,
2403 0x9400, 0x9470,
2404 0x9600, 0x9600,
2405 0x9608, 0x9638,
2406 0x9640, 0x9704,
2407 0x9710, 0x971c,
2408 0x9800, 0x9808,
2409 0x9820, 0x983c,
2410 0x9850, 0x9864,
2411 0x9c00, 0x9c6c,
2412 0x9c80, 0x9cec,
2413 0x9d00, 0x9d6c,
2414 0x9d80, 0x9dec,
2415 0x9e00, 0x9e6c,
2416 0x9e80, 0x9eec,
2417 0x9f00, 0x9f6c,
2418 0x9f80, 0xa020,
2419 0xd004, 0xd03c,
2420 0xd100, 0xd118,
2421 0xd200, 0xd214,
2422 0xd220, 0xd234,
2423 0xd240, 0xd254,
2424 0xd260, 0xd274,
2425 0xd280, 0xd294,
2426 0xd2a0, 0xd2b4,
2427 0xd2c0, 0xd2d4,
2428 0xd2e0, 0xd2f4,
2429 0xd300, 0xd31c,
2430 0xdfc0, 0xdfe0,
2431 0xe000, 0xf008,
2432 0xf010, 0xf018,
2433 0xf020, 0xf028,
2434 0x11000, 0x11014,
2435 0x11048, 0x1106c,
2436 0x11074, 0x11088,
2437 0x11098, 0x11120,
2438 0x1112c, 0x1117c,
2439 0x11190, 0x112e0,
2440 0x11300, 0x1130c,
2441 0x12000, 0x1206c,
2442 0x19040, 0x1906c,
2443 0x19078, 0x19080,
2444 0x1908c, 0x190e8,
2445 0x190f0, 0x190f8,
2446 0x19100, 0x19110,
2447 0x19120, 0x19124,
2448 0x19150, 0x19194,
2449 0x1919c, 0x191b0,
2450 0x191d0, 0x191e8,
2451 0x19238, 0x19290,
2452 0x192a4, 0x192b0,
2453 0x19348, 0x1934c,
2454 0x193f8, 0x19418,
2455 0x19420, 0x19428,
2456 0x19430, 0x19444,
2457 0x1944c, 0x1946c,
2458 0x19474, 0x19474,
2459 0x19490, 0x194cc,
2460 0x194f0, 0x194f8,
2461 0x19c00, 0x19c48,
2462 0x19c50, 0x19c80,
2463 0x19c94, 0x19c98,
2464 0x19ca0, 0x19cbc,
2465 0x19ce4, 0x19ce4,
2466 0x19cf0, 0x19cf8,
2467 0x19d00, 0x19d28,
2468 0x19d50, 0x19d78,
2469 0x19d94, 0x19d98,
2470 0x19da0, 0x19de0,
2471 0x19df0, 0x19e10,
2472 0x19e50, 0x19e6c,
2473 0x19ea0, 0x19ebc,
2474 0x19ec4, 0x19ef4,
2475 0x19f04, 0x19f2c,
2476 0x19f34, 0x19f34,
2477 0x19f40, 0x19f50,
2478 0x19f90, 0x19fac,
2479 0x19fc4, 0x19fc8,
2480 0x19fd0, 0x19fe4,
2481 0x1a000, 0x1a004,
2482 0x1a010, 0x1a06c,
2483 0x1a0b0, 0x1a0e4,
2484 0x1a0ec, 0x1a0f8,
2485 0x1a100, 0x1a108,
2486 0x1a114, 0x1a120,
2487 0x1a128, 0x1a130,
2488 0x1a138, 0x1a138,
2489 0x1a190, 0x1a1c4,
2490 0x1a1fc, 0x1a1fc,
2491 0x1e008, 0x1e00c,
2492 0x1e040, 0x1e044,
2493 0x1e04c, 0x1e04c,
2494 0x1e284, 0x1e290,
2495 0x1e2c0, 0x1e2c0,
2496 0x1e2e0, 0x1e2e0,
2497 0x1e300, 0x1e384,
2498 0x1e3c0, 0x1e3c8,
2499 0x1e408, 0x1e40c,
2500 0x1e440, 0x1e444,
2501 0x1e44c, 0x1e44c,
2502 0x1e684, 0x1e690,
2503 0x1e6c0, 0x1e6c0,
2504 0x1e6e0, 0x1e6e0,
2505 0x1e700, 0x1e784,
2506 0x1e7c0, 0x1e7c8,
2507 0x1e808, 0x1e80c,
2508 0x1e840, 0x1e844,
2509 0x1e84c, 0x1e84c,
2510 0x1ea84, 0x1ea90,
2511 0x1eac0, 0x1eac0,
2512 0x1eae0, 0x1eae0,
2513 0x1eb00, 0x1eb84,
2514 0x1ebc0, 0x1ebc8,
2515 0x1ec08, 0x1ec0c,
2516 0x1ec40, 0x1ec44,
2517 0x1ec4c, 0x1ec4c,
2518 0x1ee84, 0x1ee90,
2519 0x1eec0, 0x1eec0,
2520 0x1eee0, 0x1eee0,
2521 0x1ef00, 0x1ef84,
2522 0x1efc0, 0x1efc8,
2523 0x1f008, 0x1f00c,
2524 0x1f040, 0x1f044,
2525 0x1f04c, 0x1f04c,
2526 0x1f284, 0x1f290,
2527 0x1f2c0, 0x1f2c0,
2528 0x1f2e0, 0x1f2e0,
2529 0x1f300, 0x1f384,
2530 0x1f3c0, 0x1f3c8,
2531 0x1f408, 0x1f40c,
2532 0x1f440, 0x1f444,
2533 0x1f44c, 0x1f44c,
2534 0x1f684, 0x1f690,
2535 0x1f6c0, 0x1f6c0,
2536 0x1f6e0, 0x1f6e0,
2537 0x1f700, 0x1f784,
2538 0x1f7c0, 0x1f7c8,
2539 0x1f808, 0x1f80c,
2540 0x1f840, 0x1f844,
2541 0x1f84c, 0x1f84c,
2542 0x1fa84, 0x1fa90,
2543 0x1fac0, 0x1fac0,
2544 0x1fae0, 0x1fae0,
2545 0x1fb00, 0x1fb84,
2546 0x1fbc0, 0x1fbc8,
2547 0x1fc08, 0x1fc0c,
2548 0x1fc40, 0x1fc44,
2549 0x1fc4c, 0x1fc4c,
2550 0x1fe84, 0x1fe90,
2551 0x1fec0, 0x1fec0,
2552 0x1fee0, 0x1fee0,
2553 0x1ff00, 0x1ff84,
2554 0x1ffc0, 0x1ffc8,
2555 0x30000, 0x30030,
2556 0x30100, 0x30168,
2557 0x30190, 0x301a0,
2558 0x301a8, 0x301b8,
2559 0x301c4, 0x301c8,
2560 0x301d0, 0x301d0,
2561 0x30200, 0x30320,
2562 0x30400, 0x304b4,
2563 0x304c0, 0x3052c,
2564 0x30540, 0x3061c,
2565 0x30800, 0x308a0,
2566 0x308c0, 0x30908,
2567 0x30910, 0x309b8,
2568 0x30a00, 0x30a04,
2569 0x30a0c, 0x30a14,
2570 0x30a1c, 0x30a2c,
2571 0x30a44, 0x30a50,
2572 0x30a74, 0x30a74,
2573 0x30a7c, 0x30afc,
2574 0x30b08, 0x30c24,
2575 0x30d00, 0x30d14,
2576 0x30d1c, 0x30d3c,
2577 0x30d44, 0x30d4c,
2578 0x30d54, 0x30d74,
2579 0x30d7c, 0x30d7c,
2580 0x30de0, 0x30de0,
2581 0x30e00, 0x30ed4,
2582 0x30f00, 0x30fa4,
2583 0x30fc0, 0x30fc4,
2584 0x31000, 0x31004,
2585 0x31080, 0x310fc,
2586 0x31208, 0x31220,
2587 0x3123c, 0x31254,
2588 0x31300, 0x31300,
2589 0x31308, 0x3131c,
2590 0x31338, 0x3133c,
2591 0x31380, 0x31380,
2592 0x31388, 0x313a8,
2593 0x313b4, 0x313b4,
2594 0x31400, 0x31420,
2595 0x31438, 0x3143c,
2596 0x31480, 0x31480,
2597 0x314a8, 0x314a8,
2598 0x314b0, 0x314b4,
2599 0x314c8, 0x314d4,
2600 0x31a40, 0x31a4c,
2601 0x31af0, 0x31b20,
2602 0x31b38, 0x31b3c,
2603 0x31b80, 0x31b80,
2604 0x31ba8, 0x31ba8,
2605 0x31bb0, 0x31bb4,
2606 0x31bc8, 0x31bd4,
2607 0x32140, 0x3218c,
2608 0x321f0, 0x321f4,
2609 0x32200, 0x32200,
2610 0x32218, 0x32218,
2611 0x32400, 0x32400,
2612 0x32408, 0x3241c,
2613 0x32618, 0x32620,
2614 0x32664, 0x32664,
2615 0x326a8, 0x326a8,
2616 0x326ec, 0x326ec,
2617 0x32a00, 0x32abc,
2618 0x32b00, 0x32b18,
2619 0x32b20, 0x32b38,
2620 0x32b40, 0x32b58,
2621 0x32b60, 0x32b78,
2622 0x32c00, 0x32c00,
2623 0x32c08, 0x32c3c,
2624 0x33000, 0x3302c,
2625 0x33034, 0x33050,
2626 0x33058, 0x33058,
2627 0x33060, 0x3308c,
2628 0x3309c, 0x330ac,
2629 0x330c0, 0x330c0,
2630 0x330c8, 0x330d0,
2631 0x330d8, 0x330e0,
2632 0x330ec, 0x3312c,
2633 0x33134, 0x33150,
2634 0x33158, 0x33158,
2635 0x33160, 0x3318c,
2636 0x3319c, 0x331ac,
2637 0x331c0, 0x331c0,
2638 0x331c8, 0x331d0,
2639 0x331d8, 0x331e0,
2640 0x331ec, 0x33290,
2641 0x33298, 0x332c4,
2642 0x332e4, 0x33390,
2643 0x33398, 0x333c4,
2644 0x333e4, 0x3342c,
2645 0x33434, 0x33450,
2646 0x33458, 0x33458,
2647 0x33460, 0x3348c,
2648 0x3349c, 0x334ac,
2649 0x334c0, 0x334c0,
2650 0x334c8, 0x334d0,
2651 0x334d8, 0x334e0,
2652 0x334ec, 0x3352c,
2653 0x33534, 0x33550,
2654 0x33558, 0x33558,
2655 0x33560, 0x3358c,
2656 0x3359c, 0x335ac,
2657 0x335c0, 0x335c0,
2658 0x335c8, 0x335d0,
2659 0x335d8, 0x335e0,
2660 0x335ec, 0x33690,
2661 0x33698, 0x336c4,
2662 0x336e4, 0x33790,
2663 0x33798, 0x337c4,
2664 0x337e4, 0x337fc,
2665 0x33814, 0x33814,
2666 0x33854, 0x33868,
2667 0x33880, 0x3388c,
2668 0x338c0, 0x338d0,
2669 0x338e8, 0x338ec,
2670 0x33900, 0x3392c,
2671 0x33934, 0x33950,
2672 0x33958, 0x33958,
2673 0x33960, 0x3398c,
2674 0x3399c, 0x339ac,
2675 0x339c0, 0x339c0,
2676 0x339c8, 0x339d0,
2677 0x339d8, 0x339e0,
2678 0x339ec, 0x33a90,
2679 0x33a98, 0x33ac4,
2680 0x33ae4, 0x33b10,
2681 0x33b24, 0x33b28,
2682 0x33b38, 0x33b50,
2683 0x33bf0, 0x33c10,
2684 0x33c24, 0x33c28,
2685 0x33c38, 0x33c50,
2686 0x33cf0, 0x33cfc,
2687 0x34000, 0x34030,
2688 0x34100, 0x34168,
2689 0x34190, 0x341a0,
2690 0x341a8, 0x341b8,
2691 0x341c4, 0x341c8,
2692 0x341d0, 0x341d0,
2693 0x34200, 0x34320,
2694 0x34400, 0x344b4,
2695 0x344c0, 0x3452c,
2696 0x34540, 0x3461c,
2697 0x34800, 0x348a0,
2698 0x348c0, 0x34908,
2699 0x34910, 0x349b8,
2700 0x34a00, 0x34a04,
2701 0x34a0c, 0x34a14,
2702 0x34a1c, 0x34a2c,
2703 0x34a44, 0x34a50,
2704 0x34a74, 0x34a74,
2705 0x34a7c, 0x34afc,
2706 0x34b08, 0x34c24,
2707 0x34d00, 0x34d14,
2708 0x34d1c, 0x34d3c,
2709 0x34d44, 0x34d4c,
2710 0x34d54, 0x34d74,
2711 0x34d7c, 0x34d7c,
2712 0x34de0, 0x34de0,
2713 0x34e00, 0x34ed4,
2714 0x34f00, 0x34fa4,
2715 0x34fc0, 0x34fc4,
2716 0x35000, 0x35004,
2717 0x35080, 0x350fc,
2718 0x35208, 0x35220,
2719 0x3523c, 0x35254,
2720 0x35300, 0x35300,
2721 0x35308, 0x3531c,
2722 0x35338, 0x3533c,
2723 0x35380, 0x35380,
2724 0x35388, 0x353a8,
2725 0x353b4, 0x353b4,
2726 0x35400, 0x35420,
2727 0x35438, 0x3543c,
2728 0x35480, 0x35480,
2729 0x354a8, 0x354a8,
2730 0x354b0, 0x354b4,
2731 0x354c8, 0x354d4,
2732 0x35a40, 0x35a4c,
2733 0x35af0, 0x35b20,
2734 0x35b38, 0x35b3c,
2735 0x35b80, 0x35b80,
2736 0x35ba8, 0x35ba8,
2737 0x35bb0, 0x35bb4,
2738 0x35bc8, 0x35bd4,
2739 0x36140, 0x3618c,
2740 0x361f0, 0x361f4,
2741 0x36200, 0x36200,
2742 0x36218, 0x36218,
2743 0x36400, 0x36400,
2744 0x36408, 0x3641c,
2745 0x36618, 0x36620,
2746 0x36664, 0x36664,
2747 0x366a8, 0x366a8,
2748 0x366ec, 0x366ec,
2749 0x36a00, 0x36abc,
2750 0x36b00, 0x36b18,
2751 0x36b20, 0x36b38,
2752 0x36b40, 0x36b58,
2753 0x36b60, 0x36b78,
2754 0x36c00, 0x36c00,
2755 0x36c08, 0x36c3c,
2756 0x37000, 0x3702c,
2757 0x37034, 0x37050,
2758 0x37058, 0x37058,
2759 0x37060, 0x3708c,
2760 0x3709c, 0x370ac,
2761 0x370c0, 0x370c0,
2762 0x370c8, 0x370d0,
2763 0x370d8, 0x370e0,
2764 0x370ec, 0x3712c,
2765 0x37134, 0x37150,
2766 0x37158, 0x37158,
2767 0x37160, 0x3718c,
2768 0x3719c, 0x371ac,
2769 0x371c0, 0x371c0,
2770 0x371c8, 0x371d0,
2771 0x371d8, 0x371e0,
2772 0x371ec, 0x37290,
2773 0x37298, 0x372c4,
2774 0x372e4, 0x37390,
2775 0x37398, 0x373c4,
2776 0x373e4, 0x3742c,
2777 0x37434, 0x37450,
2778 0x37458, 0x37458,
2779 0x37460, 0x3748c,
2780 0x3749c, 0x374ac,
2781 0x374c0, 0x374c0,
2782 0x374c8, 0x374d0,
2783 0x374d8, 0x374e0,
2784 0x374ec, 0x3752c,
2785 0x37534, 0x37550,
2786 0x37558, 0x37558,
2787 0x37560, 0x3758c,
2788 0x3759c, 0x375ac,
2789 0x375c0, 0x375c0,
2790 0x375c8, 0x375d0,
2791 0x375d8, 0x375e0,
2792 0x375ec, 0x37690,
2793 0x37698, 0x376c4,
2794 0x376e4, 0x37790,
2795 0x37798, 0x377c4,
2796 0x377e4, 0x377fc,
2797 0x37814, 0x37814,
2798 0x37854, 0x37868,
2799 0x37880, 0x3788c,
2800 0x378c0, 0x378d0,
2801 0x378e8, 0x378ec,
2802 0x37900, 0x3792c,
2803 0x37934, 0x37950,
2804 0x37958, 0x37958,
2805 0x37960, 0x3798c,
2806 0x3799c, 0x379ac,
2807 0x379c0, 0x379c0,
2808 0x379c8, 0x379d0,
2809 0x379d8, 0x379e0,
2810 0x379ec, 0x37a90,
2811 0x37a98, 0x37ac4,
2812 0x37ae4, 0x37b10,
2813 0x37b24, 0x37b28,
2814 0x37b38, 0x37b50,
2815 0x37bf0, 0x37c10,
2816 0x37c24, 0x37c28,
2817 0x37c38, 0x37c50,
2818 0x37cf0, 0x37cfc,
2819 0x40040, 0x40040,
2820 0x40080, 0x40084,
2821 0x40100, 0x40100,
2822 0x40140, 0x401bc,
2823 0x40200, 0x40214,
2824 0x40228, 0x40228,
2825 0x40240, 0x40258,
2826 0x40280, 0x40280,
2827 0x40304, 0x40304,
2828 0x40330, 0x4033c,
2829 0x41304, 0x413c8,
2830 0x413d0, 0x413dc,
2831 0x413f0, 0x413f0,
2832 0x41400, 0x4140c,
2833 0x41414, 0x4141c,
2834 0x41480, 0x414d0,
2835 0x44000, 0x4407c,
2836 0x440c0, 0x441ac,
2837 0x441b4, 0x4427c,
2838 0x442c0, 0x443ac,
2839 0x443b4, 0x4447c,
2840 0x444c0, 0x445ac,
2841 0x445b4, 0x4467c,
2842 0x446c0, 0x447ac,
2843 0x447b4, 0x4487c,
2844 0x448c0, 0x449ac,
2845 0x449b4, 0x44a7c,
2846 0x44ac0, 0x44bac,
2847 0x44bb4, 0x44c7c,
2848 0x44cc0, 0x44dac,
2849 0x44db4, 0x44e7c,
2850 0x44ec0, 0x44fac,
2851 0x44fb4, 0x4507c,
2852 0x450c0, 0x451ac,
2853 0x451b4, 0x451fc,
2854 0x45800, 0x45804,
2855 0x45810, 0x45830,
2856 0x45840, 0x45860,
2857 0x45868, 0x45868,
2858 0x45880, 0x45884,
2859 0x458a0, 0x458b0,
2860 0x45a00, 0x45a04,
2861 0x45a10, 0x45a30,
2862 0x45a40, 0x45a60,
2863 0x45a68, 0x45a68,
2864 0x45a80, 0x45a84,
2865 0x45aa0, 0x45ab0,
2866 0x460c0, 0x460e4,
2867 0x47000, 0x4703c,
2868 0x47044, 0x4708c,
2869 0x47200, 0x47250,
2870 0x47400, 0x47408,
2871 0x47414, 0x47420,
2872 0x47600, 0x47618,
2873 0x47800, 0x47814,
2874 0x47820, 0x4782c,
2875 0x50000, 0x50084,
2876 0x50090, 0x500cc,
2877 0x50300, 0x50384,
2878 0x50400, 0x50400,
2879 0x50800, 0x50884,
2880 0x50890, 0x508cc,
2881 0x50b00, 0x50b84,
2882 0x50c00, 0x50c00,
2883 0x51000, 0x51020,
2884 0x51028, 0x510b0,
2885 0x51300, 0x51324,
2886 };
2887
2888 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2889 const unsigned int *reg_ranges;
2890 int reg_ranges_size, range;
2891 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2892
2893 /* Select the right set of register ranges to dump depending on the
2894 * adapter chip type.
2895 */
2896 switch (chip_version) {
2897 case CHELSIO_T4:
2898 reg_ranges = t4_reg_ranges;
2899 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2900 break;
2901
2902 case CHELSIO_T5:
2903 reg_ranges = t5_reg_ranges;
2904 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2905 break;
2906
2907 case CHELSIO_T6:
2908 reg_ranges = t6_reg_ranges;
2909 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2910 break;
2911
2912 default:
2913 CH_ERR(adap,
2914 "Unsupported chip version %d\n", chip_version);
2915 return;
2916 }
2917
2918 /* Clear the register buffer and insert the appropriate register
2919 * values selected by the above register ranges.
2920 */
2921 memset(buf, 0, buf_size);
2922 for (range = 0; range < reg_ranges_size; range += 2) {
2923 unsigned int reg = reg_ranges[range];
2924 unsigned int last_reg = reg_ranges[range + 1];
2925 u32 *bufp = (u32 *)((char *)buf + reg);
2926
2927 /* Iterate across the register range filling in the register
2928 * buffer but don't write past the end of the register buffer.
2929 */
2930 while (reg <= last_reg && bufp < buf_end) {
2931 *bufp++ = t4_read_reg(adap, reg);
2932 reg += sizeof(u32);
2933 }
2934 }
2935 }
2936
2937 /*
2938 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2939 */
2940 #define EEPROM_DELAY 10 // 10us per poll spin
2941 #define EEPROM_MAX_POLL 5000 // x 5000 == 50ms
2942
2943 #define EEPROM_STAT_ADDR 0x7bfc
2944 #define VPD_SIZE 0x800
2945 #define VPD_BASE 0x400
2946 #define VPD_BASE_OLD 0
2947 #define VPD_LEN 1024
2948 #define VPD_INFO_FLD_HDR_SIZE 3
2949 #define CHELSIO_VPD_UNIQUE_ID 0x82
2950
2951 /*
2952 * Small utility function to wait till any outstanding VPD Access is complete.
2953 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2954 * VPD Access in flight. This allows us to handle the problem of having a
2955 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
2957 */
t4_seeprom_wait(struct adapter * adapter)2958 static int t4_seeprom_wait(struct adapter *adapter)
2959 {
2960 unsigned int base = adapter->params.pci.vpd_cap_addr;
2961 int max_poll;
2962
2963 /*
2964 * If no VPD Access is in flight, we can just return success right
2965 * away.
2966 */
2967 if (!adapter->vpd_busy)
2968 return 0;
2969
2970 /*
2971 * Poll the VPD Capability Address/Flag register waiting for it
2972 * to indicate that the operation is complete.
2973 */
2974 max_poll = EEPROM_MAX_POLL;
2975 do {
2976 u16 val;
2977
2978 udelay(EEPROM_DELAY);
2979 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2980
2981 /*
2982 * If the operation is complete, mark the VPD as no longer
2983 * busy and return success.
2984 */
2985 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2986 adapter->vpd_busy = 0;
2987 return 0;
2988 }
2989 } while (--max_poll);
2990
2991 /*
2992 * Failure! Note that we leave the VPD Busy status set in order to
2993 * avoid pushing a new VPD Access request into the VPD Capability till
2994 * the current operation eventually succeeds. It's a bug to issue a
2995 * new request when an existing request is in flight and will result
2996 * in corrupt hardware state.
2997 */
2998 return -ETIMEDOUT;
2999 }
3000
3001 /**
3002 * t4_seeprom_read - read a serial EEPROM location
3003 * @adapter: adapter to read
3004 * @addr: EEPROM virtual address
3005 * @data: where to store the read data
3006 *
3007 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
3008 * VPD capability. Note that this function must be called with a virtual
3009 * address.
3010 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int vpd_base = adapter->params.pci.vpd_cap_addr;
	int rc;

	/*
	 * VPD accesses must always be 4-byte aligned and within the
	 * virtual EEPROM space.
	 */
	if (addr >= EEPROMVSIZE || (addr & 3) != 0)
		return -EINVAL;

	/*
	 * Let any previous VPD access drain before injecting a new one.
	 */
	rc = t4_seeprom_wait(adapter);
	if (rc != 0) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return rc;
	}

	/*
	 * Launch the read, mark the VPD busy and wait for completion.
	 * On timeout we intentionally do NOT clear the busy status --
	 * see t4_seeprom_wait().
	 */
	t4_os_pci_write_cfg2(adapter, vpd_base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	rc = t4_seeprom_wait(adapter);
	if (rc != 0) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return rc;
	}

	/*
	 * Fetch the returned little-endian data and convert it to host
	 * byte order.
	 */
	t4_os_pci_read_cfg4(adapter, vpd_base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
3055
3056 /**
3057 * t4_seeprom_write - write a serial EEPROM location
3058 * @adapter: adapter to write
3059 * @addr: virtual EEPROM address
3060 * @data: value to write
3061 *
3062 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
3063 * VPD capability. Note that this function must be called with a virtual
3064 * address.
3065 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int vpd_base = adapter->params.pci.vpd_cap_addr;
	u32 stat;
	int polls_left;
	int rc;

	/*
	 * VPD accesses must always be 4-byte aligned and within the
	 * virtual EEPROM space.
	 */
	if (addr >= EEPROMVSIZE || (addr & 3) != 0)
		return -EINVAL;

	/*
	 * Let any previous VPD access drain before injecting a new one.
	 */
	rc = t4_seeprom_wait(adapter);
	if (rc != 0) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return rc;
	}

	/*
	 * Launch the write (data word first, then the address with the
	 * Flag bit set), mark the VPD busy and wait for completion.  On
	 * timeout we intentionally do NOT clear the busy status -- see
	 * t4_seeprom_wait().
	 */
	t4_os_pci_write_cfg4(adapter, vpd_base + PCI_VPD_DATA,
	    cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, vpd_base + PCI_VPD_ADDR,
	    (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	rc = t4_seeprom_wait(adapter);
	if (rc != 0) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return rc;
	}

	/*
	 * Reset PCI_VPD_DATA and poll the EEPROM status word until the
	 * device reports the write has committed (bit 0 clears).
	 * NOTE(review): the t4_seeprom_read() return value is ignored
	 * here, matching long-standing behavior -- a failed read leaves
	 * "stat" unchanged; confirm this is intended best-effort polling.
	 */
	t4_os_pci_write_cfg4(adapter, vpd_base + PCI_VPD_DATA, 0);
	for (polls_left = EEPROM_MAX_POLL; polls_left > 0; polls_left--) {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stat);
		if ((stat & 0x1) == 0)
			break;
	}
	if (polls_left == 0)
		return -ETIMEDOUT;

	return 0;
}
3123
3124 /**
3125 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
3126 * @phys_addr: the physical EEPROM address
3127 * @fn: the PCI function number
3128 * @sz: size of function-specific area
3129 *
3130 * Translate a physical EEPROM address to virtual. The first 1K is
3131 * accessed through virtual addresses starting at 31K, the rest is
3132 * accessed through virtual addresses starting at 0.
3133 *
3134 * The mapping is as follows:
3135 * [0..1K) -> [31K..32K)
3136 * [1K..1K+A) -> [ES-A..ES)
3137 * [1K+A..ES) -> [0..ES-A-1K)
3138 *
3139 * where A = @fn * @sz, and ES = EEPROM size.
3140 */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)3141 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3142 {
3143 fn *= sz;
3144 if (phys_addr < 1024)
3145 return phys_addr + (31 << 10);
3146 if (phys_addr < 1024 + fn)
3147 return EEPROMSIZE - fn + phys_addr - 1024;
3148 if (phys_addr < EEPROMSIZE)
3149 return phys_addr - 1024 - fn;
3150 return -EINVAL;
3151 }
3152
3153 /**
3154 * t4_seeprom_wp - enable/disable EEPROM write protection
3155 * @adapter: the adapter
3156 * @enable: whether to enable or disable write protection
3157 *
3158 * Enables or disables write protection on the serial EEPROM.
3159 */
t4_seeprom_wp(struct adapter * adapter,int enable)3160 int t4_seeprom_wp(struct adapter *adapter, int enable)
3161 {
3162 return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3163 }
3164
3165 /**
3166 * get_vpd_keyword_val - Locates an information field keyword in the VPD
3167 * @v: Pointer to buffered vpd data structure
3168 * @kw: The keyword to search for
3169 *
3170 * Returns the value of the information field keyword or
3171 * -ENOENT otherwise.
3172 */
get_vpd_keyword_val(const struct t4_vpd_hdr * v,const char * kw)3173 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3174 {
3175 int i;
3176 unsigned int offset , len;
3177 const u8 *buf = (const u8 *)v;
3178 const u8 *vpdr_len = &v->vpdr_len[0];
3179 offset = sizeof(struct t4_vpd_hdr);
3180 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3181
3182 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3183 return -ENOENT;
3184 }
3185
3186 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3187 if(memcmp(buf + i , kw , 2) == 0){
3188 i += VPD_INFO_FLD_HDR_SIZE;
3189 return i;
3190 }
3191
3192 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3193 }
3194
3195 return -ENOENT;
3196 }
3197
3198 /*
3199 * str_strip
3200 * Removes trailing whitespaces from string "s"
3201 * Based on strstrip() implementation in string.c
3202 */
static void str_strip(char *s)
{
	size_t size = strlen(s);

	/*
	 * Walk back over trailing whitespace by index rather than pointer,
	 * so we never form a pointer before the start of the buffer (UB
	 * for an all-whitespace string in the old pointer-based loop).
	 * The cast to unsigned char is required by the <ctype.h> contract:
	 * passing a negative plain char to isspace() is undefined.
	 */
	while (size > 0 && isspace((unsigned char)s[size - 1]))
		size--;
	s[size] = '\0';
}
3217
3218 /**
3219 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3220 * @adapter: adapter to read
3221 * @p: where to store the parameters
3222 *
3223 * Reads card parameters stored in VPD EEPROM.
3224 */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;	/* offsets of EC/SN/PN/NA field values in vpd[] */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	/* Scratch buffer holding the raw VPD image read from the EEPROM. */
	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Read the full VPD image, one 32-bit word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
	v = (const struct t4_vpd_hdr *)vpd;

	/*
	 * Locate keyword "name" in the buffered VPD and leave the offset of
	 * its value in "var"; on failure, log, set ret and bail to "out".
	 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
} while (0)

	/*
	 * "RV" holds the VPD checksum byte: the sum of all bytes from the
	 * start of the VPD up to and including that byte must be zero.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/*
	 * Copy each field into *p, clipping to the destination size where
	 * the VPD supplies a per-field length (the byte just before the
	 * value, at offset var - VPD_INFO_FLD_HDR_SIZE + 2), and strip
	 * trailing whitespace padding.
	 */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ? ret : 0;
}
3311
3312 /**
3313 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3314 * @adapter: adapter to read
3315 * @p: where to store the parameters
3316 *
3317 * Reads card parameters stored in VPD EEPROM and retrieves the Core
3318 * Clock. This can only be called after a connection to the firmware
3319 * is established.
3320 */
t4_get_vpd_params(struct adapter * adapter,struct vpd_params * p)3321 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3322 {
3323 u32 cclk_param, cclk_val;
3324 int ret;
3325
3326 /*
3327 * Grab the raw VPD parameters.
3328 */
3329 ret = t4_get_raw_vpd_params(adapter, p);
3330 if (ret)
3331 return ret;
3332
3333 /*
3334 * Ask firmware for the Core Clock since it knows how to translate the
3335 * Reference Clock ('V2') VPD field into a Core Clock value ...
3336 */
3337 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3338 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3339 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3340 1, &cclk_param, &cclk_val);
3341
3342 if (ret)
3343 return ret;
3344 p->cclk = cclk_val;
3345
3346 return 0;
3347 }
3348
3349 /**
3350 * t4_get_pfres - retrieve VF resource limits
3351 * @adapter: the adapter
3352 *
3353 * Retrieves configured resource limits and capabilities for a physical
3354 * function. The results are stored in @adapter->pfres.
3355 */
t4_get_pfres(struct adapter * adapter)3356 int t4_get_pfres(struct adapter *adapter)
3357 {
3358 struct pf_resources *pfres = &adapter->params.pfres;
3359 struct fw_pfvf_cmd cmd, rpl;
3360 int v;
3361 u32 word;
3362
3363 /*
3364 * Execute PFVF Read command to get VF resource limits; bail out early
3365 * with error on command failure.
3366 */
3367 memset(&cmd, 0, sizeof(cmd));
3368 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
3369 F_FW_CMD_REQUEST |
3370 F_FW_CMD_READ |
3371 V_FW_PFVF_CMD_PFN(adapter->pf) |
3372 V_FW_PFVF_CMD_VFN(0));
3373 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
3374 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
3375 if (v != FW_SUCCESS)
3376 return v;
3377
3378 /*
3379 * Extract PF resource limits and return success.
3380 */
3381 word = be32_to_cpu(rpl.niqflint_niq);
3382 pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
3383
3384 word = be32_to_cpu(rpl.type_to_neq);
3385 pfres->neq = G_FW_PFVF_CMD_NEQ(word);
3386 pfres->pmask = G_FW_PFVF_CMD_PMASK(word);
3387
3388 word = be32_to_cpu(rpl.tc_to_nexactf);
3389 pfres->tc = G_FW_PFVF_CMD_TC(word);
3390 pfres->nvi = G_FW_PFVF_CMD_NVI(word);
3391 pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
3392
3393 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
3394 pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
3395 pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
3396 pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
3397
3398 return 0;
3399 }
3400
3401 /* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes (standard SPI NOR flash command set) */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID        = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3414
3415 /**
3416 * sf1_read - read data from the serial flash
3417 * @adapter: the adapter
3418 * @byte_cnt: number of bytes to read
3419 * @cont: whether another operation will be chained
3420 * @lock: whether to lock SF for PL access only
3421 * @valp: where to store the read data
3422 *
3423 * Reads up to 4 bytes of data from the serial flash. The location of
3424 * the read needs to be specified prior to calling this by issuing the
3425 * appropriate commands to the serial flash.
3426 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	u32 op;
	int rc;

	/* Only 1-4 byte transfers fit in the SF_DATA register. */
	if (byte_cnt < 1 || byte_cnt > 4)
		return -EINVAL;

	/* Bail out if a previous flash operation is still in flight. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;

	/* Kick off the read; no V_OP bit means a read operation. */
	op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
	t4_write_reg(adapter, A_SF_OP, op);

	rc = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (rc == 0)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return rc;
}
3443
3444 /**
3445 * sf1_write - write data to the serial flash
3446 * @adapter: the adapter
3447 * @byte_cnt: number of bytes to write
3448 * @cont: whether another operation will be chained
3449 * @lock: whether to lock SF for PL access only
3450 * @val: value to write
3451 *
3452 * Writes up to 4 bytes of data to the serial flash. The location of
3453 * the write needs to be specified prior to calling this by issuing the
3454 * appropriate commands to the serial flash.
3455 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	u32 op;

	/* Only 1-4 byte transfers fit in the SF_DATA register. */
	if (byte_cnt < 1 || byte_cnt > 4)
		return -EINVAL;

	/* Bail out if a previous flash operation is still in flight. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;

	/* Stage the data word, then start the operation. */
	t4_write_reg(adapter, A_SF_DATA, val);
	op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1);
	t4_write_reg(adapter, A_SF_OP, op);

	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
3468
3469 /**
3470 * flash_wait_op - wait for a flash operation to complete
3471 * @adapter: the adapter
3472 * @attempts: max number of polls of the status register
3473 * @delay: delay between polls in ms
3474 *
3475 * Wait for a flash operation to complete by polling the status register.
3476 */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/*
		 * Issue a READ_STATUS command (chained, SF kept locked),
		 * then read back the one-byte status register.
		 */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Bit 0 of the status register is the write-in-progress bit. */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* In a crash-dump context we cannot sleep; busy-wait. */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3500
3501 /**
3502 * t4_read_flash - read words from serial flash
3503 * @adapter: the adapter
3504 * @addr: the start address for the read
3505 * @nwords: how many 32-bit words to read
3506 * @data: where to store the read data
3507 * @byte_oriented: whether to store data as bytes or as words
3508 *
3509 * Read the specified number of 32-bit words from the serial flash.
3510 * If @byte_oriented is set the read data is stored as a byte array
3511 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3512 * natural endianness.
3513 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int rc;

	/* Reads must be 32-bit aligned and lie within the flash. */
	if ((addr & 3) || addr + nwords * sizeof(u32) > adapter->params.sf_size)
		return -EINVAL;

	/* Issue a FAST_READ command carrying the byte-swapped address. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	rc = sf1_write(adapter, 4, 1, 0, addr);
	if (rc == 0)
		rc = sf1_read(adapter, 1, 1, 0, data);
	if (rc != 0)
		return rc;

	while (nwords != 0) {
		int last = (nwords == 1);

		rc = sf1_read(adapter, 4, !last, last, data);
		if (last)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (rc != 0)
			return rc;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
		nwords--;
		data++;
	}
	return 0;
}
3539
3540 /**
3541 * t4_write_flash - write up to a page of data to the serial flash
3542 * @adapter: the adapter
3543 * @addr: the start address to write
3544 * @n: length of data to write in bytes
3545 * @data: the data to write
3546 * @byte_oriented: whether to store data as bytes or as words
3547 *
3548 * Writes up to a page of data (256 bytes) to the serial flash starting
3549 * at the given address. All the data must be written to the same page.
3550 * If @byte_oriented is set the write data is stored as byte stream
3551 * (i.e. matches what on disk), otherwise in big-endian.
3552 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie entirely within a single 256-byte flash page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* PROGRAM_PAGE command carrying the byte-swapped target address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out in chained chunks of up to 4 bytes each. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	/* Poll for the page program to complete. */
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; rewind for compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
		       "failed to correctly write the flash page at %#x\n",
		       addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
3605
3606 /**
3607 * t4_get_fw_version - read the firmware version
3608 * @adapter: the adapter
3609 * @vers: where to place the version
3610 *
3611 * Reads the FW version from flash.
3612 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	/* Pull the single 32-bit FW version word out of the FW header. */
	const unsigned int ver_addr =
	    FLASH_FW_START + offsetof(struct fw_hdr, fw_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3619
3620 /**
3621 * t4_get_bs_version - read the firmware bootstrap version
3622 * @adapter: the adapter
3623 * @vers: where to place the version
3624 *
3625 * Reads the FW Bootstrap version from flash.
3626 */
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	/* The bootstrap image shares the fw_hdr layout in its own region. */
	const unsigned int ver_addr =
	    FLASH_FWBOOTSTRAP_START + offsetof(struct fw_hdr, fw_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3633
3634 /**
3635 * t4_get_tp_version - read the TP microcode version
3636 * @adapter: the adapter
3637 * @vers: where to place the version
3638 *
3639 * Reads the TP microcode version from flash.
3640 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	/* The TP microcode version is recorded inside the FW header. */
	const unsigned int ver_addr =
	    FLASH_FW_START + offsetof(struct fw_hdr, tp_microcode_ver);

	return t4_read_flash(adapter, ver_addr, 1, vers, 0);
}
3647
3648 /**
3649 * t4_get_exprom_version - return the Expansion ROM version (if any)
3650 * @adapter: the adapter
3651 * @vers: where to place the version
3652 *
3653 * Reads the Expansion ROM header from FLASH and returns the version
3654 * number (if present) through the @vers return value pointer. We return
3655 * this in the Firmware Version Format since it's convenient. Return
3656 * 0 on success, -ENOENT if no Expansion ROM is present.
3657 */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 hdr_buf[DIV_ROUND_UP(sizeof(struct exprom_header), sizeof(u32))];
	int rc;

	rc = t4_read_flash(adapter, FLASH_EXP_ROM_START, ARRAY_SIZE(hdr_buf),
			   hdr_buf, 0);
	if (rc != 0)
		return rc;

	hdr = (struct exprom_header *)hdr_buf;

	/* A valid Expansion ROM begins with the 0x55 0xaa signature. */
	if (!(hdr->hdr_arr[0] == 0x55 && hdr->hdr_arr[1] == 0xaa))
		return -ENOENT;

	/* Repackage the ROM version in Firmware Version Format. */
	*vers = V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]);
	return 0;
}
3684
3685 /**
3686 * t4_get_scfg_version - return the Serial Configuration version
3687 * @adapter: the adapter
3688 * @vers: where to place the version
3689 *
3690 * Reads the Serial Configuration Version via the Firmware interface
3691 * (thus this can only be called once we're ready to issue Firmware
3692 * commands). The format of the Serial Configuration version is
3693 * adapter specific. Returns 0 on success, an error on failure.
3694 *
3695 * Note that early versions of the Firmware didn't include the ability
3696 * to retrieve the Serial Configuration version, so we zero-out the
3697 * return-value parameter in that case to avoid leaving it with
3698 * garbage in it.
3699 *
3700 * Also note that the Firmware will return its cached copy of the Serial
3701 * Initialization Revision ID, not the actual Revision ID as written in
3702 * the Serial EEPROM. This is only an issue if a new VPD has been written
3703 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3704 * it's best to defer calling this routine till after a FW_RESET_CMD has
3705 * been issued if the Host Driver will be performing a full adapter
3706 * initialization.
3707 */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV);
	int rc;

	rc = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			     1, &param, vers);
	if (rc != 0)
		*vers = 0;	/* old FW lacks SCFGREV; don't leave garbage */
	return rc;
}
3721
3722 /**
3723 * t4_get_vpd_version - return the VPD version
3724 * @adapter: the adapter
3725 * @vers: where to place the version
3726 *
3727 * Reads the VPD via the Firmware interface (thus this can only be called
3728 * once we're ready to issue Firmware commands). The format of the
3729 * VPD version is adapter specific. Returns 0 on success, an error on
3730 * failure.
3731 *
3732 * Note that early versions of the Firmware didn't include the ability
3733 * to retrieve the VPD version, so we zero-out the return-value parameter
3734 * in that case to avoid leaving it with garbage in it.
3735 *
3736 * Also note that the Firmware will return its cached copy of the VPD
3737 * Revision ID, not the actual Revision ID as written in the Serial
3738 * EEPROM. This is only an issue if a new VPD has been written and the
3739 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3740 * to defer calling this routine till after a FW_RESET_CMD has been issued
3741 * if the Host Driver will be performing a full adapter initialization.
3742 */
int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV);
	int rc;

	rc = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			     1, &param, vers);
	if (rc != 0)
		*vers = 0;	/* old FW lacks VPDREV; don't leave garbage */
	return rc;
}
3756
3757 /**
3758 * t4_get_version_info - extract various chip/firmware version information
3759 * @adapter: the adapter
3760 *
3761 * Reads various chip/firmware version numbers and stores them into the
3762 * adapter Adapter Parameters structure. If any of the efforts fails
3763 * the first failure will be returned, but all of the version numbers
3764 * will be read.
3765 */
t4_get_version_info(struct adapter * adapter)3766 int t4_get_version_info(struct adapter *adapter)
3767 {
3768 int ret = 0;
3769
3770 #define FIRST_RET(__getvinfo) \
3771 do { \
3772 int __ret = __getvinfo; \
3773 if (__ret && !ret) \
3774 ret = __ret; \
3775 } while (0)
3776
3777 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3778 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3779 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3780 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3781 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3782 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3783
3784 #undef FIRST_RET
3785
3786 return ret;
3787 }
3788
3789 /**
3790 * t4_dump_version_info - dump all of the adapter configuration IDs
3791 * @adapter: the adapter
3792 *
3793 * Dumps all of the various bits of adapter configuration version/revision
3794 * IDs information. This is typically called at some point after
3795 * t4_get_version_info() has been called.
3796 */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.  A zero version word means the version was
	 * never read (or no firmware is present), so warn in that case.
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3871
3872 /**
3873 * t4_check_fw_version - check if the FW is supported with this driver
3874 * @adap: the adapter
3875 *
3876 * Checks if an adapter's FW is compatible with the driver. Returns 0
3877 * if there's exact match, a negative error if the version could not be
3878 * read or there's a major version mismatch
3879 */
t4_check_fw_version(struct adapter * adap)3880 int t4_check_fw_version(struct adapter *adap)
3881 {
3882 int ret, major, minor, micro;
3883 int exp_major, exp_minor, exp_micro;
3884 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3885
3886 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3887 if (ret)
3888 return ret;
3889
3890 major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3891 minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3892 micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3893
3894 switch (chip_version) {
3895 case CHELSIO_T4:
3896 exp_major = T4FW_MIN_VERSION_MAJOR;
3897 exp_minor = T4FW_MIN_VERSION_MINOR;
3898 exp_micro = T4FW_MIN_VERSION_MICRO;
3899 break;
3900 case CHELSIO_T5:
3901 exp_major = T5FW_MIN_VERSION_MAJOR;
3902 exp_minor = T5FW_MIN_VERSION_MINOR;
3903 exp_micro = T5FW_MIN_VERSION_MICRO;
3904 break;
3905 case CHELSIO_T6:
3906 exp_major = T6FW_MIN_VERSION_MAJOR;
3907 exp_minor = T6FW_MIN_VERSION_MINOR;
3908 exp_micro = T6FW_MIN_VERSION_MICRO;
3909 break;
3910 default:
3911 CH_ERR(adap, "Unsupported chip type, %x\n",
3912 adap->params.chip);
3913 return -EINVAL;
3914 }
3915
3916 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3917 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3918 CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
3919 "supported firmware is %u.%u.%u.\n", major, minor,
3920 micro, exp_major, exp_minor, exp_micro);
3921 return -EFAULT;
3922 }
3923 return 0;
3924 }
3925
3926 /* Is the given firmware API compatible with the one the driver was compiled
3927 * with?
3928 */
fw_compatible(const struct fw_hdr * hdr1,const struct fw_hdr * hdr2)3929 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3930 {
3931
3932 /* short circuit if it's the exact same firmware version */
3933 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3934 return 1;
3935
3936 /*
3937 * XXX: Is this too conservative? Perhaps I should limit this to the
3938 * features that are supported in the driver.
3939 */
3940 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3941 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3942 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3943 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3944 return 1;
3945 #undef SAME_INTF
3946
3947 return 0;
3948 }
3949
3950 /* The firmware in the filesystem is usable, but should it be installed?
3951 * This routine explains itself in detail if it indicates the filesystem
3952 * firmware should be installed.
3953 */
should_install_fs_fw(struct adapter * adap,int card_fw_usable,int k,int c,int t4_fw_install)3954 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3955 int k, int c, int t4_fw_install)
3956 {
3957 const char *reason;
3958
3959 if (!card_fw_usable) {
3960 reason = "incompatible or unusable";
3961 goto install;
3962 }
3963
3964 if (k > c) {
3965 reason = "older than the version bundled with this driver";
3966 goto install;
3967 }
3968
3969 if (t4_fw_install == 2 && k != c) {
3970 reason = "different than the version bundled with this driver";
3971 goto install;
3972 }
3973
3974 return 0;
3975
3976 install:
3977 if (t4_fw_install == 0) {
3978 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3979 "but the driver is prohibited from installing a "
3980 "different firmware on the card.\n",
3981 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3982 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3983 reason);
3984
3985 return (0);
3986 }
3987
3988 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3989 "installing firmware %u.%u.%u.%u on card.\n",
3990 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3991 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3992 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3993 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3994
3995 return 1;
3996 }
3997
/*
 * Decide which firmware to run: the image already on the card or the
 * filesystem image supplied in @fw_data.  May flash the filesystem image
 * onto the card when the card's firmware is missing/incompatible/stale
 * and the adapter is still uninitialized.  On success the adapter's
 * cached FW/TP versions reflect the firmware on the card.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	/* Firmware header the driver was compiled against. */
	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
		       "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* Check the optional filesystem image for compatibility too. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible. Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					t4_fw_install)) {

		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
			       "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* driver, card, and filesystem versions for the message */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
		       "fw_install %d, chip state %d, "
		       "driver compiled with %d.%d.%d.%d, "
		       "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
		       t4_fw_install, state,
		       G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		       G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		/*
		 * NOTE(review): positive EINVAL here while earlier failure
		 * paths return negated values; callers appear to test for
		 * nonzero only -- confirm before normalizing.
		 */
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4088
4089 /**
4090 * t4_flash_erase_sectors - erase a range of flash sectors
4091 * @adapter: the adapter
4092 * @start: the first sector to erase
4093 * @end: the last sector to erase
4094 *
4095 * Erases the sectors in the given inclusive range.
4096 */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)4097 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4098 {
4099 int ret = 0;
4100
4101 if (end >= adapter->params.sf_nsec)
4102 return -EINVAL;
4103
4104 while (start <= end) {
4105 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4106 (ret = sf1_write(adapter, 4, 0, 1,
4107 SF_ERASE_SECTOR | (start << 8))) != 0 ||
4108 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4109 CH_ERR(adapter,
4110 "erase of flash sector %d failed, error %d\n",
4111 start, ret);
4112 break;
4113 }
4114 start++;
4115 }
4116 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4117 return ret;
4118 }
4119
4120 /**
4121 * t4_flash_cfg_addr - return the address of the flash configuration file
4122 * @adapter: the adapter
4123 *
4124 * Return the address within the flash where the Firmware Configuration
4125 * File is stored, or an error if the device FLASH is too small to contain
4126 * a Firmware Configuration File.
4127 */
t4_flash_cfg_addr(struct adapter * adapter)4128 int t4_flash_cfg_addr(struct adapter *adapter)
4129 {
4130 /*
4131 * If the device FLASH isn't large enough to hold a Firmware
4132 * Configuration File, return an error.
4133 */
4134 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4135 return -ENOSPC;
4136
4137 return FLASH_CFG_START;
4138 }
4139
4140 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
4141 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
4142 * and emit an error message for mismatched firmware to save our caller the
4143 * effort ...
4144 */
t4_fw_matches_chip(const struct adapter * adap,const struct fw_hdr * hdr)4145 static int t4_fw_matches_chip(const struct adapter *adap,
4146 const struct fw_hdr *hdr)
4147 {
4148 /*
4149 * The expression below will return FALSE for any unsupported adapter
4150 * which will keep us "honest" in the future ...
4151 */
4152 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4153 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4154 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4155 return 1;
4156
4157 CH_ERR(adap,
4158 "FW image (%d) is not suitable for this adapter (%d)\n",
4159 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4160 return 0;
4161 }
4162
4163 /**
4164 * t4_load_fw - download firmware
4165 * @adap: the adapter
4166 * @fw_data: the firmware image to write
4167 * @size: image size
4168 * @bootstrap: indicates if the binary is a bootstrap fw
4169 *
4170 * Write the supplied firmware image to the card's serial flash.
4171 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Regular and bootstrap firmware live in different flash regions. */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/*
	 * Sanity-check the image: non-empty, a 512-byte multiple, in
	 * agreement with the header's own length, within the flash
	 * region, and built for this chip.
	 */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
		       "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
		       "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The image's 32-bit words must sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
		       "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, write the real version word to validate the image. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	else {
		/* Refresh the cached version to match what was written. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4267
4268 /**
4269 * t4_phy_fw_ver - return current PHY firmware version
4270 * @adap: the adapter
4271 * @phy_fw_ver: return value buffer for PHY firmware version
4272 *
4273 * Returns the current version of external PHY firmware on the
4274 * adapter.
4275 */
t4_phy_fw_ver(struct adapter * adap,int * phy_fw_ver)4276 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4277 {
4278 u32 param, val;
4279 int ret;
4280
4281 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4282 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4283 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4284 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4285 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4286 ¶m, &val);
4287 if (ret < 0)
4288 return ret;
4289 *phy_fw_ver = val;
4290 return 0;
4291 }
4292
4293 /**
4294 * t4_load_phy_fw - download port PHY firmware
4295 * @adap: the adapter
4296 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
4297 * @lock: the lock to use to guard the memory copy
4298 * @phy_fw_version: function to check PHY firmware versions
4299 * @phy_fw_data: the PHY firmware image to write
4300 * @phy_fw_size: image size
4301 *
4302 * Transfer the specified PHY firmware to the adapter. If a non-NULL
4303 * @phy_fw_version is supplied, then it will be used to determine if
4304 * it's necessary to perform the transfer by comparing the version
4305 * of any existing adapter PHY firmware with that of the passed in
4306 * PHY firmware image. If @lock is non-NULL then it will be used
4307 * around the call to t4_memory_rw() which transfers the PHY firmware
4308 * to the adapter.
4309 *
4310 * A negative error number will be returned if an error occurs. If
4311 * version number support is available and there's no need to upgrade
4312 * the firmware, 0 will be returned. If firmware is successfully
 *	transferred to the adapter, 1 will be returned.
4314 *
4315 * NOTE: some adapters only have local RAM to store the PHY firmware. As
4316 * a result, a RESET of the adapter would cause that RAM to lose its
4317 * contents. Thus, loading PHY firmware on such adapters must happen after any
4318 * FW_RESET_CMDs ...
4319 */
t4_load_phy_fw(struct adapter * adap,int win,t4_os_lock_t * lock,int (* phy_fw_version)(const u8 *,size_t),const u8 * phy_fw_data,size_t phy_fw_size)4320 int t4_load_phy_fw(struct adapter *adap,
4321 int win, t4_os_lock_t *lock,
4322 int (*phy_fw_version)(const u8 *, size_t),
4323 const u8 *phy_fw_data, size_t phy_fw_size)
4324 {
4325 unsigned long mtype = 0, maddr = 0;
4326 u32 param, val;
4327 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4328 int ret;
4329
4330 /*
4331 * If we have version number support, then check to see if the adapter
4332 * already has up-to-date PHY firmware loaded.
4333 */
4334 if (phy_fw_version) {
4335 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4336 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4337 if (ret < 0)
4338 return ret;;
4339
4340 if (cur_phy_fw_ver >= new_phy_fw_vers) {
4341 CH_WARN(adap, "PHY Firmware already up-to-date, "
4342 "version %#x\n", cur_phy_fw_ver);
4343 return 0;
4344 }
4345 }
4346
4347 /*
4348 * Ask the firmware where it wants us to copy the PHY firmware image.
4349 * The size of the file requires a special version of the READ coommand
4350 * which will pass the file size via the values field in PARAMS_CMD and
4351 * retreive the return value from firmware and place it in the same
4352 * buffer values
4353 */
4354 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4355 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4356 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4357 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4358 val = phy_fw_size;
4359 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4360 ¶m, &val, 1, true);
4361 if (ret < 0)
4362 return ret;
4363 mtype = val >> 8;
4364 maddr = (val & 0xff) << 16;
4365
4366 /*
4367 * Copy the supplied PHY Firmware image to the adapter memory location
4368 * allocated by the adapter firmware.
4369 */
4370 if (lock)
4371 t4_os_lock(lock);
4372 ret = t4_memory_rw(adap, win, mtype, maddr,
4373 phy_fw_size, (__be32*)phy_fw_data,
4374 T4_MEMORY_WRITE);
4375 if (lock)
4376 t4_os_unlock(lock);
4377 if (ret)
4378 return ret;
4379
4380 /*
4381 * Tell the firmware that the PHY firmware image has been written to
4382 * RAM and it can now start copying it over to the PHYs. The chip
4383 * firmware will RESET the affected PHYs as part of this operation
4384 * leaving them running the new PHY firmware image.
4385 */
4386 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4387 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4388 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4389 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4390 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4391 ¶m, &val, 30000);
4392
4393 /*
4394 * If we have version number support, then check to see that the new
4395 * firmware got loaded properly.
4396 */
4397 if (phy_fw_version) {
4398 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4399 if (ret < 0)
4400 return ret;
4401
4402 if (cur_phy_fw_ver != new_phy_fw_vers) {
4403 CH_WARN(adap, "PHY Firmware did not update: "
4404 "version on adapter %#x, "
4405 "version flashed %#x\n",
4406 cur_phy_fw_ver, new_phy_fw_vers);
4407 return -ENXIO;
4408 }
4409 }
4410
4411 return 1;
4412 }
4413
4414 /**
4415 * t4_fwcache - firmware cache operation
4416 * @adap: the adapter
4417 * @op : the operation (flush or flush and invalidate)
4418 */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)4419 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4420 {
4421 struct fw_params_cmd c;
4422
4423 memset(&c, 0, sizeof(c));
4424 c.op_to_vfn =
4425 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4426 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4427 V_FW_PARAMS_CMD_PFN(adap->pf) |
4428 V_FW_PARAMS_CMD_VFN(0));
4429 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4430 c.param[0].mnem =
4431 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4432 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4433 c.param[0].val = (__force __be32)op;
4434
4435 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4436 }
4437
/*
 * Read out the CIM PIF logic analyzer: request entries into *pif_req and
 * response entries into *pif_rsp, optionally reporting the hardware write
 * pointers.  Capture is paused while reading and the original debug
 * configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	u32 saved_cfg, debug_sts, req_ptr, rsp_ptr;
	int grp, ent;

	/* Disable the logic analyzer while we read it out. */
	saved_cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (saved_cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg ^ F_LADBGEN);

	debug_sts = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req_ptr = G_POLADBGWRPTR(debug_sts);
	rsp_ptr = G_PILADBGWRPTR(debug_sts);
	if (pif_req_wrptr)
		*pif_req_wrptr = req_ptr;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp_ptr;

	for (grp = 0; grp < CIM_PIFLA_SIZE; grp++) {
		for (ent = 0; ent < 6; ent++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG,
				     V_POLADBGRDPTR(req_ptr) |
				     V_PILADBGRDPTR(rsp_ptr));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req_ptr++;
			rsp_ptr++;
		}
		/* Skip two entries between groups, wrapping on the mask. */
		req_ptr = (req_ptr + 2) & M_POLADBGRDPTR;
		rsp_ptr = (rsp_ptr + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration (re-enables capture). */
	t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg);
}
4471
/*
 * Read out the CIM MA logic analyzer into *ma_req / *ma_rsp.  Capture is
 * paused while reading; the previous debug configuration is restored on
 * completion.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 saved_cfg;
	int row, col, rd_ptr;

	saved_cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (saved_cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg ^ F_LADBGEN);

	for (row = 0; row < CIM_MALA_SIZE; row++) {
		/* Only 5 of every 8 read-pointer slots hold MA LA data. */
		for (col = 0; col < 5; col++) {
			rd_ptr = 8 * row + col;
			t4_write_reg(adap, A_CIM_DEBUGCFG,
				     V_POLADBGRDPTR(rd_ptr) |
				     V_PILADBGRDPTR(rd_ptr));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, saved_cfg);
}
4492
/*
 * Read out the ULP RX logic analyzer.  The buffer is filled interleaved:
 * entry @ent of channel @chan lands at la_buf[ent * 8 + chan].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int chan, ent, wrptr;

	for (chan = 0; chan < 8; chan++) {
		u32 *dst = la_buf + chan;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, chan);
		/* Start reading from the current hardware write pointer. */
		wrptr = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, wrptr);
		for (ent = 0; ent < ULPRX_LA_SIZE; ent++, dst += 8)
			*dst = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4507
/* Platform-specific handler invoked for matching interrupt conditions. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt decode table; consumed by
 * t4_handle_intr_status().  A table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4517
4518 /**
4519 * t4_handle_intr_status - table driven interrupt handler
4520 * @adapter: the adapter that generated the interrupt
4521 * @reg: the interrupt status register to process
4522 * @acts: table of interrupt actions
4523 *
4524 * A table driven interrupt handler that applies a set of masks to an
4525 * interrupt status word and performs the corresponding actions if the
4526 * interrupts described by the mask have occurred. The actions include
4527 * optionally emitting a warning or alert message. The table is terminated
4528 * by an entry specifying mask 0. Returns the number of fatal interrupt
4529 * conditions.
4530 */
t4_handle_intr_status(struct adapter * adapter,unsigned int reg,const struct intr_info * acts)4531 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4532 const struct intr_info *acts)
4533 {
4534 int fatal = 0;
4535 unsigned int mask = 0;
4536 unsigned int status = t4_read_reg(adapter, reg);
4537
4538 for ( ; acts->mask; ++acts) {
4539 if (!(status & acts->mask))
4540 continue;
4541 if (acts->fatal) {
4542 fatal++;
4543 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4544 status & acts->mask);
4545 } else if (acts->msg)
4546 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4547 status & acts->mask);
4548 if (acts->int_handler)
4549 acts->int_handler(adapter);
4550 mask |= acts->mask;
4551 }
4552 status &= mask;
4553 if (status) /* clear processed interrupts */
4554 t4_write_reg(adapter, reg, status);
4555 return fatal;
4556 }
4557
/*
 * Interrupt handler for the PCIE module.  Decodes the T4 (three cause
 * registers) or T5/T6 (single cause register) layout and escalates any
 * fatal condition via t4_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4-only: PCI-E core system bus agent parity errors. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4-only: PCI-E core express port errors. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 decode of A_PCIE_INT_CAUSE. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		/* non-fatal: logged but does not trigger t4_fatal_err() */
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 decode of A_PCIE_INT_CAUSE. */
	static struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		/* non-fatal: logged only */
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 exposes three separate cause registers; later chips fold
	 * everything into A_PCIE_INT_CAUSE.
	 */
	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4676
4677 /*
4678 * TP interrupt handler.
4679 */
tp_intr_handler(struct adapter * adapter)4680 static void tp_intr_handler(struct adapter *adapter)
4681 {
4682 static const struct intr_info tp_intr_info[] = {
4683 { 0x3fffffff, "TP parity error", -1, 1 },
4684 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4685 { 0 }
4686 };
4687
4688 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4689 t4_fatal_err(adapter);
4690 }
4691
/*
 * SGE interrupt handler.  Aggregates parity errors from the CAUSE1/CAUSE2
 * (and, on T5+, CAUSE5) registers with the table-decoded CAUSE3 conditions;
 * any accumulated error is treated as fatal.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u32 v = 0, perr;	/* v accumulates all fatal conditions seen */
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Conditions only present on T4/T5. */
	static struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug
	 */
	static struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Any bit set in CAUSE1/CAUSE2 is a parity error and fatal. */
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
	}
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
	}
	/* CAUSE5 exists on T5 and later only. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
		perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
		if (perr) {
			v |= perr;
			CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
		}
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report (and clear) any per-queue error latched by the SGE. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4783
/* Aggregate parity-error masks for the CIM outbound and inbound queues. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4788
/*
 * CIM interrupt handler.  Also reports firmware errors latched in
 * A_PCIE_FW and filters spurious Timer0 interrupts that are not backed
 * by an actual firmware crash.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* Illegal accesses reported by the CIM micro-processor. */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4863
4864 /*
4865 * ULP RX interrupt handler.
4866 */
ulprx_intr_handler(struct adapter * adapter)4867 static void ulprx_intr_handler(struct adapter *adapter)
4868 {
4869 static const struct intr_info ulprx_intr_info[] = {
4870 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4871 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4872 { 0x7fffff, "ULPRX parity error", -1, 1 },
4873 { 0 }
4874 };
4875
4876 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4877 t4_fatal_err(adapter);
4878 }
4879
4880 /*
4881 * ULP TX interrupt handler.
4882 */
ulptx_intr_handler(struct adapter * adapter)4883 static void ulptx_intr_handler(struct adapter *adapter)
4884 {
4885 static const struct intr_info ulptx_intr_info[] = {
4886 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4887 0 },
4888 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4889 0 },
4890 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4891 0 },
4892 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4893 0 },
4894 { 0xfffffff, "ULPTX parity error", -1, 1 },
4895 { 0 }
4896 };
4897
4898 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4899 t4_fatal_err(adapter);
4900 }
4901
4902 /*
4903 * PM TX interrupt handler.
4904 */
pmtx_intr_handler(struct adapter * adapter)4905 static void pmtx_intr_handler(struct adapter *adapter)
4906 {
4907 static const struct intr_info pmtx_intr_info[] = {
4908 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4909 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4910 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4911 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4912 { 0xffffff0, "PMTX framing error", -1, 1 },
4913 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4914 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4915 1 },
4916 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4917 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4918 { 0 }
4919 };
4920
4921 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4922 t4_fatal_err(adapter);
4923 }
4924
4925 /*
4926 * PM RX interrupt handler.
4927 */
pmrx_intr_handler(struct adapter * adapter)4928 static void pmrx_intr_handler(struct adapter *adapter)
4929 {
4930 static const struct intr_info pmrx_intr_info[] = {
4931 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4932 { 0x3ffff0, "PMRX framing error", -1, 1 },
4933 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4934 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4935 1 },
4936 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4937 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4938 { 0 }
4939 };
4940
4941 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4942 t4_fatal_err(adapter);
4943 }
4944
4945 /*
4946 * CPL switch interrupt handler.
4947 */
cplsw_intr_handler(struct adapter * adapter)4948 static void cplsw_intr_handler(struct adapter *adapter)
4949 {
4950 static const struct intr_info cplsw_intr_info[] = {
4951 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4952 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4953 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4954 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4955 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4956 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4957 { 0 }
4958 };
4959
4960 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4961 t4_fatal_err(adapter);
4962 }
4963
4964 /*
4965 * LE interrupt handler.
4966 */
le_intr_handler(struct adapter * adap)4967 static void le_intr_handler(struct adapter *adap)
4968 {
4969 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4970 static const struct intr_info le_intr_info[] = {
4971 { F_LIPMISS, "LE LIP miss", -1, 0 },
4972 { F_LIP0, "LE 0 LIP error", -1, 0 },
4973 { F_PARITYERR, "LE parity error", -1, 1 },
4974 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4975 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
4976 { 0 }
4977 };
4978
4979 static struct intr_info t6_le_intr_info[] = {
4980 /* log an error for HASHTBLMEMCRCERR and clear the bit */
4981 { F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
4982 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4983 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4984 { F_TCAMINTPERR, "LE parity error", -1, 1 },
4985 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4986 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4987 { 0 }
4988 };
4989
4990 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4991 (chip_ver <= CHELSIO_T5) ?
4992 le_intr_info : t6_le_intr_info))
4993 t4_fatal_err(adap);
4994 }
4995
/*
 * MPS interrupt handler.  Decodes every MPS sub-block cause register,
 * clears the top-level MPS cause, and escalates fatal conditions.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* T6 variant: identical to mps_tx_intr_info minus F_BUBBLE. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level cause and read it back to flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}
5079
/* EDC/MC interrupt conditions handled by mem_intr_handler(). */
#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)
5082
/*
 * EDC/MC interrupt handler.
 *
 * @idx selects the memory controller: MEM_EDC0/MEM_EDC1, MEM_MC (MC on T4,
 * MC0 on later chips), or MC1.  Correctable ECC errors are counted and
 * reported; parity and uncorrectable ECC errors are fatal.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by @idx; "MC/MC0" covers both the T4 and T5+ naming. */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause and ECC-status registers for this controller. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		/* MC1 exists only on chips with the per-port MC registers. */
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/* Writing the count field clears the correctable counter. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
5131
5132 /*
5133 * MA interrupt handler.
5134 */
ma_intr_handler(struct adapter * adapter)5135 static void ma_intr_handler(struct adapter *adapter)
5136 {
5137 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
5138
5139 if (status & F_MEM_PERR_INT_CAUSE) {
5140 CH_ALERT(adapter,
5141 "MA parity error, parity status %#x\n",
5142 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
5143 if (is_t5(adapter->params.chip))
5144 CH_ALERT(adapter,
5145 "MA parity error, parity status %#x\n",
5146 t4_read_reg(adapter,
5147 A_MA_PARITY_ERROR_STATUS2));
5148 }
5149 if (status & F_MEM_WRAP_INT_CAUSE) {
5150 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
5151 CH_ALERT(adapter, "MA address wrap-around error by "
5152 "client %u to address %#x\n",
5153 G_MEM_WRAP_CLIENT_NUM(v),
5154 G_MEM_WRAP_ADDRESS(v) << 4);
5155 }
5156 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
5157 t4_fatal_err(adapter);
5158 }
5159
5160 /*
5161 * SMB interrupt handler.
5162 */
smb_intr_handler(struct adapter * adap)5163 static void smb_intr_handler(struct adapter *adap)
5164 {
5165 static const struct intr_info smb_intr_info[] = {
5166 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5167 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5168 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5169 { 0 }
5170 };
5171
5172 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5173 t4_fatal_err(adap);
5174 }
5175
5176 /*
5177 * NC-SI interrupt handler.
5178 */
ncsi_intr_handler(struct adapter * adap)5179 static void ncsi_intr_handler(struct adapter *adap)
5180 {
5181 static const struct intr_info ncsi_intr_info[] = {
5182 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
5183 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
5184 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
5185 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
5186 { 0 }
5187 };
5188
5189 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
5190 t4_fatal_err(adap);
5191 }
5192
5193 /*
5194 * XGMAC interrupt handler.
5195 */
xgmac_intr_handler(struct adapter * adap,int port)5196 static void xgmac_intr_handler(struct adapter *adap, int port)
5197 {
5198 u32 v, int_cause_reg;
5199
5200 if (is_t4(adap->params.chip))
5201 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5202 else
5203 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5204
5205 v = t4_read_reg(adap, int_cause_reg);
5206
5207 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5208 if (!v)
5209 return;
5210
5211 if (v & F_TXFIFO_PRTY_ERR)
5212 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5213 port);
5214 if (v & F_RXFIFO_PRTY_ERR)
5215 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5216 port);
5217 t4_write_reg(adap, int_cause_reg, v);
5218 t4_fatal_err(adap);
5219 }
5220
5221 /*
5222 * PL Parity Error interrupt handler.
5223 */
pl_perr_intr_handler(struct adapter * adap)5224 static void pl_perr_intr_handler(struct adapter *adap)
5225 {
5226 static const struct intr_info pl_perr_info[] = {
5227 { F_UART, "UART Parity Error", -1, },
5228 { F_ULP_TX, "ULP TX Parity Error", -1 },
5229 { F_SGE, "SGE Parity Error", -1 },
5230 { F_HMA, "HMA Parity Error", -1 },
5231 { F_CPL_SWITCH, "CPL Switch Parity Error", -1 },
5232 { F_ULP_RX, "ULP RX Parity Error", -1 },
5233 { F_PM_RX, "PM RX Parity Error", -1 },
5234 { F_PM_TX, "PM TX Parity Error", -1 },
5235 { F_MA, "MA Parity Error", -1 },
5236 { F_TP, "TP Parity Error", -1 },
5237 { F_LE, "LE Parity Error", -1 },
5238 { F_EDC1, "EDC1 Parity Error", -1 },
5239 { F_EDC0, "EDC0 Parity Error", -1 },
5240 { F_MC, "MC Parity Error", -1 },
5241 { F_PCIE, "PCIE Parity Error", -1 },
5242 { F_PMU, "PMU Parity Error", -1 },
5243 { F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 },
5244 { F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 },
5245 { F_XGMAC1, "XGMAC1 Parity Error", -1 },
5246 { F_XGMAC0, "XGMAC0 Parity Error", -1 },
5247 { F_SMB, "SMB Parity Error", -1 },
5248 { F_SF, "SF Parity Error", -1 },
5249 { F_PL, "PL Parity Error", -1 },
5250 { F_NCSI, "NCSI Parity Error", -1 },
5251 { F_MPS, "MPS Parity Error", -1 },
5252 { F_MI, "MI Parity Error", -1 },
5253 { F_DBG, "DBG Parity Error", -1 },
5254 { F_I2CM, "I2CM Parity Error", -1 },
5255 { F_CIM, "CIM Parity Error", -1 },
5256 };
5257
5258 t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info);
5259 /* pl_intr_handler() will do the t4_fatal_err(adap) */
5260 }
5261
5262 /*
5263 * PL interrupt handler.
5264 */
pl_intr_handler(struct adapter * adap)5265 static void pl_intr_handler(struct adapter *adap)
5266 {
5267 static const struct intr_info pl_intr_info[] = {
5268 { F_FATALPERR, "Fatal parity error", -1, 1,
5269 pl_perr_intr_handler },
5270 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5271 { 0 }
5272 };
5273
5274 static struct intr_info t5_pl_intr_info[] = {
5275 { F_FATALPERR, "Fatal parity error", -1, 1,
5276 pl_perr_intr_handler },
5277 { 0 }
5278 };
5279
5280 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5281 is_t4(adap->params.chip) ?
5282 pl_intr_info : t5_pl_intr_info))
5283 t4_fatal_err(adap);
5284 }
5285
5286 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5287
5288 /**
5289 * t4_slow_intr_handler - control path interrupt handler
5290 * @adapter: the adapter
5291 *
5292 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5293 * The designation 'slow' is because it involves register reads, while
5294 * data interrupts typically don't involve any MMIOs.
5295 */
t4_slow_intr_handler(struct adapter * adapter)5296 int t4_slow_intr_handler(struct adapter *adapter)
5297 {
5298 /* There are rare cases where a PL_INT_CAUSE bit may end up getting
5299 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
5300 * easiest just to mask that case here.
5301 */
5302 u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
5303 u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE);
5304 u32 cause = raw_cause & enable;
5305
5306 if (!(cause & GLBL_INTR_MASK))
5307 return 0;
5308
5309 /* Disable all the interrupt(bits) in PL_INT_ENABLE */
5310 t4_write_reg(adapter, A_PL_INT_ENABLE, 0);
5311 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5312
5313 if (cause & F_CIM)
5314 cim_intr_handler(adapter);
5315 if (cause & F_MPS)
5316 mps_intr_handler(adapter);
5317 if (cause & F_NCSI)
5318 ncsi_intr_handler(adapter);
5319 if (cause & F_PL)
5320 pl_intr_handler(adapter);
5321 if (cause & F_SMB)
5322 smb_intr_handler(adapter);
5323 if (cause & F_MAC0)
5324 xgmac_intr_handler(adapter, 0);
5325 if (cause & F_MAC1)
5326 xgmac_intr_handler(adapter, 1);
5327 if (cause & F_MAC2)
5328 xgmac_intr_handler(adapter, 2);
5329 if (cause & F_MAC3)
5330 xgmac_intr_handler(adapter, 3);
5331 if (cause & F_PCIE)
5332 pcie_intr_handler(adapter);
5333 if (cause & F_MC0)
5334 mem_intr_handler(adapter, MEM_MC);
5335 if (is_t5(adapter->params.chip) && (cause & F_MC1))
5336 mem_intr_handler(adapter, MEM_MC1);
5337 if (cause & F_EDC0)
5338 mem_intr_handler(adapter, MEM_EDC0);
5339 if (cause & F_EDC1)
5340 mem_intr_handler(adapter, MEM_EDC1);
5341 if (cause & F_LE)
5342 le_intr_handler(adapter);
5343 if (cause & F_TP)
5344 tp_intr_handler(adapter);
5345 if (cause & F_MA)
5346 ma_intr_handler(adapter);
5347 if (cause & F_PM_TX)
5348 pmtx_intr_handler(adapter);
5349 if (cause & F_PM_RX)
5350 pmrx_intr_handler(adapter);
5351 if (cause & F_ULP_RX)
5352 ulprx_intr_handler(adapter);
5353 if (cause & F_CPL_SWITCH)
5354 cplsw_intr_handler(adapter);
5355 if (cause & F_SGE)
5356 sge_intr_handler(adapter);
5357 if (cause & F_ULP_TX)
5358 ulptx_intr_handler(adapter);
5359
5360 /* Clear the interrupts just processed for which we are the master. */
5361 t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK);
5362
5363 /* re-enable the interrupts (bits that were disabled
5364 * earlier in PL_INT_ENABLE)
5365 */
5366 t4_write_reg(adapter, A_PL_INT_ENABLE, enable);
5367 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5368 return 1;
5369 }
5370
5371 /**
5372 * t4_intr_enable - enable interrupts
5373 * @adapter: the adapter whose interrupts should be enabled
5374 *
5375 * Enable PF-specific interrupts for the calling function and the top-level
5376 * interrupt concentrator for global interrupts. Interrupts are already
5377 * enabled at each module, here we just enable the roots of the interrupt
5378 * hierarchies.
5379 *
5380 * Note: this function should be called only when the driver manages
5381 * non PF-specific interrupts from the various HW modules. Only one PCI
5382 * function at a time should be doing this.
5383 */
t4_intr_enable(struct adapter * adapter)5384 void t4_intr_enable(struct adapter *adapter)
5385 {
5386 u32 val = 0;
5387 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5388 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5389 ? G_SOURCEPF(whoami)
5390 : G_T6_SOURCEPF(whoami));
5391
5392 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5393 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5394 else
5395 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5396 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
5397 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
5398 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
5399 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
5400 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5401 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
5402 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
5403 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5404 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
5405 }
5406
5407 /**
5408 * t4_intr_disable - disable interrupts
5409 * @adapter: the adapter whose interrupts should be disabled
5410 *
5411 * Disable interrupts. We only disable the top-level interrupt
5412 * concentrators. The caller must be a PCI function managing global
5413 * interrupts.
5414 */
t4_intr_disable(struct adapter * adapter)5415 void t4_intr_disable(struct adapter *adapter)
5416 {
5417 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5418 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5419 ? G_SOURCEPF(whoami)
5420 : G_T6_SOURCEPF(whoami));
5421
5422 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5423 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5424 }
5425
t4_chip_rss_size(struct adapter * adap)5426 unsigned int t4_chip_rss_size(struct adapter *adap)
5427 {
5428 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5429 return RSS_NENTRIES;
5430 else
5431 return T6_RSS_NENTRIES;
5432 }
5433
5434 /**
5435 * t4_config_rss_range - configure a portion of the RSS mapping table
5436 * @adapter: the adapter
5437 * @mbox: mbox to use for the FW command
5438 * @viid: virtual interface whose RSS subtable is to be written
5439 * @start: start entry in the table to write
5440 * @n: how many table entries to write
5441 * @rspq: values for the "response queue" (Ingress Queue) lookup table
5442 * @nrspq: number of values in @rspq
5443 *
5444 * Programs the selected part of the VI's RSS mapping table with the
5445 * provided values. If @nrspq < @n the supplied values are used repeatedly
5446 * until the full table range is populated.
5447 *
5448 * The caller must ensure the values in @rspq are in the range allowed for
5449 * @viid.
5450 */
t4_config_rss_range(struct adapter * adapter,int mbox,unsigned int viid,int start,int n,const u16 * rspq,unsigned int nrspq)5451 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5452 int start, int n, const u16 *rspq, unsigned int nrspq)
5453 {
5454 int ret;
5455 const u16 *rsp = rspq;
5456 const u16 *rsp_end = rspq + nrspq;
5457 struct fw_rss_ind_tbl_cmd cmd;
5458
5459 memset(&cmd, 0, sizeof(cmd));
5460 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5461 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5462 V_FW_RSS_IND_TBL_CMD_VIID(viid));
5463 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5464
5465 /* Each firmware RSS command can accommodate up to 32 RSS Ingress
5466 * Queue Identifiers. These Ingress Queue IDs are packed three to
5467 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5468 * reserved.
5469 */
5470 while (n > 0) {
5471 int nq = min(n, 32);
5472 int nq_packed = 0;
5473 __be32 *qp = &cmd.iq0_to_iq2;
5474
5475 /* Set up the firmware RSS command header to send the next
5476 * "nq" Ingress Queue IDs to the firmware.
5477 */
5478 cmd.niqid = cpu_to_be16(nq);
5479 cmd.startidx = cpu_to_be16(start);
5480
5481 /* "nq" more done for the start of the next loop.
5482 */
5483 start += nq;
5484 n -= nq;
5485
5486 /* While there are still Ingress Queue IDs to stuff into the
5487 * current firmware RSS command, retrieve them from the
5488 * Ingress Queue ID array and insert them into the command.
5489 */
5490 while (nq > 0) {
5491 /* Grab up to the next 3 Ingress Queue IDs (wrapping
5492 * around the Ingress Queue ID array if necessary) and
5493 * insert them into the firmware RSS command at the
5494 * current 3-tuple position within the commad.
5495 */
5496 u16 qbuf[3];
5497 u16 *qbp = qbuf;
5498 int nqbuf = min(3, nq);
5499
5500 nq -= nqbuf;
5501 qbuf[0] = qbuf[1] = qbuf[2] = 0;
5502 while (nqbuf && nq_packed < 32) {
5503 nqbuf--;
5504 nq_packed++;
5505 *qbp++ = *rsp++;
5506 if (rsp >= rsp_end)
5507 rsp = rspq;
5508 }
5509 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5510 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5511 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5512 }
5513
5514 /* Send this portion of the RRS table update to the firmware;
5515 * bail out on any errors.
5516 */
5517 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5518 if (ret)
5519 return ret;
5520 }
5521 return 0;
5522 }
5523
5524 /**
5525 * t4_config_glbl_rss - configure the global RSS mode
5526 * @adapter: the adapter
5527 * @mbox: mbox to use for the FW command
5528 * @mode: global RSS mode
5529 * @flags: mode-specific flags
5530 *
5531 * Sets the global RSS mode.
5532 */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)5533 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5534 unsigned int flags)
5535 {
5536 struct fw_rss_glb_config_cmd c;
5537
5538 memset(&c, 0, sizeof(c));
5539 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5540 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5541 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5542 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5543 c.u.manual.mode_pkd =
5544 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5545 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5546 c.u.basicvirtual.mode_keymode =
5547 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5548 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5549 } else
5550 return -EINVAL;
5551 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5552 }
5553
5554 /**
5555 * t4_config_vi_rss - configure per VI RSS settings
5556 * @adapter: the adapter
5557 * @mbox: mbox to use for the FW command
5558 * @viid: the VI id
5559 * @flags: RSS flags
5560 * @defq: id of the default RSS queue for the VI.
5561 * @skeyidx: RSS secret key table index for non-global mode
5562 * @skey: RSS vf_scramble key for VI.
5563 *
5564 * Configures VI-specific RSS properties.
5565 */
t4_config_vi_rss(struct adapter * adapter,int mbox,unsigned int viid,unsigned int flags,unsigned int defq,unsigned int skeyidx,unsigned int skey)5566 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5567 unsigned int flags, unsigned int defq, unsigned int skeyidx,
5568 unsigned int skey)
5569 {
5570 struct fw_rss_vi_config_cmd c;
5571
5572 memset(&c, 0, sizeof(c));
5573 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5574 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5575 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5576 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5577 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5578 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5579 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5580 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5581 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5582
5583 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5584 }
5585
/*
 * Read an RSS table row.  Writes the row index (with the upper command bits
 * set) to A_TP_RSS_LKP_TABLE, then polls the same register up to 5 times for
 * F_LKPTBLROWVLD to indicate the row data is valid; the raw register value
 * is stored in *val.  Returns 0 on success, -EAGAIN on timeout (see
 * t4_wait_op_done_val()).
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5593
5594 /**
5595 * t4_read_rss - read the contents of the RSS mapping table
5596 * @adapter: the adapter
5597 * @map: holds the contents of the RSS mapping table
5598 *
5599 * Reads the contents of the RSS hash->queue mapping table.
5600 */
t4_read_rss(struct adapter * adapter,u16 * map)5601 int t4_read_rss(struct adapter *adapter, u16 *map)
5602 {
5603 u32 val;
5604 int i, ret, nentries;
5605
5606 nentries = t4_chip_rss_size(adapter);
5607 for (i = 0; i < nentries / 2; ++i) {
5608 ret = rd_rss_row(adapter, i, &val);
5609 if (ret)
5610 return ret;
5611 *map++ = G_LKPTBLQUEUE0(val);
5612 *map++ = G_LKPTBLQUEUE1(val);
5613 }
5614 return 0;
5615 }
5616
5617 /**
5618 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5619 * @adap: the adapter
5620 * @cmd: TP fw ldst address space type
5621 * @vals: where the indirect register values are stored/written
5622 * @nregs: how many indirect registers to read/write
5623 * @start_idx: index of first indirect register to read/write
5624 * @rw: Read (1) or Write (0)
5625 * @sleep_ok: if true we may sleep while awaiting command completion
5626 *
5627 * Access TP indirect registers through LDST
5628 **/
t4_tp_fw_ldst_rw(struct adapter * adap,int cmd,u32 * vals,unsigned int nregs,unsigned int start_index,unsigned int rw,bool sleep_ok)5629 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5630 unsigned int nregs, unsigned int start_index,
5631 unsigned int rw, bool sleep_ok)
5632 {
5633 int ret = 0;
5634 unsigned int i;
5635 struct fw_ldst_cmd c;
5636
5637 for (i = 0; i < nregs; i++) {
5638 memset(&c, 0, sizeof(c));
5639 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5640 F_FW_CMD_REQUEST |
5641 (rw ? F_FW_CMD_READ :
5642 F_FW_CMD_WRITE) |
5643 V_FW_LDST_CMD_ADDRSPACE(cmd));
5644 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5645
5646 c.u.addrval.addr = cpu_to_be32(start_index + i);
5647 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5648 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5649 sleep_ok);
5650 if (ret)
5651 return ret;
5652
5653 if (rw)
5654 vals[i] = be32_to_cpu(c.u.addrval.val);
5655 }
5656 return 0;
5657 }
5658
5659 /**
5660 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5661 * @adap: the adapter
5662 * @reg_addr: Address Register
5663 * @reg_data: Data register
5664 * @buff: where the indirect register values are stored/written
5665 * @nregs: how many indirect registers to read/write
5666 * @start_index: index of first indirect register to read/write
5667 * @rw: READ(1) or WRITE(0)
5668 * @sleep_ok: if true we may sleep while awaiting command completion
5669 *
5670 * Read/Write TP indirect registers through LDST if possible.
5671 * Else, use backdoor access
5672 **/
t4_tp_indirect_rw(struct adapter * adap,u32 reg_addr,u32 reg_data,u32 * buff,u32 nregs,u32 start_index,int rw,bool sleep_ok)5673 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5674 u32 *buff, u32 nregs, u32 start_index, int rw,
5675 bool sleep_ok)
5676 {
5677 int rc = -EINVAL;
5678 int cmd;
5679
5680 switch (reg_addr) {
5681 case A_TP_PIO_ADDR:
5682 cmd = FW_LDST_ADDRSPC_TP_PIO;
5683 break;
5684 case A_TP_TM_PIO_ADDR:
5685 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5686 break;
5687 case A_TP_MIB_INDEX:
5688 cmd = FW_LDST_ADDRSPC_TP_MIB;
5689 break;
5690 default:
5691 goto indirect_access;
5692 }
5693
5694 if (t4_use_ldst(adap))
5695 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5696 sleep_ok);
5697
5698 indirect_access:
5699
5700 if (rc) {
5701 if (rw)
5702 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5703 start_index);
5704 else
5705 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5706 start_index);
5707 }
5708 }
5709
5710 /**
5711 * t4_tp_pio_read - Read TP PIO registers
5712 * @adap: the adapter
5713 * @buff: where the indirect register values are written
5714 * @nregs: how many indirect registers to read
5715 * @start_index: index of first indirect register to read
5716 * @sleep_ok: if true we may sleep while awaiting command completion
5717 *
5718 * Read TP PIO Registers
5719 **/
t4_tp_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5720 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5721 u32 start_index, bool sleep_ok)
5722 {
5723 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5724 start_index, 1, sleep_ok);
5725 }
5726
5727 /**
5728 * t4_tp_pio_write - Write TP PIO registers
5729 * @adap: the adapter
5730 * @buff: where the indirect register values are stored
5731 * @nregs: how many indirect registers to write
5732 * @start_index: index of first indirect register to write
5733 * @sleep_ok: if true we may sleep while awaiting command completion
5734 *
5735 * Write TP PIO Registers
5736 **/
t4_tp_pio_write(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5737 void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5738 u32 start_index, bool sleep_ok)
5739 {
5740 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5741 start_index, 0, sleep_ok);
5742 }
5743
5744 /**
5745 * t4_tp_tm_pio_read - Read TP TM PIO registers
5746 * @adap: the adapter
5747 * @buff: where the indirect register values are written
5748 * @nregs: how many indirect registers to read
5749 * @start_index: index of first indirect register to read
5750 * @sleep_ok: if true we may sleep while awaiting command completion
5751 *
5752 * Read TP TM PIO Registers
5753 **/
t4_tp_tm_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5754 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5755 u32 start_index, bool sleep_ok)
5756 {
5757 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5758 nregs, start_index, 1, sleep_ok);
5759 }
5760
5761 /**
5762 * t4_tp_mib_read - Read TP MIB registers
5763 * @adap: the adapter
5764 * @buff: where the indirect register values are written
5765 * @nregs: how many indirect registers to read
5766 * @start_index: index of first indirect register to read
5767 * @sleep_ok: if true we may sleep while awaiting command completion
5768 *
5769 * Read TP MIB Registers
5770 **/
t4_tp_mib_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5771 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5772 bool sleep_ok)
5773 {
5774 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5775 start_index, 1, sleep_ok);
5776 }
5777
5778 /**
5779 * t4_read_rss_key - read the global RSS key
5780 * @adap: the adapter
5781 * @key: 10-entry array holding the 320-bit RSS key
5782 * @sleep_ok: if true we may sleep while awaiting command completion
5783 *
5784 * Reads the global 320-bit RSS key.
5785 */
t4_read_rss_key(struct adapter * adap,u32 * key,bool sleep_ok)5786 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5787 {
5788 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5789 }
5790
5791 /**
5792 * t4_write_rss_key - program one of the RSS keys
5793 * @adap: the adapter
5794 * @key: 10-entry array holding the 320-bit RSS key
5795 * @idx: which RSS key to write
5796 * @sleep_ok: if true we may sleep while awaiting command completion
5797 *
5798 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5799 * 0..15 the corresponding entry in the RSS key table is written,
5800 * otherwise the global RSS key is written.
5801 */
t4_write_rss_key(struct adapter * adap,const u32 * key,int idx,bool sleep_ok)5802 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5803 bool sleep_ok)
5804 {
5805 u8 rss_key_addr_cnt = 16;
5806 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5807
5808 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5809 * allows access to key addresses 16-63 by using KeyWrAddrX
5810 * as index[5:4](upper 2) into key table
5811 */
5812 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5813 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5814 rss_key_addr_cnt = 32;
5815
5816 t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5817
5818 if (idx >= 0 && idx < rss_key_addr_cnt) {
5819 if (rss_key_addr_cnt > 16)
5820 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5821 vrt | V_KEYWRADDRX(idx >> 4) |
5822 V_T6_VFWRADDR(idx) | F_KEYWREN);
5823 else
5824 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5825 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5826 }
5827 }
5828
5829 /**
5830 * t4_read_rss_pf_config - read PF RSS Configuration Table
5831 * @adapter: the adapter
5832 * @index: the entry in the PF RSS table to read
5833 * @valp: where to store the returned value
5834 * @sleep_ok: if true we may sleep while awaiting command completion
5835 *
5836 * Reads the PF RSS Configuration Table at the specified index and returns
5837 * the value found there.
5838 */
t4_read_rss_pf_config(struct adapter * adapter,unsigned int index,u32 * valp,bool sleep_ok)5839 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5840 u32 *valp, bool sleep_ok)
5841 {
5842 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5843 }
5844
5845 /**
5846 * t4_write_rss_pf_config - write PF RSS Configuration Table
5847 * @adapter: the adapter
5848 * @index: the entry in the VF RSS table to read
5849 * @val: the value to store
5850 * @sleep_ok: if true we may sleep while awaiting command completion
5851 *
5852 * Writes the PF RSS Configuration Table at the specified index with the
5853 * specified value.
5854 */
t4_write_rss_pf_config(struct adapter * adapter,unsigned int index,u32 val,bool sleep_ok)5855 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5856 u32 val, bool sleep_ok)
5857 {
5858 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5859 sleep_ok);
5860 }
5861
5862 /**
5863 * t4_read_rss_vf_config - read VF RSS Configuration Table
5864 * @adapter: the adapter
5865 * @index: the entry in the VF RSS table to read
5866 * @vfl: where to store the returned VFL
5867 * @vfh: where to store the returned VFH
5868 * @sleep_ok: if true we may sleep while awaiting command completion
5869 *
5870 * Reads the VF RSS Configuration Table at the specified index and returns
5871 * the (VFL, VFH) values found there.
5872 */
t4_read_rss_vf_config(struct adapter * adapter,unsigned int index,u32 * vfl,u32 * vfh,bool sleep_ok)5873 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5874 u32 *vfl, u32 *vfh, bool sleep_ok)
5875 {
5876 u32 vrt, mask, data;
5877
5878 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5879 mask = V_VFWRADDR(M_VFWRADDR);
5880 data = V_VFWRADDR(index);
5881 } else {
5882 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5883 data = V_T6_VFWRADDR(index);
5884 }
5885 /*
5886 * Request that the index'th VF Table values be read into VFL/VFH.
5887 */
5888 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5889 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5890 vrt |= data | F_VFRDEN;
5891 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5892
5893 /*
5894 * Grab the VFL/VFH values ...
5895 */
5896 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5897 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5898 }
5899
5900 /**
5901 * t4_read_rss_pf_map - read PF RSS Map
5902 * @adapter: the adapter
5903 * @sleep_ok: if true we may sleep while awaiting command completion
5904 *
5905 * Reads the PF RSS Map register and returns its value.
5906 */
t4_read_rss_pf_map(struct adapter * adapter,bool sleep_ok)5907 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5908 {
5909 u32 pfmap;
5910
5911 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5912
5913 return pfmap;
5914 }
5915
5916 /**
5917 * t4_read_rss_pf_mask - read PF RSS Mask
5918 * @adapter: the adapter
5919 * @sleep_ok: if true we may sleep while awaiting command completion
5920 *
5921 * Reads the PF RSS Mask register and returns its value.
5922 */
t4_read_rss_pf_mask(struct adapter * adapter,bool sleep_ok)5923 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5924 {
5925 u32 pfmask;
5926
5927 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5928
5929 return pfmask;
5930 }
5931
5932 /**
5933 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5934 * @adap: the adapter
5935 * @v4: holds the TCP/IP counter values
5936 * @v6: holds the TCP/IPv6 counter values
5937 * @sleep_ok: if true we may sleep while awaiting command completion
5938 *
5939 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5940 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5941 */
t4_tp_get_tcp_stats(struct adapter * adap,struct tp_tcp_stats * v4,struct tp_tcp_stats * v6,bool sleep_ok)5942 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5943 struct tp_tcp_stats *v6, bool sleep_ok)
5944 {
5945 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5946
5947 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5948 #define STAT(x) val[STAT_IDX(x)]
5949 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5950
5951 if (v4) {
5952 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5953 A_TP_MIB_TCP_OUT_RST, sleep_ok);
5954 v4->tcp_out_rsts = STAT(OUT_RST);
5955 v4->tcp_in_segs = STAT64(IN_SEG);
5956 v4->tcp_out_segs = STAT64(OUT_SEG);
5957 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5958 }
5959 if (v6) {
5960 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5961 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5962 v6->tcp_out_rsts = STAT(OUT_RST);
5963 v6->tcp_in_segs = STAT64(IN_SEG);
5964 v6->tcp_out_segs = STAT64(OUT_SEG);
5965 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5966 }
5967 #undef STAT64
5968 #undef STAT
5969 #undef STAT_IDX
5970 }
5971
5972 /**
5973 * t4_tp_get_err_stats - read TP's error MIB counters
5974 * @adap: the adapter
5975 * @st: holds the counter values
5976 * @sleep_ok: if true we may sleep while awaiting command completion
5977 *
5978 * Returns the values of TP's error counters.
5979 */
t4_tp_get_err_stats(struct adapter * adap,struct tp_err_stats * st,bool sleep_ok)5980 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5981 bool sleep_ok)
5982 {
5983 int nchan = adap->params.arch.nchan;
5984
5985 t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5986 sleep_ok);
5987
5988 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5989 sleep_ok);
5990
5991 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5992 sleep_ok);
5993
5994 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5995 A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5996
5997 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5998 A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5999
6000 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6001 sleep_ok);
6002
6003 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6004 A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
6005
6006 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6007 A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6008
6009 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6010 sleep_ok);
6011 }
6012
6013 /**
6014 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
6015 * @adap: the adapter
6016 * @st: holds the counter values
6017 * @sleep_ok: if true we may sleep while awaiting command completion
6018 *
6019 * Returns the values of TP's CPL counters.
6020 */
t4_tp_get_cpl_stats(struct adapter * adap,struct tp_cpl_stats * st,bool sleep_ok)6021 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6022 bool sleep_ok)
6023 {
6024 int nchan = adap->params.arch.nchan;
6025
6026 t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6027
6028 t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6029 }
6030
6031 /**
6032 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6033 * @adap: the adapter
6034 * @st: holds the counter values
6035 *
6036 * Returns the values of TP's RDMA counters.
6037 */
t4_tp_get_rdma_stats(struct adapter * adap,struct tp_rdma_stats * st,bool sleep_ok)6038 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6039 bool sleep_ok)
6040 {
6041 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6042 sleep_ok);
6043 }
6044
6045 /**
6046 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6047 * @adap: the adapter
6048 * @idx: the port index
6049 * @st: holds the counter values
6050 * @sleep_ok: if true we may sleep while awaiting command completion
6051 *
6052 * Returns the values of TP's FCoE counters for the selected port.
6053 */
t4_get_fcoe_stats(struct adapter * adap,unsigned int idx,struct tp_fcoe_stats * st,bool sleep_ok)6054 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6055 struct tp_fcoe_stats *st, bool sleep_ok)
6056 {
6057 u32 val[2];
6058
6059 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6060 sleep_ok);
6061
6062 t4_tp_mib_read(adap, &st->frames_drop, 1,
6063 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6064
6065 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6066 sleep_ok);
6067
6068 st->octets_ddp = ((u64)val[0] << 32) | val[1];
6069 }
6070
6071 /**
6072 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6073 * @adap: the adapter
6074 * @st: holds the counter values
6075 * @sleep_ok: if true we may sleep while awaiting command completion
6076 *
6077 * Returns the values of TP's counters for non-TCP directly-placed packets.
6078 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/*
	 * Four consecutive MIB entries starting at USM_PKTS: frame count,
	 * drop count, then the high and low words of the octet count.
	 */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}
6090
6091 /**
6092 * t4_read_mtu_tbl - returns the values in the HW path MTU table
6093 * @adap: the adapter
6094 * @mtus: where to store the MTU values
6095 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
6096 *
6097 * Reads the HW path MTU table.
6098 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * MTUINDEX 0xff with the entry number in MTUVALUE selects
		 * a read-back of entry i; the subsequent register read
		 * returns the entry's value and width fields.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);	/* base-2 log of MTU */
	}
}
6113
6114 /**
6115 * t4_read_cong_tbl - reads the congestion control table
6116 * @adap: the adapter
6117 * @incr: where to store the alpha values
6118 *
6119 * Reads the additive increments programmed into the HW congestion
6120 * control table.
6121 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/*
			 * ROWINDEX 0xffff selects read-back mode; the entry
			 * is addressed by (mtu index << 5) | window.
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
			/* The additive increment is in the low 13 bits. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
				       A_TP_CCTRL_TABLE) & 0x1fff;
		}
}
6134
6135 /**
6136 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6137 * @adap: the adapter
6138 * @addr: the indirect TP register address
6139 * @mask: specifies the field within the register to modify
6140 * @val: new value for the field
6141 *
6142 * Sets a field of an indirect TP register to the given value.
6143 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/*
	 * Read-modify-write through the TP PIO address/data register pair.
	 * NOTE(review): this sequence is not atomic and takes no lock;
	 * presumably callers serialize access to the PIO pair — confirm.
	 */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}
6151
6152 /**
6153 * init_cong_ctrl - initialize congestion control parameters
6154 * @a: the alpha values for congestion control
6155 * @b: the beta values for congestion control
6156 *
6157 * Initialize the congestion control parameters.
6158 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Alpha values, one entry per congestion control window. */
	static const unsigned short alpha_tbl[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	/* Beta values, one entry per congestion control window. */
	static const unsigned short beta_tbl[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha_tbl[i];
		b[i] = beta_tbl[i];
	}
}
6195
6196 /* The minimum additive increment value for the congestion control table */
6197 #define CC_MIN_INCR 2U
6198
6199 /**
6200 * t4_load_mtus - write the MTU and congestion control HW tables
6201 * @adap: the adapter
6202 * @mtus: the values for the MTU table
6203 * @alpha: the values for the congestion control alpha parameter
6204 * @beta: the values for the congestion control beta parameter
6205 *
6206 * Write the HW MTU table with the supplied MTUs and the high-speed
6207 * congestion control table with the supplied alpha, beta, and MTUs.
6208 * We write the two tables together because the additive increments
6209 * depend on the MTUs.
6210 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet count per congestion control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/*
		 * Round the width down unless the bit two below the leading
		 * one is set, i.e. round log2 to the nearer power of two.
		 */
		if (!(mtu & ((1 << log2) >> 2))) /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment derived from alpha and the
			 * window's average packet count, floored at
			 * CC_MIN_INCR. The "mtu - 40" presumably subtracts
			 * IPv4+TCP header bytes — TODO confirm.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
6242
6243 /*
6244 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6245 * clocks. The formula is
6246 *
6247 * bytes/s = bytes256 * 256 * ClkFreq / 4096
6248 *
6249 * which is equivalent to
6250 *
6251 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6252 */
chan_rate(struct adapter * adap,unsigned int bytes256)6253 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6254 {
6255 u64 v = bytes256 * adap->params.vpd.cclk;
6256
6257 return v * 62 + v / 2;
6258 }
6259
6260 /**
6261 * t4_get_chan_txrate - get the current per channel Tx rates
6262 * @adap: the adapter
6263 * @nic_rate: rates for NIC traffic
6264 * @ofld_rate: rates for offloaded traffic
6265 *
6266 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
6267 * for each channel.
6268 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	/* NIC (tunnel) traffic rates, one field per channel. */
	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2 and 3 exist only on adapters with the full channel count. */
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	/* Offloaded traffic rates. */
	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}
6289
6290 /**
6291 * t4_set_trace_filter - configure one of the tracing filters
6292 * @adap: the adapter
6293 * @tp: the desired trace filter parameters
6294 * @idx: which filter to configure
6295 * @enable: whether to enable or disable the filter
6296 *
6297 * Configures one of the tracing filters available in HW. If @enable is
6298 * %0 @tp is not examined and may be %NULL. The user is responsible to
6299 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
6300 * by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
 * docs/readme.txt for a complete description of how to set up tracing on
6302 * T4.
6303 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
		int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	if (!enable) {
		/* Disabling: clearing the match control register suffices. */
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate the remaining parameters against their field widths. */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Per-filter match/mask banks are spaced by the filter stride. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		/* HW stores "don't care" bits, i.e. the inverted mask. */
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/*
	 * Write CTL_A last: this write carries the enable bit, whose
	 * position differs between T4 and T5+ register layouts.
	 */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		      V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		      V_T5_TFPORT(tp->port) | F_T5_TFEN |
		      V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
6371
6372 /**
6373 * t4_get_trace_filter - query one of the tracing filters
6374 * @adap: the adapter
6375 * @tp: the current trace filter parameters
6376 * @idx: which trace filter to query
6377 * @enabled: non-zero if the filter is enabled
6378 *
6379 * Returns the current settings of one of the HW tracing filters.
6380 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* The enable/port/invert fields moved between T4 and T5+ layouts. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	/* Per-filter match/mask banks are spaced by the filter stride. */
	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/* HW stores "don't care" bits; invert back to a match mask. */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
6414
6415 /**
6416 * t4_read_tcb - read a hardware TCP Control Block structure
6417 * @adap: the adapter
6418 * @win: PCI-E Memory Window to use
6419 * @tid: the TCB ID
6420 * @tcb: the buffer to return the TCB in
6421 *
6422 * Reads the indicated hardware TCP Control Block and returns it in
6423 * the supplied buffer. Returns 0 on success.
6424 */
int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
{
	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
	__be32 raw_tcb[TCB_SIZE/4];
	int ret, word;

	/* Read the raw (big-endian) TCB through the memory window. */
	ret = t4_memory_rw_addr(adap, win,
				tcb_addr, sizeof raw_tcb, raw_tcb,
				T4_MEMORY_READ);
	if (ret)
		return ret;

	/*
	 * Byte-swap each word into host order. Use TCB_SIZE/4 rather than
	 * a hard-coded 32 so the bound cannot drift out of sync with the
	 * buffer declarations above.
	 */
	for (word = 0; word < TCB_SIZE/4; word++)
		tcb[word] = be32_to_cpu(raw_tcb[word]);
	return 0;
}
6442
6443 /**
6444 * t4_pmtx_get_stats - returns the HW stats from PMTX
6445 * @adap: the adapter
6446 * @cnt: where to store the count statistics
6447 * @cycles: where to store the cycle statistics
6448 *
6449 * Returns performance statistics from PMTX.
6450 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its count register. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		} else {
			/* T5+: cycle counts live behind the debug window. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
6469
6470 /**
6471 * t4_pmrx_get_stats - returns the HW stats from PMRX
6472 * @adap: the adapter
6473 * @cnt: where to store the count statistics
6474 * @cycles: where to store the cycle statistics
6475 *
6476 * Returns performance statistics from PMRX.
6477 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its count register. */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+: cycle counts live behind the debug window. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}
6496
6497 /**
6498 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6499 * @adapter: the adapter
6500 * @pidx: the port index
6501 *
 * Computes and returns a bitmap indicating which MPS buffer groups are
6503 * associated with the given Port. Bit i is set if buffer group i is
6504 * used by the Port.
6505 */
compute_mps_bg_map(struct adapter * adapter,int pidx)6506 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6507 int pidx)
6508 {
6509 unsigned int chip_version, nports;
6510
6511 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6512 nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6513
6514 switch (chip_version) {
6515 case CHELSIO_T4:
6516 case CHELSIO_T5:
6517 switch (nports) {
6518 case 1: return 0xf;
6519 case 2: return 3 << (2 * pidx);
6520 case 4: return 1 << pidx;
6521 }
6522 break;
6523
6524 case CHELSIO_T6:
6525 switch (nports) {
6526 case 2: return 1 << (2 * pidx);
6527 }
6528 break;
6529 }
6530
6531 CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6532 chip_version, nports);
6533
6534 return 0;
6535 }
6536
6537 /**
6538 * t4_get_mps_bg_map - return the buffer groups associated with a port
6539 * @adapter: the adapter
6540 * @pidx: the port index
6541 *
6542 * Returns a bitmap indicating which MPS buffer groups are associated
6543 * with the given Port. Bit i is set if buffer group i is used by the
6544 * Port.
6545 */
t4_get_mps_bg_map(struct adapter * adapter,int pidx)6546 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6547 {
6548 u8 *mps_bg_map;
6549 unsigned int nports;
6550
6551 nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6552 if (pidx >= nports) {
6553 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
6554 return 0;
6555 }
6556
6557 /* If we've already retrieved/computed this, just return the result.
6558 */
6559 mps_bg_map = adapter->params.mps_bg_map;
6560 if (mps_bg_map[pidx])
6561 return mps_bg_map[pidx];
6562
6563 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
6564 * If we're talking to such Firmware, let it tell us. If the new
6565 * API isn't supported, revert back to old hardcoded way. The value
6566 * obtained from Firmware is encoded in below format:
6567 *
6568 * val = (( MPSBGMAP[Port 3] << 24 ) |
6569 * ( MPSBGMAP[Port 2] << 16 ) |
6570 * ( MPSBGMAP[Port 1] << 8 ) |
6571 * ( MPSBGMAP[Port 0] << 0 ))
6572 */
6573 if (adapter->flags & FW_OK) {
6574 u32 param, val;
6575 int ret;
6576
6577 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6578 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6579 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6580 0, 1, ¶m, &val);
6581 if (!ret) {
6582 int p;
6583
6584 /* Store the BG Map for all of the Ports in order to
6585 * avoid more calls to the Firmware in the future.
6586 */
6587 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6588 mps_bg_map[p] = val & 0xff;
6589
6590 return mps_bg_map[pidx];
6591 }
6592 }
6593
6594 /* Either we're not talking to the Firmware or we're dealing with
6595 * older Firmware which doesn't support the new API to get the MPS
6596 * Buffer Group Map. Fall back to computing it ourselves.
6597 */
6598 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6599 return mps_bg_map[pidx];
6600 }
6601
6602 /**
6603 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6604 * @adapter: the adapter
6605 * @pidx: the port index
6606 */
t4_get_tp_e2c_map(struct adapter * adapter,int pidx)6607 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6608 {
6609 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6610 u32 param, val = 0;
6611 int ret;
6612
6613 if (pidx >= nports) {
6614 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6615 return 0;
6616 }
6617
6618 /* FW version >= 1.16.44.0 can determine E2C channel map using
6619 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6620 */
6621 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6622 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6623 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6624 0, 1, ¶m, &val);
6625 if (!ret)
6626 return (val >> (8*pidx)) & 0xff;
6627
6628 return 0;
6629 }
6630
6631 /**
6632 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6633 * @adapter: the adapter
6634 * @pidx: the port index
6635 *
6636 * Returns a bitmap indicating which TP Ingress Channels are associated with
6637 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
6638 */
t4_get_tp_ch_map(struct adapter * adapter,int pidx)6639 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
6640 {
6641 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6642 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6643
6644 if (pidx >= nports) {
6645 CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
6646 return 0;
6647 }
6648
6649 switch (chip_version) {
6650 case CHELSIO_T4:
6651 case CHELSIO_T5:
6652 /*
6653 * Note that this happens to be the same values as the MPS
6654 * Buffer Group Map for these Chips. But we replicate the code
6655 * here because they're really separate concepts.
6656 */
6657 switch (nports) {
6658 case 1: return 0xf;
6659 case 2: return 3 << (2 * pidx);
6660 case 4: return 1 << pidx;
6661 }
6662 break;
6663
6664 case CHELSIO_T6:
6665 switch (nports) {
6666 case 1: return 1 << pidx;
6667 case 2: return 1 << pidx;
6668 }
6669 break;
6670 }
6671
6672 CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
6673 chip_version, nports);
6674 return 0;
6675 }
6676
6677 /**
6678 * t4_get_port_type_description - return Port Type string description
6679 * @port_type: firmware Port Type enumeration
6680 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Table indexed by the firmware's fw_port_type enumeration. */
	static const char *const descriptions[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
		"KR_XLAUI",
	};

	/* Guard against port types newer than this table. */
	if ((unsigned int)port_type >= ARRAY_SIZE(descriptions))
		return "UNKNOWN";
	return descriptions[port_type];
}
6713
6714 /**
6715 * t4_get_port_stats_offset - collect port stats relative to a previous
6716 * snapshot
6717 * @adap: The adapter
6718 * @idx: The port
6719 * @stats: Current stats to fill
6720 * @offset: Previous stats snapshot
6721 */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)6722 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6723 struct port_stats *stats,
6724 struct port_stats *offset)
6725 {
6726 u64 *s, *o;
6727 int i;
6728
6729 t4_get_port_stats(adap, idx, stats);
6730 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6731 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6732 i++, s++, o++)
6733 *s -= *o;
6734 }
6735
6736 /**
6737 * t4_get_port_stats - collect port statistics
6738 * @adap: the adapter
6739 * @idx: the port index
6740 * @p: the stats structure to fill
6741 *
6742 * Collect statistics related to the given port from HW.
6743 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port 64-bit statistic; the register block layout differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Adapter-wide (common) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+, depending on STAT_CTL configuration, pause frames may
	 * have been counted into the 64B and multicast buckets; back
	 * them out so the buckets reflect data frames only.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the receive direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters only exist for buffer groups this port uses. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6833
6834 /**
6835 * t4_get_lb_stats - collect loopback port statistics
6836 * @adap: the adapter
6837 * @idx: the loopback port index
6838 * @p: the stats structure to fill
6839 *
6840 * Return HW statistics for the given loopback port.
6841 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Per-loopback-port 64-bit statistic; register layout differs on T4 vs T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Adapter-wide (common) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters only exist for buffer groups this port uses. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6881
6882 /* t4_mk_filtdelwr - create a delete filter WR
6883 * @ftid: the filter ID
6884 * @wr: the filter work request to populate
6885 * @rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
6886 * @qid: ingress queue to receive the delete notification
6887 *
6888 * Creates a filter work request to delete the supplied filter. If @qid
6889 * is negative the delete notification is suppressed.
6890 */
t4_mk_filtdelwr(unsigned int ftid,struct fw_filter_wr * wr,int rqtype,int qid)6891 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
6892 int rqtype, int qid)
6893 {
6894 memset(wr, 0, sizeof(*wr));
6895 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6896 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6897 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6898 V_FW_FILTER_WR_RQTYPE(rqtype) |
6899 V_FW_FILTER_WR_NOREPLY(qid < 0));
6900 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6901 if (qid >= 0)
6902 wr->rx_chan_rx_rpl_iq =
6903 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6904 }
6905
/*
 * Initialize the common header of a firmware command structure: the
 * opcode, the REQUEST flag, the READ/WRITE direction flag, and the
 * command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6912
/**
 *	t4_fwaddrspace_write - write a value through the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW_LDST command with the FIRMWARE address space through
 *	the given mailbox to write @val at @addr. Returns the mailbox
 *	command's result (0 on success).
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6931
6932 /**
6933 * t4_mdio_rd - read a PHY register through MDIO
6934 * @adap: the adapter
6935 * @mbox: mailbox to use for the FW command
6936 * @phy_addr: the PHY address
6937 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6938 * @reg: the register to read
6939 * @valp: where to store the value
6940 *
6941 * Issues a FW command through the given mailbox to read a PHY register.
6942 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	/* Build a FW_LDST read command targeting the MDIO address space. */
	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The firmware echoes the command back with rval filled in. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}
6965
6966 /**
6967 * t4_mdio_wr - write a PHY register through MDIO
6968 * @adap: the adapter
6969 * @mbox: mailbox to use for the FW command
6970 * @phy_addr: the PHY address
6971 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6972 * @reg: the register to write
 * @val: value to write
6974 *
6975 * Issues a FW command through the given mailbox to write a PHY register.
6976 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	/* Build a FW_LDST write command targeting the MDIO address space. */
	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6996
/**
 * t4_sge_decode_idma_state - decode the idma state
 * @adapter: the adapter
 * @state: the state idma is stuck in
 */
t4_sge_decode_idma_state(struct adapter * adapter,int state)7003 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7004 {
7005 static const char * const t4_decode[] = {
7006 "IDMA_IDLE",
7007 "IDMA_PUSH_MORE_CPL_FIFO",
7008 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7009 "Not used",
7010 "IDMA_PHYSADDR_SEND_PCIEHDR",
7011 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7012 "IDMA_PHYSADDR_SEND_PAYLOAD",
7013 "IDMA_SEND_FIFO_TO_IMSG",
7014 "IDMA_FL_REQ_DATA_FL_PREP",
7015 "IDMA_FL_REQ_DATA_FL",
7016 "IDMA_FL_DROP",
7017 "IDMA_FL_H_REQ_HEADER_FL",
7018 "IDMA_FL_H_SEND_PCIEHDR",
7019 "IDMA_FL_H_PUSH_CPL_FIFO",
7020 "IDMA_FL_H_SEND_CPL",
7021 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7022 "IDMA_FL_H_SEND_IP_HDR",
7023 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7024 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7025 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7026 "IDMA_FL_D_SEND_PCIEHDR",
7027 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7028 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7029 "IDMA_FL_SEND_PCIEHDR",
7030 "IDMA_FL_PUSH_CPL_FIFO",
7031 "IDMA_FL_SEND_CPL",
7032 "IDMA_FL_SEND_PAYLOAD_FIRST",
7033 "IDMA_FL_SEND_PAYLOAD",
7034 "IDMA_FL_REQ_NEXT_DATA_FL",
7035 "IDMA_FL_SEND_NEXT_PCIEHDR",
7036 "IDMA_FL_SEND_PADDING",
7037 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7038 "IDMA_FL_SEND_FIFO_TO_IMSG",
7039 "IDMA_FL_REQ_DATAFL_DONE",
7040 "IDMA_FL_REQ_HEADERFL_DONE",
7041 };
7042 static const char * const t5_decode[] = {
7043 "IDMA_IDLE",
7044 "IDMA_ALMOST_IDLE",
7045 "IDMA_PUSH_MORE_CPL_FIFO",
7046 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7047 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7048 "IDMA_PHYSADDR_SEND_PCIEHDR",
7049 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7050 "IDMA_PHYSADDR_SEND_PAYLOAD",
7051 "IDMA_SEND_FIFO_TO_IMSG",
7052 "IDMA_FL_REQ_DATA_FL",
7053 "IDMA_FL_DROP",
7054 "IDMA_FL_DROP_SEND_INC",
7055 "IDMA_FL_H_REQ_HEADER_FL",
7056 "IDMA_FL_H_SEND_PCIEHDR",
7057 "IDMA_FL_H_PUSH_CPL_FIFO",
7058 "IDMA_FL_H_SEND_CPL",
7059 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7060 "IDMA_FL_H_SEND_IP_HDR",
7061 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7062 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7063 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7064 "IDMA_FL_D_SEND_PCIEHDR",
7065 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7066 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7067 "IDMA_FL_SEND_PCIEHDR",
7068 "IDMA_FL_PUSH_CPL_FIFO",
7069 "IDMA_FL_SEND_CPL",
7070 "IDMA_FL_SEND_PAYLOAD_FIRST",
7071 "IDMA_FL_SEND_PAYLOAD",
7072 "IDMA_FL_REQ_NEXT_DATA_FL",
7073 "IDMA_FL_SEND_NEXT_PCIEHDR",
7074 "IDMA_FL_SEND_PADDING",
7075 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7076 };
7077 static const char * const t6_decode[] = {
7078 "IDMA_IDLE",
7079 "IDMA_PUSH_MORE_CPL_FIFO",
7080 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7081 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7082 "IDMA_PHYSADDR_SEND_PCIEHDR",
7083 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7084 "IDMA_PHYSADDR_SEND_PAYLOAD",
7085 "IDMA_FL_REQ_DATA_FL",
7086 "IDMA_FL_DROP",
7087 "IDMA_FL_DROP_SEND_INC",
7088 "IDMA_FL_H_REQ_HEADER_FL",
7089 "IDMA_FL_H_SEND_PCIEHDR",
7090 "IDMA_FL_H_PUSH_CPL_FIFO",
7091 "IDMA_FL_H_SEND_CPL",
7092 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7093 "IDMA_FL_H_SEND_IP_HDR",
7094 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7095 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7096 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7097 "IDMA_FL_D_SEND_PCIEHDR",
7098 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7099 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7100 "IDMA_FL_SEND_PCIEHDR",
7101 "IDMA_FL_PUSH_CPL_FIFO",
7102 "IDMA_FL_SEND_CPL",
7103 "IDMA_FL_SEND_PAYLOAD_FIRST",
7104 "IDMA_FL_SEND_PAYLOAD",
7105 "IDMA_FL_REQ_NEXT_DATA_FL",
7106 "IDMA_FL_SEND_NEXT_PCIEHDR",
7107 "IDMA_FL_SEND_PADDING",
7108 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7109 };
7110 static const u32 sge_regs[] = {
7111 A_SGE_DEBUG_DATA_LOW_INDEX_2,
7112 A_SGE_DEBUG_DATA_LOW_INDEX_3,
7113 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7114 };
7115 const char **sge_idma_decode;
7116 int sge_idma_decode_nstates;
7117 int i;
7118 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
7119
7120 /* Select the right set of decode strings to dump depending on the
7121 * adapter chip type.
7122 */
7123 switch (chip_version) {
7124 case CHELSIO_T4:
7125 sge_idma_decode = (const char **)t4_decode;
7126 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7127 break;
7128
7129 case CHELSIO_T5:
7130 sge_idma_decode = (const char **)t5_decode;
7131 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7132 break;
7133
7134 case CHELSIO_T6:
7135 sge_idma_decode = (const char **)t6_decode;
7136 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7137 break;
7138
7139 default:
7140 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
7141 return;
7142 }
7143
7144 if (state < sge_idma_decode_nstates)
7145 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7146 else
7147 CH_WARN(adapter, "idma state %d unknown\n", state);
7148
7149 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7150 CH_WARN(adapter, "SGE register %#x value %#x\n",
7151 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7152 }
7153
7154 /**
7155 * t4_sge_ctxt_flush - flush the SGE context cache
7156 * @adap: the adapter
7157 * @mbox: mailbox to use for the FW command
7158 *
7159 * Issues a FW command through the given mailbox to flush the
7160 * SGE context cache.
7161 */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)7162 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
7163 {
7164 int ret;
7165 u32 ldst_addrspace;
7166 struct fw_ldst_cmd c;
7167
7168 memset(&c, 0, sizeof(c));
7169 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
7170 FW_LDST_ADDRSPC_SGE_EGRC :
7171 FW_LDST_ADDRSPC_SGE_INGC);
7172 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7173 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7174 ldst_addrspace);
7175 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7176 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7177
7178 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7179 return ret;
7180 }
7181
7182 /**
 * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
 * @adap: the adapter
7185 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
7186 * @dbqtimers: SGE Doorbell Queue Timer table
7187 *
7188 * Reads the SGE Doorbell Queue Timer values into the provided table.
7189 * Returns 0 on success (Firmware and Hardware support this feature),
7190 * an error on failure.
7191 */
int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
			  u16 *dbqtimers)
{
	int ret = 0;
	int ix = 0;

	/* Query the timers in chunks of up to 7 (the FW PARAMS limit). */
	while (ix < ndbqtimers) {
		u32 params[7], vals[7];
		int chunk, i;

		chunk = ndbqtimers - ix;
		if (chunk > ARRAY_SIZE(params))
			chunk = ARRAY_SIZE(params);

		/* Build one DEV_DBQ_TIMER query per timer index. */
		for (i = 0; i < chunk; i++)
			params[i] =
			    (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
			     V_FW_PARAMS_PARAM_Y(ix + i));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      chunk, params, vals);
		if (ret)
			break;

		for (i = 0; i < chunk; i++)
			dbqtimers[ix++] = vals[i];
	}
	return ret;
}
7222
7223 /**
7224 * t4_fw_hello - establish communication with FW
7225 * @adap: the adapter
7226 * @mbox: mailbox to use for the FW command
7227 * @evt_mbox: mailbox to receive async FW events
7228 * @master: specifies the caller's willingness to be the device master
7229 * @state: returns the current device state (if non-NULL)
7230 *
7231 * Issues a command to establish communication with FW. Returns either
7232 * an error (negative integer) or the mailbox of the Master PF.
7233 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	/* Number of times the HELLO is re-issued on busy/timeout. */
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/* Encode our mastership preference and async-event mailbox. */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the reply: current device state and the Master PF mailbox. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time). In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll in 50ms steps against our timeout budget. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
7352
7353 /**
7354 * t4_fw_bye - end communication with FW
7355 * @adap: the adapter
7356 * @mbox: mailbox to use for the FW command
7357 *
7358 * Issues a command to terminate communication with FW.
7359 */
t4_fw_bye(struct adapter * adap,unsigned int mbox)7360 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7361 {
7362 struct fw_bye_cmd c;
7363
7364 memset(&c, 0, sizeof(c));
7365 INIT_CMD(c, BYE, WRITE);
7366 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7367 }
7368
7369 /**
7370 * t4_fw_reset - issue a reset to FW
7371 * @adap: the adapter
7372 * @mbox: mailbox to use for the FW command
7373 * @reset: specifies the type of reset to perform
7374 *
7375 * Issues a reset command of the specified type to FW.
7376 */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)7377 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7378 {
7379 struct fw_reset_cmd c;
7380
7381 memset(&c, 0, sizeof(c));
7382 INIT_CMD(c, RESET, WRITE);
7383 c.val = cpu_to_be32(reset);
7384 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7385 }
7386
7387 /**
7388 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7389 * @adap: the adapter
7390 * @mbox: mailbox to use for the FW RESET command (if desired)
7391 * @force: force uP into RESET even if FW RESET command fails
7392 *
7393 * Issues a RESET command to firmware (if desired) with a HALT indication
7394 * and then puts the microprocessor into RESET state. The RESET command
7395 * will only be issued if a legitimate mailbox is provided (mbox <=
7396 * M_PCIE_FW_MASTER).
7397 *
7398 * This is generally used in order for the host to safely manipulate the
7399 * adapter without fear of conflicting with whatever the firmware might
7400 * be doing. The only way out of this state is to RESTART the firmware
7401 * ...
7402 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.  (mbox values above M_PCIE_FW_MASTER
	 * mean "no mailbox": skip the firmware command entirely.)
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* Assert uP reset, then latch the HALT flag in PCIE_FW. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
7446
7447 /**
7448 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7449 * @adap: the adapter
7450 * @reset: if we want to do a RESET to restart things
7451 *
7452 * Restart firmware previously halted by t4_fw_halt(). On successful
7453 * return the previous PF Master remains as the new PF Master and there
7454 * is no need to issue a new HELLO command, etc.
7455 *
7456 * We do this in two ways:
7457 *
7458 * 1. If we're dealing with newer firmware we'll simply want to take
7459 * the chip's microprocessor out of RESET. This will cause the
7460 * firmware to start up from its start vector. And then we'll loop
7461 * until the firmware indicates it's started again (PCIE_FW.HALT
7462 * reset to 0) or we timeout.
7463 *
7464 * 2. If we're dealing with older firmware then we'll need to RESET
7465 * the chip since older firmware won't recognize the PCIE_FW.HALT
7466 * flag and automatically RESET itself on startup.
7467 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			/* Release the uP first so it can process the RESET. */
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* The hammer: a full PL reset, then wait for it to settle. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/*
		 * Take the uP out of RESET and poll until the (new) firmware
		 * clears PCIE_FW.HALT, i.e. it has come back up.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
7509
7510 /**
7511 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7512 * @adap: the adapter
7513 * @mbox: mailbox to use for the FW RESET command (if desired)
7514 * @fw_data: the firmware image to write
7515 * @size: image size
7516 * @force: force upgrade even if firmware doesn't cooperate
7517 *
7518 * Perform all of the steps necessary for upgrading an adapter's
7519 * firmware image. Normally this requires the cooperation of the
7520 * existing firmware in order to halt all existing activities
7521 * but if an invalid mailbox token is passed in we skip that step
7522 * (though we'll still put the adapter microprocessor into RESET in
7523 * that case).
7524 *
7525 * On successful return the new firmware will have been loaded and
7526 * the adapter will have been fully RESET losing all previous setup
7527 * state. On unsuccessful return the adapter may be completely hosed ...
7528 * positive errno indicates that the adapter is ~probably~ intact, a
7529 * negative errno indicates that things are looking bad ...
7530 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are written directly, without halt/restart. */
	unsigned int bootstrap =
		be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable FW_OK flag so that mbox commands with FW_OK flags check
	 * won't be sent when we are flashing FW.
	 */
	adap->flags &= ~FW_OK;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			goto out;
	}

	ret = t4_load_fw(adap, fw_data, size, bootstrap);
	if (ret < 0 || bootstrap)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH.  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap, 1);

out:
	/*
	 * NOTE(review): FW_OK is restored unconditionally here, even on the
	 * error paths above — presumably callers re-validate device state
	 * after a failed upgrade; confirm before changing.
	 */
	adap->flags |= FW_OK;
	return ret;
}
7590
7591 /**
7592 * t4_fl_pkt_align - return the fl packet alignment
7593 * @adap: the adapter
 * @is_packed: true when the driver uses packed FLM mode
7595 *
7596 * T4 has a single field to specify the packing and padding boundary.
7597 * T5 onwards has separate fields for this and hence the alignment for
7598 * next packet offset is maximum of these two.
7599 *
7600 */
int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
{
	unsigned int pad_shift, pad_boundary, align;
	u32 ctrl;

	ctrl = t4_read_reg(adap, A_SGE_CONTROL);

	/* T4 uses a single control field to specify both the PCIe Padding
	 * and Packing Boundary.  T5 introduced the ability to specify these
	 * separately, so on T5+ the effective Ingress Packet Data alignment
	 * in Packed Buffer Mode is the maximum of the two.  (Note that it
	 * makes no real practical sense to have the Padding Boundary be
	 * larger than the Packing Boundary, but you could set the chip up
	 * that way and, in fact, legacy T4 code would end up doing this
	 * because it would initialize the Padding Boundary and leave the
	 * Packing Boundary at 0 (16 bytes).)  Padding Boundary values in T6
	 * start from 8B, whereas it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		pad_shift = X_INGPADBOUNDARY_SHIFT;
	else
		pad_shift = X_T6_INGPADBOUNDARY_SHIFT;

	pad_boundary = 1 << (G_INGPADBOUNDARY(ctrl) + pad_shift);
	align = pad_boundary;

	if (is_packed && !is_t4(adap->params.chip)) {
		unsigned int pack_boundary;
		u32 ctrl2;

		/* T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		ctrl2 = t4_read_reg(adap, A_SGE_CONTROL2);
		pack_boundary = G_INGPACKBOUNDARY(ctrl2);
		if (pack_boundary == X_INGPACKBOUNDARY_16B)
			pack_boundary = 16;
		else
			pack_boundary = 1 << (pack_boundary +
					      X_INGPACKBOUNDARY_SHIFT);

		align = max(pad_boundary, pack_boundary);
	}
	return align;
}
7644
7645 /**
7646 * t4_fixup_host_params_compat - fix up host-dependent parameters
7647 * @adap: the adapter
7648 * @page_size: the host's Base Page Size
7649 * @cache_line_size: the host's Cache Line Size
7650 * @chip_compat: maintain compatibility with designated chip
7651 *
7652 * Various registers in the chip contain values which are dependent on the
7653 * host's Base Page and Cache Line Sizes. This function will fix all of
7654 * those registers with the appropriate values as passed in ...
7655 *
7656 * @chip_compat is used to limit the set of changes that are made
7657 * to be compatible with the indicated chip release. This is used by
7658 * drivers to maintain compatibility with chip register settings when
7659 * the drivers haven't [yet] been updated with new chip support.
7660 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	/* sge_hps encodes the page size as log2(page_size) - 10. */
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same Host Page Size for all eight Physical Functions. */
	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		/* T4 (or T4-compat mode): one combined pad/pack boundary. */
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/*
			 * The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the 1500/9000-MTU buffer sizes up to the padding boundary. */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	/* TDDP page size order is relative to 4KB (page_shift - 12). */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
7804
7805 /**
7806 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7807 * @adap: the adapter
7808 * @page_size: the host's Base Page Size
7809 * @cache_line_size: the host's Cache Line Size
7810 *
7811 * Various registers in T4 contain values which are dependent on the
7812 * host's Base Page and Cache Line Sizes. This function will fix all of
7813 * those registers with the appropriate values as passed in ...
7814 *
7815 * This routine makes changes which are compatible with T4 chips.
7816 */
t4_fixup_host_params(struct adapter * adap,unsigned int page_size,unsigned int cache_line_size)7817 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7818 unsigned int cache_line_size)
7819 {
7820 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7821 T4_LAST_REV);
7822 }
7823
7824 /**
7825 * t4_fw_initialize - ask FW to initialize the device
7826 * @adap: the adapter
7827 * @mbox: mailbox to use for the FW command
7828 *
7829 * Issues a command to FW to partially initialize the device. This
7830 * performs initialization that generally doesn't depend on user input.
7831 */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)7832 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7833 {
7834 struct fw_initialize_cmd c;
7835
7836 memset(&c, 0, sizeof(c));
7837 INIT_CMD(c, INITIALIZE, WRITE);
7838 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7839 }
7840
7841 /**
7842 * t4_query_params_rw - query FW or device parameters
7843 * @adap: the adapter
7844 * @mbox: mailbox to use for the FW command
7845 * @pf: the PF
7846 * @vf: the VF
7847 * @nparams: the number of parameters
7848 * @params: the parameter names
7849 * @val: the parameter values
7850 * @rw: Write and read flag
7851 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7852 *
7853 * Reads the value of FW or device parameters. Up to 7 parameters can be
7854 * queried at once.
7855 */
t4_query_params_rw(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val,int rw,bool sleep_ok)7856 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7857 unsigned int vf, unsigned int nparams, const u32 *params,
7858 u32 *val, int rw, bool sleep_ok)
7859 {
7860 int i, ret;
7861 struct fw_params_cmd c;
7862 __be32 *p = &c.param[0].mnem;
7863
7864 if (nparams > 7)
7865 return -EINVAL;
7866
7867 memset(&c, 0, sizeof(c));
7868 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7869 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7870 V_FW_PARAMS_CMD_PFN(pf) |
7871 V_FW_PARAMS_CMD_VFN(vf));
7872 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7873
7874 for (i = 0; i < nparams; i++) {
7875 *p++ = cpu_to_be32(*params++);
7876 if (rw)
7877 *p = cpu_to_be32(*(val + i));
7878 p++;
7879 }
7880
7881 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7882
7883 /*
7884 * We always copy back the reults, even if there's an error. We'll
7885 * get an error if any of the parameters was unknown to the Firmware,
7886 * but there will be results for the others ... (Older Firmware
7887 * stopped at the first unknown parameter; newer Firmware processes
7888 * them all and flags the unknown parameters with a return value of
7889 * ~0UL.)
7890 */
7891 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7892 *val++ = be32_to_cpu(*p);
7893
7894 return ret;
7895 }
7896
/*
 * Sleeping variant of t4_query_params_rw() for read-only queries.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params,
				  val, 0, true);
}
7904
/*
 * Non-sleeping variant of t4_query_params_rw() for read-only queries.
 */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params,
				  val, 0, false);
}
7912
7913 /**
7914 * t4_set_params_timeout - sets FW or device parameters
7915 * @adap: the adapter
7916 * @mbox: mailbox to use for the FW command
7917 * @pf: the PF
7918 * @vf: the VF
7919 * @nparams: the number of parameters
7920 * @params: the parameter names
7921 * @val: the parameter values
7922 * @timeout: the timeout time
7923 *
7924 * Sets the value of FW or device parameters. Up to 7 parameters can be
7925 * specified at once.
7926 */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)7927 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7928 unsigned int pf, unsigned int vf,
7929 unsigned int nparams, const u32 *params,
7930 const u32 *val, int timeout)
7931 {
7932 struct fw_params_cmd c;
7933 __be32 *p = &c.param[0].mnem;
7934
7935 if (nparams > 7)
7936 return -EINVAL;
7937
7938 memset(&c, 0, sizeof(c));
7939 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7940 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7941 V_FW_PARAMS_CMD_PFN(pf) |
7942 V_FW_PARAMS_CMD_VFN(vf));
7943 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7944
7945 while (nparams--) {
7946 *p++ = cpu_to_be32(*params++);
7947 *p++ = cpu_to_be32(*val++);
7948 }
7949
7950 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7951 }
7952
7953 /**
7954 * t4_set_params - sets FW or device parameters
7955 * @adap: the adapter
7956 * @mbox: mailbox to use for the FW command
7957 * @pf: the PF
7958 * @vf: the VF
7959 * @nparams: the number of parameters
7960 * @params: the parameter names
7961 * @val: the parameter values
7962 *
7963 * Sets the value of FW or device parameters. Up to 7 parameters can be
7964 * specified at once.
7965 */
t4_set_params(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val)7966 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7967 unsigned int vf, unsigned int nparams, const u32 *params,
7968 const u32 *val)
7969 {
7970 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7971 FW_CMD_MAX_TIMEOUT);
7972 }
7973
/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function by building a FW_PFVF_CMD and issuing it over the mailbox.
 *	Returns 0 on success or a negative error from the mailbox write.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	/* Pack each limit into its big-endian FW_PFVF_CMD field. */
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	/* Ingress queues: interrupt-capable (rxqi) and interruptless (rxq). */
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8021
/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@vivld: where to store the VI-valid flag from the FW reply (may be NULL)
 *	@vin: where to store the VI number from the FW reply (may be NULL)
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     u8 *vivld, u8 *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* Firmware encodes the MAC count as (count - 1). */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/*
		 * The reply spreads the assigned addresses across the
		 * mac/nmac0..nmac3 fields; copy as many as were requested.
		 * Each case falls through to pick up the lower-numbered
		 * addresses; offsets are multiples of the 6-byte MAC size.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHRU */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHRU */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHRU */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));

	if (vivld)
		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));

	if (vin)
		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));

	/* The allocated VI id comes back in the type_to_viid field. */
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
8094
8095 /**
8096 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
8097 * @adap: the adapter
8098 * @mbox: mailbox to use for the FW command
8099 * @port: physical port associated with the VI
8100 * @pf: the PF owning the VI
8101 * @vf: the VF owning the VI
8102 * @nmac: number of MAC addresses needed (1 to 5)
8103 * @mac: the MAC addresses of the VI
8104 * @rss_size: size of RSS table slice associated with this VI
8105 *
8106 * backwards compatible and convieniance routine to allocate a Virtual
8107 * Interface with a Ethernet Port Application Function and Intrustion
8108 * Detection System disabled.
8109 */
t4_alloc_vi(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,unsigned int * rss_size,u8 * vivld,u8 * vin)8110 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
8111 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
8112 unsigned int *rss_size, u8 *vivld, u8 *vin)
8113 {
8114 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
8115 vivld, vin, FW_VI_FUNC_ETH, 0);
8116 }
8117
8118
8119 /**
8120 * t4_free_vi - free a virtual interface
8121 * @adap: the adapter
8122 * @mbox: mailbox to use for the FW command
8123 * @pf: the PF owning the VI
8124 * @vf: the VF owning the VI
8125 * @viid: virtual interface identifiler
8126 *
8127 * Free a previously allocated virtual interface.
8128 */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)8129 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8130 unsigned int vf, unsigned int viid)
8131 {
8132 struct fw_vi_cmd c;
8133
8134 memset(&c, 0, sizeof(c));
8135 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8136 F_FW_CMD_REQUEST |
8137 F_FW_CMD_EXEC |
8138 V_FW_VI_CMD_PFN(pf) |
8139 V_FW_VI_CMD_VFN(vf));
8140 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8141 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8142
8143 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8144 }
8145
8146 /**
8147 * t4_set_rxmode - set Rx properties of a virtual interface
8148 * @adap: the adapter
8149 * @mbox: mailbox to use for the FW command
8150 * @viid: the VI id
8151 * @mtu: the new MTU or -1
8152 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8153 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8154 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8155 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8156 * @sleep_ok: if true we may sleep while awaiting command completion
8157 *
8158 * Sets Rx properties of a virtual interface.
8159 */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)8160 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8161 int mtu, int promisc, int all_multi, int bcast, int vlanex,
8162 bool sleep_ok)
8163 {
8164 struct fw_vi_rxmode_cmd c;
8165
8166 /* convert to FW values */
8167 if (mtu < 0)
8168 mtu = M_FW_VI_RXMODE_CMD_MTU;
8169 if (promisc < 0)
8170 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8171 if (all_multi < 0)
8172 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8173 if (bcast < 0)
8174 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8175 if (vlanex < 0)
8176 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8177
8178 memset(&c, 0, sizeof(c));
8179 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8180 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8181 V_FW_VI_RXMODE_CMD_VIID(viid));
8182 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8183 c.mtu_to_vlanexen =
8184 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8185 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8186 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8187 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8188 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8189 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8190 }
8191
/**
 *	t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@vni: the VNI id for the tunnel protocol
 *	@vni_mask: mask for the VNI id
 *	@dip_hit: to enable DIP match for the MPS entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an MPS entry with specified MAC address and VNI value.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	/* Use the exact-MAC-with-VNI flavor of the command's union. */
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	/* ADD_MAC asks firmware to pick a free index for the entry. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	/* On success the reply's IDX field holds the allocated index. */
	if (ret == 0)
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}
8240
/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	/* Raw flavor of the command's union: caller controls the index. */
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Request the specific TCAM index supplied by the caller. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/*
	 * Copy the address and the mask.  The MAC occupies the last 6 bytes
	 * of the 8-byte data1/data1m fields, hence the 2-byte offset.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* Treat firmware placing the entry elsewhere as failure. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}
8296
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * entries per mailbox command; @offset advances by the chunk size
	 * at the bottom of the loop.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* ADD_MAC lets firmware choose a free index per address. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/*
		 * An out-of-range reply index means that address was not
		 * stored: record 0xffff for it and, if requested, fold it
		 * into the inexact-match hash bitmap instead.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first chunk may carry the "free existing" flag. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Partial success (-FW_ENOMEM) still reports the count stored. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
8389
8390 /**
8391 * t4_free_encap_mac_filt - frees MPS entry at given index
8392 * @adap: the adapter
8393 * @viid: the VI id
8394 * @idx: index of MPS entry to be freed
8395 * @sleep_ok: call is allowed to sleep
8396 *
8397 * Frees the MPS entry at supplied index
8398 *
8399 * Returns a negative error number or zero on success
8400 */
t4_free_encap_mac_filt(struct adapter * adap,unsigned int viid,int idx,bool sleep_ok)8401 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
8402 int idx, bool sleep_ok)
8403 {
8404 struct fw_vi_mac_exact *p;
8405 struct fw_vi_mac_cmd c;
8406 u8 addr[] = {0,0,0,0,0,0};
8407 int ret = 0;
8408 u32 exact;
8409
8410 memset(&c, 0, sizeof(c));
8411 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8412 F_FW_CMD_REQUEST |
8413 F_FW_CMD_WRITE |
8414 V_FW_CMD_EXEC(0) |
8415 V_FW_VI_MAC_CMD_VIID(viid));
8416 exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
8417 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8418 exact |
8419 V_FW_CMD_LEN16(1));
8420 p = c.u.exact;
8421 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8422 V_FW_VI_MAC_CMD_IDX(idx));
8423 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8424
8425 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8426 return ret;
8427 }
8428
/**
 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index of the entry in mps tcam
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Removes the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_CMD_EXEC(0) |
				   V_FW_VI_MAC_CMD_VIID(viid));
	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					  raw |
					  V_FW_CMD_LEN16(1));

	/* ID_BASED_FREE tells firmware to free by the supplied index. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/*
	 * Copy the address and the mask.  The MAC occupies the last 6 bytes
	 * of the 8-byte data1/data1m fields, hence the 2-byte offset.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
}
8478
8479 /**
8480 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8481 * @adap: the adapter
8482 * @mbox: mailbox to use for the FW command
8483 * @viid: the VI id
8484 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8485 * @addr: the MAC address(es)
8486 * @sleep_ok: call is allowed to sleep
8487 *
8488 * Frees the exact-match filter for each of the supplied addresses
8489 *
8490 * Returns a negative error number or the number of filters freed.
8491 */
t4_free_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int naddr,const u8 ** addr,bool sleep_ok)8492 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8493 unsigned int viid, unsigned int naddr,
8494 const u8 **addr, bool sleep_ok)
8495 {
8496 int offset, ret = 0;
8497 struct fw_vi_mac_cmd c;
8498 unsigned int nfilters = 0;
8499 unsigned int max_naddr = is_t4(adap->params.chip) ?
8500 NUM_MPS_CLS_SRAM_L_INSTANCES :
8501 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8502 unsigned int rem = naddr;
8503
8504 if (naddr > max_naddr)
8505 return -EINVAL;
8506
8507 for (offset = 0; offset < (int)naddr ; /**/) {
8508 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8509 ? rem
8510 : ARRAY_SIZE(c.u.exact));
8511 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8512 u.exact[fw_naddr]), 16);
8513 struct fw_vi_mac_exact *p;
8514 int i;
8515
8516 memset(&c, 0, sizeof(c));
8517 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8518 F_FW_CMD_REQUEST |
8519 F_FW_CMD_WRITE |
8520 V_FW_CMD_EXEC(0) |
8521 V_FW_VI_MAC_CMD_VIID(viid));
8522 c.freemacs_to_len16 =
8523 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8524 V_FW_CMD_LEN16(len16));
8525
8526 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8527 p->valid_to_idx = cpu_to_be16(
8528 F_FW_VI_MAC_CMD_VALID |
8529 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8530 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8531 }
8532
8533 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8534 if (ret)
8535 break;
8536
8537 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8538 u16 index = G_FW_VI_MAC_CMD_IDX(
8539 be16_to_cpu(p->valid_to_idx));
8540
8541 if (index < max_naddr)
8542 nfilters++;
8543 }
8544
8545 offset += fw_naddr;
8546 rem -= fw_naddr;
8547 }
8548
8549 if (ret == 0)
8550 ret = nfilters;
8551 return ret;
8552 }
8553
8554 /**
8555 * t4_change_mac - modifies the exact-match filter for a MAC address
8556 * @adap: the adapter
8557 * @mbox: mailbox to use for the FW command
8558 * @viid: the VI id
8559 * @idx: index of existing filter for old value of MAC address, or -1
8560 * @addr: the new MAC address value
8561 * @persist: whether a new MAC allocation should be persistent
8562 * @add_smt: if true also add the address to the HW SMT
8563 *
8564 * Modifies an exact-match filter and sets it to the new MAC address if
8565 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
8566 * latter case the address is added persistently if @persist is %true.
8567 *
8568 * Note that in general it is not possible to modify the value of a given
8569 * filter so the generic way to modify an address filter is to free the one
8570 * being used by the old address value and allocate a new filter for the
8571 * new address value.
8572 *
8573 * Returns a negative error number or the index of the filter with the new
8574 * MAC value. Note that this index may differ from @idx.
8575 */
t4_change_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,u8 * smt_idx)8576 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8577 int idx, const u8 *addr, bool persist, u8 *smt_idx)
8578 {
8579 /* This will add this mac address to the destination TCAM region */
8580 return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0);
8581 }
8582
8583 /**
8584 * t4_set_addr_hash - program the MAC inexact-match hash filter
8585 * @adap: the adapter
8586 * @mbox: mailbox to use for the FW command
8587 * @viid: the VI id
8588 * @ucast: whether the hash filter should also match unicast addresses
8589 * @vec: the value to be written to the hash filter
8590 * @sleep_ok: call is allowed to sleep
8591 *
8592 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8593 */
t4_set_addr_hash(struct adapter * adap,unsigned int mbox,unsigned int viid,bool ucast,u64 vec,bool sleep_ok)8594 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8595 bool ucast, u64 vec, bool sleep_ok)
8596 {
8597 struct fw_vi_mac_cmd c;
8598 u32 val;
8599
8600 memset(&c, 0, sizeof(c));
8601 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8602 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8603 V_FW_VI_ENABLE_CMD_VIID(viid));
8604 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8605 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8606 c.freemacs_to_len16 = cpu_to_be32(val);
8607 c.u.hash.hashvec = cpu_to_be64(vec);
8608 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8609 }
8610
8611 /**
8612 * t4_enable_vi_params - enable/disable a virtual interface
8613 * @adap: the adapter
8614 * @mbox: mailbox to use for the FW command
8615 * @viid: the VI id
8616 * @rx_en: 1=enable Rx, 0=disable Rx
8617 * @tx_en: 1=enable Tx, 0=disable Tx
8618 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8619 *
8620 * Enables/disables a virtual interface. Note that setting DCB Enable
8621 * only makes sense when enabling a Virtual Interface ...
8622 */
t4_enable_vi_params(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en,bool dcb_en)8623 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8624 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8625 {
8626 struct fw_vi_enable_cmd c;
8627
8628 memset(&c, 0, sizeof(c));
8629 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8630 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8631 V_FW_VI_ENABLE_CMD_VIID(viid));
8632 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8633 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8634 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8635 FW_LEN16(c));
8636 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8637 }
8638
8639 /**
8640 * t4_enable_vi - enable/disable a virtual interface
8641 * @adap: the adapter
8642 * @mbox: mailbox to use for the FW command
8643 * @viid: the VI id
8644 * @rx_en: 1=enable Rx, 0=disable Rx
8645 * @tx_en: 1=enable Tx, 0=disable Tx
8646 *
8647 * Enables/disables a virtual interface. Note that setting DCB Enable
8648 * only makes sense when enabling a Virtual Interface ...
8649 */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)8650 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8651 bool rx_en, bool tx_en)
8652 {
8653 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8654 }
8655
8656 /**
8657 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8658 * @adap: the adapter
8659 * @mbox: mailbox to use for the FW command
8660 * @pi: the Port Information structure
8661 * @rx_en: 1=enable Rx, 0=disable Rx
8662 * @tx_en: 1=enable Tx, 0=disable Tx
8663 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8664 *
8665 * Enables/disables a Port's Virtual Interface. Note that setting DCB
8666 * Enable only makes sense when enabling a Virtual Interface ...
8667 * If the Virtual Interface enable/disable operation is successful,
8668 * we notify the OS-specific code of a potential Link Status change
8669 * via the OS Contract API t4_os_link_changed().
8670 */
int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
		     struct port_info *pi,
		     bool rx_en, bool tx_en, bool dcb_en)
{
	int err;

	err = t4_enable_vi_params(adap, mbox, pi->viid, rx_en, tx_en, dcb_en);
	if (err != 0)
		return err;

	/*
	 * The VI state changed; report a potential Link Status change to
	 * the OS-specific layer.  The link is reported up only when both
	 * directions are enabled and the recorded link state is ok.
	 */
	t4_os_link_changed(adap, pi->port_id,
	    rx_en && tx_en && pi->link_cfg.link_ok);

	return 0;
}
8683
8684 /**
8685 * t4_identify_port - identify a VI's port by blinking its LED
8686 * @adap: the adapter
8687 * @mbox: mailbox to use for the FW command
8688 * @viid: the VI id
8689 * @nblinks: how many times to blink LED at 2.5 Hz
8690 *
8691 * Identifies a VI's port by blinking its LED.
8692 */
t4_identify_port(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int nblinks)8693 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8694 unsigned int nblinks)
8695 {
8696 struct fw_vi_enable_cmd c;
8697
8698 memset(&c, 0, sizeof(c));
8699 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8700 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8701 V_FW_VI_ENABLE_CMD_VIID(viid));
8702 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8703 c.blinkdur = cpu_to_be16(nblinks);
8704 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8705 }
8706
8707 /**
8708 * t4_iq_stop - stop an ingress queue and its FLs
8709 * @adap: the adapter
8710 * @mbox: mailbox to use for the FW command
8711 * @pf: the PF owning the queues
8712 * @vf: the VF owning the queues
8713 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8714 * @iqid: ingress queue id
8715 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8716 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8717 *
8718 * Stops an ingress queue and its associated FLs, if any. This causes
8719 * any current or future data/messages destined for these queues to be
8720 * tossed.
8721 */
t4_iq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8722 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8723 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8724 unsigned int fl0id, unsigned int fl1id)
8725 {
8726 struct fw_iq_cmd c;
8727
8728 memset(&c, 0, sizeof(c));
8729 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8730 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8731 V_FW_IQ_CMD_VFN(vf));
8732 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8733 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8734 c.iqid = cpu_to_be16(iqid);
8735 c.fl0id = cpu_to_be16(fl0id);
8736 c.fl1id = cpu_to_be16(fl1id);
8737 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8738 }
8739
8740 /**
8741 * t4_iq_free - free an ingress queue and its FLs
8742 * @adap: the adapter
8743 * @mbox: mailbox to use for the FW command
8744 * @pf: the PF owning the queues
8745 * @vf: the VF owning the queues
8746 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8747 * @iqid: ingress queue id
8748 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8749 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8750 *
8751 * Frees an ingress queue and its associated FLs, if any.
8752 */
t4_iq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8753 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8754 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8755 unsigned int fl0id, unsigned int fl1id)
8756 {
8757 struct fw_iq_cmd c;
8758
8759 memset(&c, 0, sizeof(c));
8760 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8761 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8762 V_FW_IQ_CMD_VFN(vf));
8763 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8764 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8765 c.iqid = cpu_to_be16(iqid);
8766 c.fl0id = cpu_to_be16(fl0id);
8767 c.fl1id = cpu_to_be16(fl1id);
8768 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8769 }
8770
8771 /**
8772 * t4_eth_eq_free - free an Ethernet egress queue
8773 * @adap: the adapter
8774 * @mbox: mailbox to use for the FW command
8775 * @pf: the PF owning the queue
8776 * @vf: the VF owning the queue
8777 * @eqid: egress queue id
8778 *
8779 * Frees an Ethernet egress queue.
8780 */
t4_eth_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8781 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8782 unsigned int vf, unsigned int eqid)
8783 {
8784 struct fw_eq_eth_cmd c;
8785
8786 memset(&c, 0, sizeof(c));
8787 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8788 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8789 V_FW_EQ_ETH_CMD_PFN(pf) |
8790 V_FW_EQ_ETH_CMD_VFN(vf));
8791 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8792 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8793 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8794 }
8795
8796 /**
8797 * t4_ctrl_eq_free - free a control egress queue
8798 * @adap: the adapter
8799 * @mbox: mailbox to use for the FW command
8800 * @pf: the PF owning the queue
8801 * @vf: the VF owning the queue
8802 * @eqid: egress queue id
8803 *
8804 * Frees a control egress queue.
8805 */
t4_ctrl_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8806 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8807 unsigned int vf, unsigned int eqid)
8808 {
8809 struct fw_eq_ctrl_cmd c;
8810
8811 memset(&c, 0, sizeof(c));
8812 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8813 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8814 V_FW_EQ_CTRL_CMD_PFN(pf) |
8815 V_FW_EQ_CTRL_CMD_VFN(vf));
8816 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8817 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8818 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8819 }
8820
8821 /**
8822 * t4_ofld_eq_free - free an offload egress queue
8823 * @adap: the adapter
8824 * @mbox: mailbox to use for the FW command
8825 * @pf: the PF owning the queue
8826 * @vf: the VF owning the queue
8827 * @eqid: egress queue id
8828 *
 * Frees an offload egress queue.
8830 */
t4_ofld_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8831 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8832 unsigned int vf, unsigned int eqid)
8833 {
8834 struct fw_eq_ofld_cmd c;
8835
8836 memset(&c, 0, sizeof(c));
8837 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
8838 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8839 V_FW_EQ_OFLD_CMD_PFN(pf) |
8840 V_FW_EQ_OFLD_CMD_VFN(vf));
8841 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
8842 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
8843 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8844 }
8845
8846 /**
8847 * Return the highest speed set in the port capabilities, in Mb/s.
8848 */
unsigned int t4_link_fwcap_to_speed(fw_port_cap32_t caps)
{
	/* Speed capability bits, ordered fastest first. */
	static const struct {
		fw_port_cap32_t cap;
		unsigned int mbps;
	} speed_map[] = {
		{ FW_PORT_CAP32_SPEED_400G, 400000 },
		{ FW_PORT_CAP32_SPEED_200G, 200000 },
		{ FW_PORT_CAP32_SPEED_100G, 100000 },
		{ FW_PORT_CAP32_SPEED_50G,  50000 },
		{ FW_PORT_CAP32_SPEED_40G,  40000 },
		{ FW_PORT_CAP32_SPEED_25G,  25000 },
		{ FW_PORT_CAP32_SPEED_10G,  10000 },
		{ FW_PORT_CAP32_SPEED_1G,   1000 },
		{ FW_PORT_CAP32_SPEED_100M, 100 },
	};
	unsigned int i;

	/* First match wins, so the highest set speed is returned. */
	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
		if (caps & speed_map[i].cap)
			return speed_map[i].mbps;
	}

	return 0;
}
8871
8872 /**
8873 * t4_link_fwcap_to_fwspeed - return highest speed in Port Capabilities
8874 * @acaps: advertised Port Capabilities
8875 *
8876 * Get the highest speed for the port from the advertised Port
8877 * Capabilities. It will be either the highest speed from the list of
8878 * speeds or whatever user has set using ethtool.
8879 */
fw_port_cap32_t t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
	/* Speed capability bits, ordered fastest first. */
	static const fw_port_cap32_t speed_caps[] = {
		FW_PORT_CAP32_SPEED_400G,
		FW_PORT_CAP32_SPEED_200G,
		FW_PORT_CAP32_SPEED_100G,
		FW_PORT_CAP32_SPEED_50G,
		FW_PORT_CAP32_SPEED_40G,
		FW_PORT_CAP32_SPEED_25G,
		FW_PORT_CAP32_SPEED_10G,
		FW_PORT_CAP32_SPEED_1G,
		FW_PORT_CAP32_SPEED_100M,
	};
	unsigned int i;

	/* Return the highest speed capability bit that is advertised. */
	for (i = 0; i < sizeof(speed_caps) / sizeof(speed_caps[0]); i++) {
		if (acaps & speed_caps[i])
			return speed_caps[i];
	}

	return 0;
}
8902
8903 /**
8904 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
8905 * @caps16: a 16-bit Port Capabilities value
8906 *
8907 * Returns the equivalent 32-bit Port Capabilities value.
8908 */
fwcaps16_to_caps32(fw_port_cap16_t caps16)8909 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
8910 {
8911 fw_port_cap32_t caps32 = 0;
8912
8913 #define CAP16_TO_CAP32(__cap) \
8914 do { \
8915 if (caps16 & FW_PORT_CAP_##__cap) \
8916 caps32 |= FW_PORT_CAP32_##__cap; \
8917 } while (0)
8918
8919 CAP16_TO_CAP32(SPEED_100M);
8920 CAP16_TO_CAP32(SPEED_1G);
8921 CAP16_TO_CAP32(SPEED_25G);
8922 CAP16_TO_CAP32(SPEED_10G);
8923 CAP16_TO_CAP32(SPEED_40G);
8924 CAP16_TO_CAP32(SPEED_100G);
8925 CAP16_TO_CAP32(FC_RX);
8926 CAP16_TO_CAP32(FC_TX);
8927 CAP16_TO_CAP32(ANEG);
8928 CAP16_TO_CAP32(FORCE_PAUSE);
8929 CAP16_TO_CAP32(MDIAUTO);
8930 CAP16_TO_CAP32(MDISTRAIGHT);
8931 CAP16_TO_CAP32(FEC_RS);
8932 CAP16_TO_CAP32(FEC_BASER_RS);
8933 CAP16_TO_CAP32(802_3_PAUSE);
8934 CAP16_TO_CAP32(802_3_ASM_DIR);
8935
8936 #undef CAP16_TO_CAP32
8937
8938 return caps32;
8939 }
8940
8941 /**
8942 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
8943 * @caps32: a 32-bit Port Capabilities value
8944 *
8945 * Returns the equivalent 16-bit Port Capabilities value. Note that
8946 * not all 32-bit Port Capabilities can be represented in the 16-bit
8947 * Port Capabilities and some fields/values may not make it.
8948 */
fwcaps32_to_caps16(fw_port_cap32_t caps32)8949 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
8950 {
8951 fw_port_cap16_t caps16 = 0;
8952
8953 #define CAP32_TO_CAP16(__cap) \
8954 do { \
8955 if (caps32 & FW_PORT_CAP32_##__cap) \
8956 caps16 |= FW_PORT_CAP_##__cap; \
8957 } while (0)
8958
8959 CAP32_TO_CAP16(SPEED_100M);
8960 CAP32_TO_CAP16(SPEED_1G);
8961 CAP32_TO_CAP16(SPEED_10G);
8962 CAP32_TO_CAP16(SPEED_25G);
8963 CAP32_TO_CAP16(SPEED_40G);
8964 CAP32_TO_CAP16(SPEED_100G);
8965 CAP32_TO_CAP16(FC_RX);
8966 CAP32_TO_CAP16(FC_TX);
8967 CAP32_TO_CAP16(802_3_PAUSE);
8968 CAP32_TO_CAP16(802_3_ASM_DIR);
8969 CAP32_TO_CAP16(ANEG);
8970 CAP32_TO_CAP16(FORCE_PAUSE);
8971 CAP32_TO_CAP16(MDIAUTO);
8972 CAP32_TO_CAP16(MDISTRAIGHT);
8973 CAP32_TO_CAP16(FEC_RS);
8974 CAP32_TO_CAP16(FEC_BASER_RS);
8975
8976 #undef CAP32_TO_CAP16
8977
8978 return caps16;
8979 }
8980
int t4_link_set_autoneg(struct port_info *pi, u8 autoneg,
		     fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t caps = *new_caps;

	if (!autoneg) {
		caps &= ~FW_PORT_CAP32_ANEG;
	} else if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		caps |= FW_PORT_CAP32_ANEG;
	} else {
		/* The port does not support autonegotiation at all. */
		return -ENOTSUP;
	}

	/* Clear the MDI selection; re-enable auto MDI-X when supported. */
	caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO)
		caps |= FW_PORT_CAP32_MDIAUTO;

	*new_caps = caps;
	return 0;
}
9003
int t4_link_set_pause(struct port_info *pi, cc_pause_t pause,
		   fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t caps = *new_caps;
	bool tx = (pause & PAUSE_TX) != 0;
	bool rx = (pause & PAUSE_RX) != 0;

	/* Rebuild the flow-control and 802.3 pause bits from scratch. */
	caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);

	if (tx && rx) {
		caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
	} else if (tx) {
		caps |= FW_PORT_CAP32_FC_TX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	} else if (rx) {
		caps |= FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	}

	/* Without PAUSE_AUTONEG the settings are forced on the firmware. */
	if (!(pause & PAUSE_AUTONEG))
		caps |= FW_PORT_CAP32_FORCE_PAUSE;

	*new_caps = caps;
	return 0;
}
9035
9036 #define T4_LINK_FEC_MASK V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)
9037
t4_link_supported_speed_to_fec(u32 speed)9038 static fw_port_cap32_t t4_link_supported_speed_to_fec(u32 speed)
9039 {
9040 fw_port_cap32_t caps = 0;
9041
9042 switch (speed) {
9043 case 100000:
9044 caps |= FW_PORT_CAP32_FEC_RS;
9045 break;
9046 case 50000:
9047 caps |= FW_PORT_CAP32_FEC_BASER_RS;
9048 break;
9049 case 25000:
9050 caps |= FW_PORT_CAP32_FEC_RS |
9051 FW_PORT_CAP32_FEC_BASER_RS;
9052 break;
9053 default:
9054 break;
9055 }
9056
9057 caps |= FW_PORT_CAP32_FEC_NO_FEC;
9058 return caps;
9059 }
9060
/*
 * t4_link_update_fec - recompute the FEC bits of a capability word
 * @pi: the Port Information structure (used for error logging)
 * @max_speed: link speed in Mb/s used to validate the requested FECs
 * @fec: requested Forward Error Correction mode(s)
 * @new_caps: in/out Port Capabilities whose FEC bits are replaced
 *
 * Replaces the FEC bits in @new_caps with the requested modes that this
 * function accepts for @max_speed; when nothing valid remains, falls
 * back to the speed's default FEC set with FORCE_FEC cleared.
 */
static void t4_link_update_fec(struct port_info *pi, u32 max_speed,
			    cc_fec_t fec, fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;

	/* Start from a clean slate: drop all current FEC bits. */
	caps &= ~T4_LINK_FEC_MASK;
	if (fec & FEC_RS) {
		/* RS FEC is only accepted here at 100G and 25G. */
		switch (max_speed) {
		case 100000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_RS;
			break;
		default:
			CH_ERR(pi->adapter,
			       "Ignoring unsupported RS FEC for speed %u\n",
			       max_speed);
			break;
		}
	}

	if (fec & FEC_BASER_RS) {
		/* BASE-R FEC is only accepted here at 50G and 25G. */
		switch (max_speed) {
		case 50000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_BASER_RS;
			break;
		default:
			CH_ERR(pi->adapter,
			       "Ignoring unsupported BASER FEC for speed %u\n",
			       max_speed);
			break;
		}
	}

	if (fec & FEC_NONE)
		caps |= FW_PORT_CAP32_FEC_NO_FEC;

	if (!(caps & T4_LINK_FEC_MASK)) {
		/* No explicit encoding is requested.
		 * So, default back to AUTO.
		 */
		caps |= t4_link_supported_speed_to_fec(max_speed);
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
	}

	/* Caller asked for the chosen FECs to be forced on the firmware. */
	if (fec & FEC_FORCE)
		caps |= FW_PORT_CAP32_FORCE_FEC;

	*new_caps = caps;
}
9111
int t4_link_set_fec(struct port_info *pi, cc_fec_t fec,
		 fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	u32 speed;

	/* FEC control requires the port to support at least one FEC mode. */
	if (!(lc->pcaps & T4_LINK_FEC_MASK))
		return -ENOTSUP;

	/*
	 * Validate the request against the current link speed; when the
	 * link is down, use the highest advertised speed instead.
	 */
	speed = t4_link_fwcap_to_speed(lc->link_caps);
	if (speed == 0)
		speed = t4_link_fwcap_to_speed(lc->acaps);

	t4_link_update_fec(pi, speed, fec, new_caps);
	return 0;
}
9131
9132 #define T4_LINK_SPEED_MASK V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED)
9133
/*
 * t4_link_set_speed - enable or disable one speed in a capability word
 * @pi: the Port Information structure
 * @speed: the speed capability bit(s) to change
 * @en: nonzero to enable @speed, zero to disable it
 * @new_caps: in/out Port Capabilities to update
 *
 * Returns -ENOTSUP if the port does not support @speed, -EINVAL if
 * disabling @speed would leave no usable speed, 0 otherwise.
 */
int t4_link_set_speed(struct port_info *pi, fw_port_cap32_t speed, u8 en,
		   fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;
	struct link_config *lc = &pi->link_cfg;

	/* Reject speeds outside the Physical Port Capabilities. */
	if (((lc->pcaps & T4_LINK_SPEED_MASK) & speed) != speed)
		return -ENOTSUP;

	if (en)
		caps |= speed;
	else
		caps &= ~speed;

	/* If no speeds are left, then pick the next highest speed. */
	if (!(caps & T4_LINK_SPEED_MASK)) {
		tcaps = CAP32_SPEED(lc->pcaps);
		tcaps &= ~speed;
		/*
		 * (speed - 1) masks to the bits below @speed, i.e. the
		 * supported speeds slower than the one just removed;
		 * t4_link_fwcap_to_fwspeed() then picks the fastest of
		 * those.
		 */
		tcaps &= (speed - 1);
		if (tcaps == 0)
			return -EINVAL;

		caps |= t4_link_fwcap_to_fwspeed(tcaps);
	}

	*new_caps = caps;
	return 0;
}
9162
static void t4_link_sanitize_speed_caps(struct link_config *lc,
				     fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;

	/*
	 * With autonegotiation disabled only a single forced speed makes
	 * sense, so keep just the highest speed bit that is set.
	 */
	if (!(caps & FW_PORT_CAP32_ANEG)) {
		fw_port_cap32_t speeds = CAP32_SPEED(caps);

		caps &= ~T4_LINK_SPEED_MASK;
		caps |= t4_link_fwcap_to_fwspeed(speeds);
	}

	*new_caps = caps;
}
9177
/*
 * t4_link_sanitize_fec_caps - make the FEC bits of a capability word sane
 * @lc: the link configuration (pcaps consulted for FEC/FORCE support)
 * @new_caps: in/out Port Capabilities whose FEC bits are sanitized
 *
 * Replaces the FEC bits in @new_caps with a combination that is valid
 * for the selected speed and the port's capabilities.
 */
static void t4_link_sanitize_fec_caps(struct link_config *lc,
				   fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;
	u32 max_speed;

	/* Sanitize FECs when supported */
	if (CAP32_FEC(lc->pcaps)) {
		/* Start from the default FEC set for the chosen speed. */
		max_speed = t4_link_fwcap_to_speed(caps);
		tcaps = t4_link_supported_speed_to_fec(max_speed);
		if (caps & FW_PORT_CAP32_FORCE_FEC) {
			/* If the current chosen FEC params are
			 * completely invalid, then disable FEC.
			 * Else, pick only the FECs requested
			 * by user or the defaults supported by
			 * the speed.
			 */
			if (!(tcaps & CAP32_FEC(caps)))
				tcaps = FW_PORT_CAP32_FEC_NO_FEC;
			else
				tcaps &= CAP32_FEC(caps);
		}
	} else {
		/* Always force NO_FEC when FECs are not supported */
		tcaps = FW_PORT_CAP32_FEC_NO_FEC;
	}

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
		tcaps |= FW_PORT_CAP32_FORCE_FEC;
	} else {
		/* Older firmware doesn't allow driver to send request
		 * to try multiple FECs for FEC_AUTO case. So, clear
		 * the FEC caps for FEC_AUTO case because the older
		 * firmware will try all supported FECs on its own.
		 */
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
		/* (tcaps & (tcaps - 1)) is nonzero iff >1 FEC bit is set. */
		if (tcaps & (tcaps - 1))
			tcaps = 0;
	}

	/* Install the sanitized FEC bits. */
	caps &= ~T4_LINK_FEC_MASK;
	caps |= tcaps;

	*new_caps = caps;
}
9223
static void t4_link_sanitize_caps(struct link_config *lc,
			       fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;

	/* Fix up the speed and FEC selections first. */
	t4_link_sanitize_speed_caps(lc, &caps);
	t4_link_sanitize_fec_caps(lc, &caps);

	/*
	 * Finally, clamp to the Physical Port Capabilities so no
	 * unsupported bit is ever sent to the firmware.  (A no-op when
	 * caps is already a subset of pcaps.)
	 */
	caps &= lc->pcaps;

	*new_caps = caps;
}
9238
9239 /**
9240 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
9241 * @adapter: the adapter
9242 * @mbox: the Firmware Mailbox to use
9243 * @port: the Port ID
9244 * @lc: the Port's Link Configuration
9245 * @rcap: new link configuration
9246 * @sleep_ok: if true we may sleep while awaiting command completion
9247 * @timeout: time to wait for command to finish before timing out
9248 * (negative implies @sleep_ok=false)
9249 *
9250 * Set up a port's MAC and PHY according to a desired link configuration.
9251 * - If the PHY can auto-negotiate first decide what to advertise, then
9252 * enable/disable auto-negotiation as desired, and reset.
9253 * - If the PHY does not auto-negotiate just reset it.
9254 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
9255 * otherwise do it later based on the outcome of auto-negotiation.
9256 */
int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
		    unsigned int port, struct link_config *lc,
		    fw_port_cap32_t rcap, bool sleep_ok, int timeout)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	int ret;

	/* Drop/fix any capability bits the port cannot actually take. */
	t4_link_sanitize_caps(lc, &rcap);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				       V_FW_PORT_CMD_PORTID(port));
	/* Pick the 16-bit or 32-bit L1 Configure action to match the FW. */
	cmd.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
				      sleep_ok, timeout);

	/* Unfortunately, even if the Requested Port Capabilities "fit" within
	 * the Physical Port Capabilities, some combinations of features may
	 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
	 * Error Correction. So if the Firmware rejects the L1 Configure
	 * request, flag that here.
	 */
	if (ret) {
		CH_ERR(adapter,
		       "Requested Port Capabilities 0x%x rejected, error %d\n",
		       rcap, -ret);
		return ret;
	}

	return 0;
}
9298
9299 /**
9300 * t4_restart_aneg - restart autonegotiation
9301 * @adap: the adapter
9302 * @mbox: mbox to use for the FW command
9303 * @port: the port id
9304 *
9305 * Restarts autonegotiation for the selected port.
9306 */
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)9307 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
9308 {
9309 unsigned int fw_caps = adap->params.fw_caps_support;
9310 struct fw_port_cmd c;
9311
9312 memset(&c, 0, sizeof(c));
9313 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9314 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9315 V_FW_PORT_CMD_PORTID(port));
9316 c.action_to_len16 =
9317 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9318 ? FW_PORT_ACTION_L1_CFG
9319 : FW_PORT_ACTION_L1_CFG32) |
9320 FW_LEN16(c));
9321 if (fw_caps == FW_CAPS16)
9322 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
9323 else
9324 c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
9325 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9326 }
9327
9328 /**
9329 * t4_init_link_config - initialize a link's SW state
9330 * @pi: the port info
9331 * @pcaps: link Port Capabilities
9332 * @acaps: link current Advertised Port Capabilities
9333 *
9334 * Initializes the SW state maintained for each link, including the link's
9335 * capabilities and default speed/flow-control/autonegotiation settings.
9336 */
static void t4_init_link_config(struct port_info *pi, fw_port_cap32_t pcaps,
			     fw_port_cap32_t acaps)
{
	u32 max_speed = t4_link_fwcap_to_speed(acaps);
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t new_caps = acaps;

	/* If initializing for the first time or if port module changed,
	 * then overwrite the saved link params with the new port module
	 * caps.
	 */
	if (lc->admin_caps == 0 || lc->pcaps != pcaps) {
		/* Default the administrative FEC selection to AUTO. */
		t4_link_update_fec(pi, max_speed, FEC_AUTO, &new_caps);
		lc->admin_caps = new_caps;
	}

	/* Record the new capabilities; link-partner and current link
	 * capabilities are unknown until the link comes up.
	 */
	lc->pcaps = pcaps;
	lc->acaps = acaps;
	lc->lpacaps = 0;
	lc->link_caps = 0;
}
9358
9359 /**
9360 * t4_link_down_rc_str - return a string for a Link Down Reason Code
9361 * @link_down_rc: Link Down Reason Code
9362 *
9363 * Returns a string representation of the Link Down Reason Code.
9364 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed by the firmware Link Down Reason Code. */
	static const char * const reasons[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};
	const size_t nreasons = sizeof(reasons) / sizeof(reasons[0]);

	return (link_down_rc < nreasons) ? reasons[link_down_rc]
					 : "Bad Reason Code";
}
9383
9384 /**
9385 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
9386 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
9387 *
9388 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
9389 * 32-bit Port Capabilities value.
9390 */
lstatus_to_fwcap(u32 lstatus)9391 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
9392 {
9393 fw_port_cap32_t linkattr = 0;
9394
9395 /*
9396 * Unfortunately the format of the Link Status in the old
9397 * 16-bit Port Information message isn't the same as the
9398 * 16-bit Port Capabilities bitfield used everywhere else ...
9399 */
9400 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
9401 linkattr |= FW_PORT_CAP32_FC_RX;
9402 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
9403 linkattr |= FW_PORT_CAP32_FC_TX;
9404 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
9405 linkattr |= FW_PORT_CAP32_SPEED_100M;
9406 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
9407 linkattr |= FW_PORT_CAP32_SPEED_1G;
9408 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
9409 linkattr |= FW_PORT_CAP32_SPEED_10G;
9410 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
9411 linkattr |= FW_PORT_CAP32_SPEED_25G;
9412 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
9413 linkattr |= FW_PORT_CAP32_SPEED_40G;
9414 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
9415 linkattr |= FW_PORT_CAP32_SPEED_100G;
9416
9417 return linkattr;
9418 }
9419
9420 /**
9421 * t4_handle_get_port_info - process a FW reply message
9422 * @pi: the port info
9423 * @rpl: start of the FW message
9424 *
9425 * Processes a GET_PORT_INFO FW reply message.
9426 */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
	const struct fw_port_cmd *cmd = (const void *)rpl;
	int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int link_ok, linkdnrc;
	enum fw_port_type port_type;
	enum fw_port_module_type mod_type;
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;

	/*
	 * Extract the various fields from the Port Information message.
	 */
	switch (action) {
	/* Old firmware: 16-bit capabilities, converted to 32-bit here. */
	case FW_PORT_ACTION_GET_PORT_INFO: {
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
		linkattr = lstatus_to_fwcap(lstatus);
		break;
	}

	/* New firmware: native 32-bit capabilities. */
	case FW_PORT_ACTION_GET_PORT_INFO32: {
		u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);

		link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
		break;
	}

	default:
		CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n",
		       be32_to_cpu(cmd->action_to_len16));
		return;
	}

	/*
	 * Reset state for communicating new Transceiver Module status and
	 * whether the OS-dependent layer wants us to redo the current
	 * "sticky" L1 Configure Link Parameters.
	 */
	lc->new_module = false;
	lc->redo_l1cfg = false;

	if (mod_type != pi->mod_type) {
		/*
		 * Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlaying Port Type reported to the Host Drivers. As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier. Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		/*
		 * Record new Module Type information.
		 */
		pi->mod_type = mod_type;

		/*
		 * Let the OS-dependent layer know if we have a new
		 * Transceiver Module inserted.
		 */
		lc->new_module = t4_is_inserted_mod_type(mod_type);

		/* A newly inserted module resets the saved link params. */
		if (lc->new_module)
			t4_init_link_config(pi, pcaps, acaps);
		t4_os_portmod_changed(adapter, pi->port_id);
	}

	if (link_ok != lc->link_ok || acaps != lc->acaps ||
	    lpacaps != lc->lpacaps || linkattr != lc->link_caps) {
		/* something changed */
		if (!link_ok && lc->link_ok) {
			/* Link just went down: record and log why. */
			lc->link_down_rc = linkdnrc;
			CH_WARN_RATELIMIT(adapter,
				"Port %d link down, reason: %s\n",
				pi->tx_chan, t4_link_down_rc_str(linkdnrc));
		}

		lc->link_ok = link_ok;
		lc->acaps = acaps;
		lc->lpacaps = lpacaps;
		lc->link_caps = linkattr;

		t4_os_link_changed(adapter, pi->port_id, link_ok);
	}

	/*
	 * If we have a new Transceiver Module and the OS-dependent code has
	 * told us that it wants us to redo whatever "sticky" L1 Configuration
	 * Link Parameters are set, do that now.
	 */
	if (lc->new_module && lc->redo_l1cfg) {
		int ret;

		/*
		 * Save the current L1 Configuration and restore it if an
		 * error occurs. We probably should fix the l1_cfg*()
		 * routines not to change the link_config when an error
		 * occurs ...
		 */
		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc,
				       lc->admin_caps);
		if (ret) {
			CH_WARN(adapter,
				"Attempt to update new Transceiver Module settings failed\n");
		}
	}
	lc->new_module = false;
	lc->redo_l1cfg = false;
}
9556
9557 /**
9558 * t4_update_port_info - retrieve and update port information if changed
9559 * @pi: the port_info
9560 *
9561 * We issue a Get Port Information Command to the Firmware and, if
9562 * successful, we check to see if anything is different from what we
9563 * last recorded and update things accordingly.
9564 */
t4_update_port_info(struct port_info * pi)9565 int t4_update_port_info(struct port_info *pi)
9566 {
9567 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9568 struct fw_port_cmd port_cmd;
9569 int ret;
9570
9571 memset(&port_cmd, 0, sizeof port_cmd);
9572 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9573 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9574 V_FW_PORT_CMD_PORTID(pi->lport));
9575 port_cmd.action_to_len16 = cpu_to_be32(
9576 V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9577 ? FW_PORT_ACTION_GET_PORT_INFO
9578 : FW_PORT_ACTION_GET_PORT_INFO32) |
9579 FW_LEN16(port_cmd));
9580 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9581 &port_cmd, sizeof(port_cmd), &port_cmd);
9582 if (ret)
9583 return ret;
9584
9585 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
9586 return 0;
9587 }
9588
9589 /**
9590 * t4_get_link_params - retrieve basic link parameters for given port
9591 * @pi: the port
9592 * @link_okp: value return pointer for link up/down
9593 * @speedp: value return pointer for speed (Mb/s)
9594 * @mtup: value return pointer for mtu
9595 *
9596 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
9597 * and MTU for a specified port. A negative error is returned on
9598 * failure; 0 on success.
9599 */
t4_get_link_params(struct port_info * pi,unsigned int * link_okp,unsigned int * speedp,unsigned int * mtup)9600 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
9601 unsigned int *speedp, unsigned int *mtup)
9602 {
9603 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9604 struct fw_port_cmd port_cmd;
9605 unsigned int action, link_ok, mtu;
9606 fw_port_cap32_t linkattr;
9607 int ret;
9608
9609 memset(&port_cmd, 0, sizeof port_cmd);
9610 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9611 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9612 V_FW_PORT_CMD_PORTID(pi->tx_chan));
9613 action = (fw_caps == FW_CAPS16
9614 ? FW_PORT_ACTION_GET_PORT_INFO
9615 : FW_PORT_ACTION_GET_PORT_INFO32);
9616 port_cmd.action_to_len16 = cpu_to_be32(
9617 V_FW_PORT_CMD_ACTION(action) |
9618 FW_LEN16(port_cmd));
9619 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9620 &port_cmd, sizeof(port_cmd), &port_cmd);
9621 if (ret)
9622 return ret;
9623
9624 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
9625 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
9626
9627 link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS);
9628 linkattr = lstatus_to_fwcap(lstatus);
9629 mtu = be16_to_cpu(port_cmd.u.info.mtu);;
9630 } else {
9631 u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
9632
9633 link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32);
9634 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
9635 mtu = G_FW_PORT_CMD_MTU32(
9636 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
9637 }
9638
9639 *link_okp = link_ok;
9640 *speedp = t4_link_fwcap_to_speed(linkattr);
9641 *mtup = mtu;
9642
9643 return 0;
9644 }
9645
9646 /**
9647 * t4_handle_fw_rpl - process a FW reply message
9648 * @adap: the adapter
9649 * @rpl: start of the FW message
9650 *
9651 * Processes a FW message, such as link state change messages.
9652 */
t4_handle_fw_rpl(struct adapter * adap,const __be64 * rpl)9653 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
9654 {
9655 u8 opcode = *(const u8 *)rpl;
9656
9657 /*
9658 * This might be a port command ... this simplifies the following
9659 * conditionals ... We can get away with pre-dereferencing
9660 * action_to_len16 because it's in the first 16 bytes and all messages
9661 * will be at least that long.
9662 */
9663 const struct fw_port_cmd *p = (const void *)rpl;
9664 unsigned int action =
9665 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
9666
9667 if (opcode == FW_PORT_CMD &&
9668 (action == FW_PORT_ACTION_GET_PORT_INFO ||
9669 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
9670 int i;
9671 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
9672 struct port_info *pi = NULL;
9673
9674 for_each_port(adap, i) {
9675 pi = adap2pinfo(adap, i);
9676 if (pi->lport == chan)
9677 break;
9678 }
9679
9680 t4_handle_get_port_info(pi, rpl);
9681 } else {
9682 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
9683 return -EINVAL;
9684 }
9685 return 0;
9686 }
9687
9688 /**
9689 * get_pci_mode - determine a card's PCI mode
9690 * @adapter: the adapter
9691 * @p: where to store the PCI settings
9692 *
9693 * Determines a card's PCI mode and associated parameters, such as speed
9694 * and width.
9695 */
get_pci_mode(struct adapter * adapter,struct pci_params * p)9696 static void get_pci_mode(struct adapter *adapter,
9697 struct pci_params *p)
9698 {
9699 u16 val;
9700 u32 pcie_cap;
9701
9702 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9703 if (pcie_cap) {
9704 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9705 p->speed = val & PCI_EXP_LNKSTA_CLS;
9706 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9707 }
9708 }
9709
/**
 *	t4_wait_dev_ready - wait until reads of registers work
 *
 *	Right after the device is RESET it can take a small amount of time
 *	for it to respond to register reads.  Until then, all reads will
 *	return either 0xff...ff or 0xee...ee.  Return an error if reads
 *	don't work within a reasonable time frame.
 */
t4_wait_dev_ready(struct adapter * adapter)9718 int t4_wait_dev_ready(struct adapter *adapter)
9719 {
9720 u32 whoami;
9721
9722 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9723 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9724 return 0;
9725
9726 msleep(500);
9727 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9728 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9729 return 0;
9730
9731 CH_ERR(adapter, "Device didn't become ready for access, "
9732 "whoami = %#x\n", whoami);
9733 return -EIO;
9734 }
9735
/*
 * Describes one known Flash part: the JEDEC Read-ID result and the part's
 * total size.  NOTE(review): despite its name, size_mb is used to hold a
 * size in bytes (see the 4 << 20 entry in supported_flash[]) -- confirm
 * before relying on the field name.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* 3-byte Read-ID response */
	u32 size_mb;			/* part size (bytes, per usage) */
};
9740
/*
 * t4_get_flash_params - identify the serial Flash and record its geometry
 * @adapter: the adapter
 *
 * Issues a Read ID command to the serial Flash, decodes the part size from
 * the returned manufacturer/density bytes and stores the size and sector
 * count in adapter->params.sf_size / sf_nsec.  Falls back to assuming a
 * 4MB part when the ID is not recognized.  Returns 0 on success or a
 * negative error from the SF access primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*  8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;
	}

	case 0xef: { /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*  8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;
	}
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
9883
/*
 * Program the PCIe Completion Timeout Value (low 4 bits of Device
 * Control 2) to @range.  Silently does nothing when the device exposes
 * no PCI Express capability.
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u32 cap_off;
	u16 devctl2;

	cap_off = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (cap_off == 0)
		return;

	/* Replace only the Completion Timeout Value field. */
	t4_os_pci_read_cfg2(adapter, cap_off + PCI_EXP_DEVCTL2, &devctl2);
	devctl2 = (devctl2 & 0xfff0) | range;
	t4_os_pci_write_cfg2(adapter, cap_off + PCI_EXP_DEVCTL2, devctl2);
}
9898
9899 /**
9900 * t4_get_chip_type - Determine chip type from device ID
9901 * @adap: the adapter
9902 * @ver: adapter version
9903 */
t4_get_chip_type(struct adapter * adap,int ver)9904 enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
9905 {
9906 enum chip_type chip = 0;
9907 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
9908
9909 /* Retrieve adapter's device ID */
9910 switch (ver) {
9911 case CHELSIO_T4_FPGA:
9912 chip |= CHELSIO_CHIP_FPGA;
9913 /*FALLTHROUGH*/
9914 case CHELSIO_T4:
9915 chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9916 break;
9917 case CHELSIO_T5_FPGA:
9918 chip |= CHELSIO_CHIP_FPGA;
9919 /*FALLTHROUGH*/
9920 case CHELSIO_T5:
9921 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9922 break;
9923 case CHELSIO_T6_FPGA:
9924 chip |= CHELSIO_CHIP_FPGA;
9925 /*FALLTHROUGH*/
9926 case CHELSIO_T6:
9927 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9928 break;
9929 default:
9930 CH_ERR(adap, "Device %d is not supported\n",
9931 adap->params.pci.device_id);
9932 return -EINVAL;
9933 }
9934
9935 /* T4A1 chip is no longer supported */
9936 if (chip == T4_A1) {
9937 CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
9938 return -EINVAL;
9939 }
9940 return chip;
9941 }
9942
9943 /**
9944 * t4_prep_pf - prepare SW and HW for PF operation
9945 * @adapter: the adapter
9946 *
9947 * Initialize adapter SW state for the various HW modules, set initial
9948 * values for some adapter tunables on each PF.
9949 */
int t4_prep_pf(struct adapter *adapter)
{
	int ret, ver;

	/* The chip must be answering register reads before anything else. */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	/* Record PCIe link speed/width for diagnostics. */
	get_pci_mode(adapter, &adapter->params.pci);


	/* Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);

	/*
	 * Decode the chip generation from the device ID and fill in the
	 * per-architecture constants (TCAM sizes, channel counts, etc.).
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = t4_get_chip_type(adapter, ver);
	if (is_t4(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t5(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t6(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
	} else {
		/* t4_get_chip_type() already logged the unknown device. */
		CH_ERR(adapter, "Device %d is not supported\n",
			adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* FPGA builds carry a double-sized CIM logic-analyzer buffer. */
	if (is_fpga(adapter->params.chip)) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
10031
10032 /**
10033 * t4_prep_master_pf - prepare SW for master PF operations
10034 * @adapter: the adapter
10035 *
10036 */
int t4_prep_master_pf(struct adapter *adapter)
{
	/* Common per-PF preparation comes first. */
	int ret = t4_prep_pf(adapter);

	if (ret < 0)
		return ret;

	/* The master PF additionally needs the Flash geometry. */
	ret = t4_get_flash_params(adapter);
	if (ret >= 0)
		return 0;

	CH_ERR(adapter,
	       "Unable to retrieve Flash parameters ret = %d\n", -ret);
	return ret;
}
10054
10055 /**
10056 * t4_prep_adapter - prepare SW and HW for operation
10057 * @adapter: the adapter
10058 * @reset: if true perform a HW reset
10059 *
10060 * Initialize adapter SW state for the various HW modules, set initial
10061 * values for some adapter tunables.
10062 */
int t4_prep_adapter(struct adapter *adapter, bool reset)
{
	/*
	 * @reset is accepted for interface compatibility but is currently
	 * unused: no HW reset is performed here.  Void-cast it to make the
	 * intent explicit and silence unused-parameter warnings.
	 */
	(void)reset;

	return t4_prep_master_pf(adapter);
}
10067
10068 /**
10069 * t4_shutdown_adapter - shut down adapter, host & wire
10070 * @adapter: the adapter
10071 *
10072 * Perform an emergency shutdown of the adapter and stop it from
10073 * continuing any further communication on the ports or DMA to the
10074 * host. This is typically used when the adapter and/or firmware
10075 * have crashed and we want to prevent any further accidental
10076 * communication with the rest of the world. This will also force
10077 * the port Link Status to go down -- if register writes work --
10078 * which should help our peers figure out that we're down.
10079 */
t4_shutdown_adapter(struct adapter * adapter)10080 int t4_shutdown_adapter(struct adapter *adapter)
10081 {
10082 int port;
10083
10084 t4_intr_disable(adapter);
10085 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
10086 for_each_port(adapter, port) {
10087 u32 a_port_cfg = is_t4(adapter->params.chip) ?
10088 PORT_REG(port, A_XGMAC_PORT_CFG) :
10089 T5_PORT_REG(port, A_MAC_PORT_CFG);
10090
10091 t4_write_reg(adapter, a_port_cfg,
10092 t4_read_reg(adapter, a_port_cfg)
10093 & ~V_SIGNAL_DET(1));
10094 }
10095 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
10096
10097 return 0;
10098 }
10099
10100 /**
10101 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
10102 * @adapter: the adapter
10103 * @qid: the Queue ID
10104 * @qtype: the Ingress or Egress type for @qid
10105 * @user: true if this request is for a user mode queue
10106 * @pbar2_qoffset: BAR2 Queue Offset
10107 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
10108 *
10109 * Returns the BAR2 SGE Queue Registers information associated with the
10110 * indicated Absolute Queue ID. These are passed back in return value
10111 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
10112 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
10113 *
10114 * This may return an error which indicates that BAR2 SGE Queue
10115 * registers aren't available. If an error is not returned, then the
10116 * following values are returned:
10117 *
10118 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
10119 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
10120 *
10121 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
10122 * require the "Inferred Queue ID" ability may be used. E.g. the
10123 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
10124 * then these "Inferred Queue ID" register may not be used.
10125 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int hw_page_shift, hw_page_size, qperpage_shift;
	unsigned int qid_in_page, qid_byte_offset;
	u64 qoffset;

	/*
	 * T4 only exposes BAR2 SGE Queue registers for user-mode queues;
	 * reject kernel-mode requests on T4.
	 */
	if (is_t4(adapter->params.chip) && !user)
		return -EINVAL;

	/* SGE Host Page Size for this PF. */
	hw_page_shift = adapter->params.sge.hps + 10;
	hw_page_size = 1 << hw_page_shift;

	/* Queues-per-page shift for the requested queue type. */
	if (qtype == T4_BAR2_QTYPE_EGRESS)
		qperpage_shift = adapter->params.sge.eq_qpp;
	else
		qperpage_shift = adapter->params.sge.iq_qpp;

	/*
	 * Split the Absolute Queue ID into the BAR2 page that holds its
	 * registers and its position within that page.
	 */
	qoffset = (u64)(qid >> qperpage_shift) << hw_page_shift;
	qid_in_page = qid & ((1 << qperpage_shift) - 1);
	qid_byte_offset = qid_in_page * SGE_UDB_SIZE;

	/*
	 * If the queue's register area lies inside the BAR2 page, the
	 * hardware infers the Absolute Queue ID from the write offset
	 * alone, so callers use a BAR2 Queue ID of 0 and we fold the
	 * within-page offset into the returned BAR2 offset.  Otherwise
	 * writes go to the start of the page with an explicit BAR2 Queue
	 * ID.  Registers which synthesize the Queue ID purely from the
	 * write address (e.g. the Write Combined Doorbell Buffer) are
	 * only usable in the inferred case.
	 */
	if (qid_byte_offset < hw_page_size) {
		*pbar2_qoffset = qoffset + qid_byte_offset;
		*pbar2_qid = 0;
	} else {
		*pbar2_qoffset = qoffset;
		*pbar2_qid = qid_in_page;
	}

	return 0;
}
10191
10192 /**
10193 * t4_init_devlog_params - initialize adapter->params.devlog
10194 * @adap: the adapter
10195 * @fw_attach: whether we can talk to the firmware
10196 *
10197 * Initialize various fields of the adapter's Firmware Device Log
10198 * Parameters structure.
10199 */
t4_init_devlog_params(struct adapter * adap,int fw_attach)10200 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
10201 {
10202 struct devlog_params *dparams = &adap->params.devlog;
10203 u32 pf_dparams;
10204 unsigned int devlog_meminfo;
10205 struct fw_devlog_cmd devlog_cmd;
10206 int ret;
10207
10208 /* If we're dealing with newer firmware, the Device Log Paramerters
10209 * are stored in a designated register which allows us to access the
10210 * Device Log even if we can't talk to the firmware.
10211 */
10212 pf_dparams =
10213 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
10214 if (pf_dparams) {
10215 unsigned int nentries, nentries128;
10216
10217 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
10218 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
10219
10220 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
10221 nentries = (nentries128 + 1) * 128;
10222 dparams->size = nentries * sizeof(struct fw_devlog_e);
10223
10224 return 0;
10225 }
10226
10227 /*
10228 * For any failing returns ...
10229 */
10230 memset(dparams, 0, sizeof *dparams);
10231
10232 /*
10233 * If we can't talk to the firmware, there's really nothing we can do
10234 * at this point.
10235 */
10236 if (!fw_attach)
10237 return -ENXIO;
10238
10239 /* Otherwise, ask the firmware for it's Device Log Parameters.
10240 */
10241 memset(&devlog_cmd, 0, sizeof devlog_cmd);
10242 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10243 F_FW_CMD_REQUEST | F_FW_CMD_READ);
10244 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10245 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
10246 &devlog_cmd);
10247 if (ret)
10248 return ret;
10249
10250 devlog_meminfo =
10251 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
10252 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
10253 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
10254 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
10255
10256 return 0;
10257 }
10258
10259 /**
10260 * t4_init_sge_params - initialize adap->params.sge
10261 * @adapter: the adapter
10262 *
10263 * Initialize various fields of the adapter's SGE Parameters structure.
10264 */
t4_init_sge_params(struct adapter * adapter)10265 int t4_init_sge_params(struct adapter *adapter)
10266 {
10267 struct sge_params *sge_params = &adapter->params.sge;
10268 u32 hps, qpp;
10269 unsigned int s_hps, s_qpp;
10270
10271 /* Extract the SGE Page Size for our PF.
10272 */
10273 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
10274 s_hps = (S_HOSTPAGESIZEPF0 +
10275 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
10276 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
10277
10278 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
10279 */
10280 s_qpp = (S_QUEUESPERPAGEPF0 +
10281 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
10282 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
10283 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10284 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
10285 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10286
10287 return 0;
10288 }
10289
10290 /**
10291 * t4_init_tp_params - initialize adap->params.tp
10292 * @adap: the adapter
10293 * @sleep_ok: if true we may sleep while awaiting command completion
10294 *
10295 * Initialize various fields of the adapter's TP Parameters structure.
10296 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	u32 param, val, v;
	int chan, ret;

	/* Cache TP timer and delayed-ACK tick resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
	 * Configuration.
	 */

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));

	/* Read current value */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret == 0) {
		CH_INFO(adap,
			"Current filter mode/mask 0x%x:0x%x\n",
			G_FW_PARAMS_PARAM_FILTER_MODE(val),
			G_FW_PARAMS_PARAM_FILTER_MASK(val));
		adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
		adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
	} else {
		CH_WARN(adap,
			"Reading filter mode/mask not supported via fw api, "
			"falling back to older indirect-reg-read \n");

		/* In case of an older-fw (which doesn't expose the api
		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
		 * the fw api) combination, fall back to the older method of
		 * reading the filter mode from the indirect register.
		 */
		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
			       A_TP_VLAN_PRI_MAP, sleep_ok);

		/* With the older-fw and newer-driver combination we might run
		 * into an issue when the user wants to use the hash filter
		 * region but the filter_mask is zero; in this case
		 * filter_mask validation is tough.  To avoid that we set the
		 * filter_mask same as the filter mode, which will behave
		 * exactly as the older way of ignoring the filter mask
		 * validation.
		 */
		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
	}

	/* Cache the global ingress configuration as well. */
	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       A_TP_INGRESS_CONFIG, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								F_ETHERTYPE);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       F_MACMATCH);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								F_MPSHITTYPE);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   F_FRAGMENTATION);
	return 0;
}
10382
10383 /**
10384 * t4_filter_field_shift - calculate filter field shift
10385 * @adap: the adapter
10386 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
10387 *
10388 * Return the shift position of a filter field within the Compressed
10389 * Filter Tuple. The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
10391 */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)10392 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
10393 {
10394 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
10395 unsigned int sel;
10396 int field_shift;
10397
10398 if ((filter_mode & filter_sel) == 0)
10399 return -1;
10400
10401 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10402 switch (filter_mode & sel) {
10403 case F_FCOE:
10404 field_shift += W_FT_FCOE;
10405 break;
10406 case F_PORT:
10407 field_shift += W_FT_PORT;
10408 break;
10409 case F_VNIC_ID:
10410 field_shift += W_FT_VNIC_ID;
10411 break;
10412 case F_VLAN:
10413 field_shift += W_FT_VLAN;
10414 break;
10415 case F_TOS:
10416 field_shift += W_FT_TOS;
10417 break;
10418 case F_PROTOCOL:
10419 field_shift += W_FT_PROTOCOL;
10420 break;
10421 case F_ETHERTYPE:
10422 field_shift += W_FT_ETHERTYPE;
10423 break;
10424 case F_MACMATCH:
10425 field_shift += W_FT_MACMATCH;
10426 break;
10427 case F_MPSHITTYPE:
10428 field_shift += W_FT_MPSHITTYPE;
10429 break;
10430 case F_FRAGMENTATION:
10431 field_shift += W_FT_FRAGMENTATION;
10432 break;
10433 }
10434 }
10435 return field_shift;
10436 }
10437
10438 /**
10439 * t4_create_filter_info - return Compressed Filter Value/Mask tuple
10440 * @adapter: the adapter
10441 * @filter_value: Filter Value return value pointer
10442 * @filter_mask: Filter Mask return value pointer
10443 * @fcoe: FCoE filter selection
10444 * @port: physical port filter selection
10445 * @vnic: Virtual NIC ID filter selection
10446 * @vlan: VLAN ID filter selection
10447 * @vlan_pcp: VLAN Priority Code Point
10448 * @vlan_dei: VLAN Drop Eligibility Indicator
 *	@tos: Type Of Service filter selection
10450 * @protocol: IP Protocol filter selection
10451 * @ethertype: Ethernet Type filter selection
10452 * @macmatch: MPS MAC Index filter selection
10453 * @matchtype: MPS Hit Type filter selection
10454 * @frag: IP Fragmentation filter selection
10455 *
10456 * Construct a Compressed Filter Value/Mask tuple based on a set of
10457 * "filter selection" values. For each passed filter selection value
10458 * which is greater than or equal to 0, we put that value into the
10459 * constructed Filter Value and the appropriate mask into the Filter
10460 * Mask. If a filter selections is specified which is not currently
10461 * configured into the hardware, an error will be returned. Otherwise
 *	the constructed Filter Value/Mask tuple will be returned via the
10463 * specified return value pointers and success will be returned.
10464 *
10465 * All filter selection values and the returned Filter Value/Mask values
10466 * are in Host-Endian format.
10467 */
t4_create_filter_info(const struct adapter * adapter,u64 * filter_value,u64 * filter_mask,int fcoe,int port,int vnic,int vlan,int vlan_pcp,int vlan_dei,int tos,int protocol,int ethertype,int macmatch,int matchtype,int frag)10468 int t4_create_filter_info(const struct adapter *adapter,
10469 u64 *filter_value, u64 *filter_mask,
10470 int fcoe, int port, int vnic,
10471 int vlan, int vlan_pcp, int vlan_dei,
10472 int tos, int protocol, int ethertype,
10473 int macmatch, int matchtype, int frag)
10474 {
10475 const struct tp_params *tp = &adapter->params.tp;
10476 u64 v, m;
10477
10478 /*
10479 * If any selected filter field isn't enabled, return an error.
10480 */
10481 #define BAD_FILTER(__field) \
10482 ((__field) >= 0 && tp->__field##_shift < 0)
10483 if (BAD_FILTER(fcoe) ||
10484 BAD_FILTER(port) ||
10485 BAD_FILTER(vnic) ||
10486 BAD_FILTER(vlan) ||
10487 BAD_FILTER(tos) ||
10488 BAD_FILTER(protocol) ||
10489 BAD_FILTER(ethertype) ||
10490 BAD_FILTER(macmatch) ||
10491 BAD_FILTER(matchtype) ||
10492 BAD_FILTER(frag))
10493 return -EINVAL;
10494 #undef BAD_FILTER
10495
10496 /*
10497 * We have to have VLAN ID selected if we want to also select on
10498 * either the Priority Code Point or Drop Eligibility Indicator
10499 * fields.
10500 */
10501 if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
10502 return -EINVAL;
10503
10504 /*
10505 * Construct Filter Value and Mask.
10506 */
10507 v = m = 0;
10508 #define SET_FILTER_FIELD(__field, __width) \
10509 do { \
10510 if ((__field) >= 0) { \
10511 const int shift = tp->__field##_shift; \
10512 \
10513 v |= (__field) << shift; \
10514 m |= ((1ULL << (__width)) - 1) << shift; \
10515 } \
10516 } while (0)
10517 SET_FILTER_FIELD(fcoe, W_FT_FCOE);
10518 SET_FILTER_FIELD(port, W_FT_PORT);
10519 SET_FILTER_FIELD(tos, W_FT_TOS);
10520 SET_FILTER_FIELD(protocol, W_FT_PROTOCOL);
10521 SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
10522 SET_FILTER_FIELD(macmatch, W_FT_MACMATCH);
10523 SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
10524 SET_FILTER_FIELD(frag, W_FT_FRAGMENTATION);
10525 #undef SET_FILTER_FIELD
10526
10527 /*
10528 * We handle VNIC ID and VLANs separately because they're slightly
10529 * different than the rest of the fields. Both require that a
10530 * corresponding "valid" bit be set in the Filter Value and Mask.
10531 * These bits are in the top bit of the field. Additionally, we can
10532 * select the Priority Code Point and Drop Eligibility Indicator
10533 * fields for VLANs as an option. Remember that the format of a VLAN
10534 * Tag is:
10535 *
10536 * bits: 3 1 12
10537 * +---+-+------------+
10538 * |PCP|D| VLAN ID |
10539 * +---+-+------------+
10540 */
10541 if (vnic >= 0) {
10542 v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
10543 m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
10544 }
10545 if (vlan >= 0) {
10546 v |= ((1ULL << (W_FT_VLAN-1)) | vlan) << tp->vlan_shift;
10547 m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
10548
10549 if (vlan_dei >= 0) {
10550 v |= vlan_dei << (tp->vlan_shift + 12);
10551 m |= 0x7 << (tp->vlan_shift + 12);
10552 }
10553 if (vlan_pcp >= 0) {
10554 v |= vlan_pcp << (tp->vlan_shift + 13);
10555 m |= 0x7 << (tp->vlan_shift + 13);
10556 }
10557 }
10558
10559 /*
10560 * Pass back computed Filter Value and Mask; return success.
10561 */
10562 *filter_value = v;
10563 *filter_mask = m;
10564 return 0;
10565 }
10566
t4_init_rss_mode(struct adapter * adap,int mbox)10567 int t4_init_rss_mode(struct adapter *adap, int mbox)
10568 {
10569 int i, ret;
10570 struct fw_rss_vi_config_cmd rvc;
10571
10572 memset(&rvc, 0, sizeof(rvc));
10573
10574 for_each_port(adap, i) {
10575 struct port_info *p = adap2pinfo(adap, i);
10576 rvc.op_to_viid =
10577 cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
10578 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10579 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
10580 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
10581 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
10582 if (ret)
10583 return ret;
10584 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
10585 }
10586 return 0;
10587 }
10588
t4_init_portmirror(struct port_info * pi,int mbox,int port,int pf,int vf)10589 static int t4_init_portmirror(struct port_info *pi, int mbox,
10590 int port, int pf, int vf)
10591 {
10592 struct adapter *adapter = pi->adapter;
10593 int ret;
10594 u8 vivld = 0, vin = 0;
10595
10596 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
10597 &vivld, &vin);
10598 if (ret < 0)
10599 return ret;
10600
10601 pi->viid_mirror = ret;
10602
10603 /* If fw supports returning the VIN as part of FW_VI_CMD,
10604 * save the returned values.
10605 */
10606 if (adapter->params.viid_smt_extn_support) {
10607 pi->vivld_mirror = vivld;
10608 pi->vin_mirror = vin;
10609 } else {
10610 /* Retrieve the values from VIID */
10611 pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror);
10612 pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror);
10613 }
10614
10615 CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
10616 port, pf, pi->vin_mirror);
10617 return 0;
10618 }
10619
t4_mirror_init(struct adapter * adap,int mbox,int pf,int vf,bool enable_ringbb)10620 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf,
10621 bool enable_ringbb)
10622 {
10623 int ret, i, j = 0;
10624
10625 for_each_port(adap, i) {
10626 struct port_info *pi = adap2pinfo(adap, i);
10627
10628 /* We want mirroring only on Port0 for ringbackbone
10629 * configuration.
10630 */
10631 if (enable_ringbb && i)
10632 break;
10633 while ((adap->params.portvec & (1 << j)) == 0)
10634 j++;
10635
10636 ret = t4_init_portmirror(pi, mbox, j, pf, vf);
10637 if (ret)
10638 return ret;
10639 j++;
10640 }
10641 return 0;
10642 }
10643
10644 /**
10645 * t4_init_portinfo_viid - allocate a virtual interface and initialize
10646 * port_info
10647 * @pi: the port_info
10648 * @mbox: mailbox to use for the FW command
10649 * @port: physical port associated with the VI
10650 * @pf: the PF owning the VI
10651 * @vf: the VF owning the VI
10652 * @mac: the MAC address of the VI
10653 * @alloc_vi: Indicator to alloc VI
10654 *
10655 * Allocates a virtual interface for the given physical port. If @mac is
10656 * not %NULL it contains the MAC address of the VI as assigned by FW.
10657 * @mac should be large enough to hold an Ethernet address.
10658 * Returns < 0 on error.
10659 */
t4_init_portinfo_viid(struct port_info * pi,int mbox,int port,int pf,int vf,u8 mac[],bool alloc_vi)10660 int t4_init_portinfo_viid(struct port_info *pi, int mbox,
10661 int port, int pf, int vf, u8 mac[], bool alloc_vi)
10662 {
10663 struct adapter *adapter = pi->adapter;
10664 unsigned int fw_caps = adapter->params.fw_caps_support;
10665 struct fw_port_cmd cmd;
10666 unsigned int rss_size;
10667 enum fw_port_type port_type;
10668 int mdio_addr;
10669 fw_port_cap32_t pcaps, acaps;
10670 int ret;
10671
10672 /*
10673 * If we haven't yet determined whether we're talking to Firmware
10674 * which knows the new 32-bit Port Capabilities, it's time to find
10675 * out now. This will also tell new Firmware to send us Port Status
10676 * Updates using the new 32-bit Port Capabilities version of the
10677 * Port Information message.
10678 */
10679 if (fw_caps == FW_CAPS_UNKNOWN) {
10680 u32 param, val;
10681
10682 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
10683 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
10684 val = 1;
10685 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
10686 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
10687 adapter->params.fw_caps_support = fw_caps;
10688 }
10689
10690 memset(&cmd, 0, sizeof(cmd));
10691 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
10692 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10693 V_FW_PORT_CMD_PORTID(port));
10694 cmd.action_to_len16 = cpu_to_be32(
10695 V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
10696 ? FW_PORT_ACTION_GET_PORT_INFO
10697 : FW_PORT_ACTION_GET_PORT_INFO32) |
10698 FW_LEN16(cmd));
10699 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
10700 if (ret)
10701 return ret;
10702
10703 /*
10704 * Extract the various fields from the Port Information message.
10705 */
10706 if (fw_caps == FW_CAPS16) {
10707 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
10708
10709 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
10710 mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
10711 ? G_FW_PORT_CMD_MDIOADDR(lstatus)
10712 : -1);
10713 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
10714 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
10715 } else {
10716 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
10717
10718 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
10719 mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
10720 ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
10721 : -1);
10722 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
10723 acaps = be32_to_cpu(cmd.u.info32.acaps32);
10724 }
10725
10726 if (alloc_vi) {
10727 u8 vivld = 0, vin = 0;
10728
10729 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
10730 &rss_size, &vivld, &vin);
10731 if (ret < 0)
10732 return ret;
10733
10734 pi->viid = ret;
10735 pi->rss_size = rss_size;
10736
10737 /* If fw supports returning the VIN as part of FW_VI_CMD,
10738 * save the returned values.
10739 */
10740 if (adapter->params.viid_smt_extn_support) {
10741 pi->vivld = vivld;
10742 pi->vin = vin;
10743 } else {
10744 /* Retrieve the values from VIID */
10745 pi->vivld = G_FW_VIID_VIVLD(pi->viid);
10746 pi->vin = G_FW_VIID_VIN(pi->viid);
10747 }
10748 }
10749
10750 pi->tx_chan = port;
10751 pi->lport = port;
10752 pi->rx_chan = port;
10753 pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
10754
10755 pi->port_type = port_type;
10756 pi->mdio_addr = mdio_addr;
10757 pi->mod_type = FW_PORT_MOD_TYPE_NA;
10758
10759 t4_init_link_config(pi, pcaps, acaps);
10760 return 0;
10761 }
10762
10763 /**
10764 * t4_init_portinfo - allocate a virtual interface and initialize port_info
10765 * @pi: the port_info
10766 * @mbox: mailbox to use for the FW command
10767 * @port: physical port associated with the VI
10768 * @pf: the PF owning the VI
10769 * @vf: the VF owning the VI
10770 * @mac: the MAC address of the VI
10771 *
10772 * Allocates a virtual interface for the given physical port. If @mac is
10773 * not %NULL it contains the MAC address of the VI as assigned by FW.
10774 * @mac should be large enough to hold an Ethernet address.
10775 * Returns < 0 on error.
10776 */
t4_init_portinfo(struct port_info * pi,int mbox,int port,int pf,int vf,u8 mac[])10777 int t4_init_portinfo(struct port_info *pi, int mbox,
10778 int port, int pf, int vf, u8 mac[])
10779 {
10780 return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true);
10781 }
10782
t4_port_init(struct adapter * adap,int mbox,int pf,int vf)10783 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
10784 {
10785 u8 addr[6];
10786 int ret, i, j = 0;
10787
10788 for_each_port(adap, i) {
10789 struct port_info *pi = adap2pinfo(adap, i);
10790
10791 while ((adap->params.portvec & (1 << j)) == 0)
10792 j++;
10793
10794 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
10795 if (ret)
10796 return ret;
10797
10798 t4_os_set_hw_addr(adap, i, addr);
10799 j++;
10800 }
10801 return 0;
10802 }
10803
10804 /**
10805 * t4_read_cimq_cfg - read CIM queue configuration
10806 * @adap: the adapter
10807 * @base: holds the queue base addresses in bytes
10808 * @size: holds the queue sizes in bytes
10809 * @thres: holds the queue full thresholds in bytes
10810 *
10811 * Returns the current configuration of the CIM queues, starting with
10812 * the IBQs, then the OBQs.
10813 */
t4_read_cimq_cfg(struct adapter * adap,u16 * base,u16 * size,u16 * thres)10814 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
10815 {
10816 unsigned int i, v;
10817 int cim_num_obq = is_t4(adap->params.chip) ?
10818 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10819
10820 for (i = 0; i < CIM_NUM_IBQ; i++) {
10821 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
10822 V_QUENUMSELECT(i));
10823 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10824 /* value is in 256-byte units */
10825 *base++ = G_CIMQBASE(v) * 256;
10826 *size++ = G_CIMQSIZE(v) * 256;
10827 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
10828 }
10829 for (i = 0; i < cim_num_obq; i++) {
10830 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10831 V_QUENUMSELECT(i));
10832 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10833 /* value is in 256-byte units */
10834 *base++ = G_CIMQBASE(v) * 256;
10835 *size++ = G_CIMQSIZE(v) * 256;
10836 }
10837 }
10838
10839 /**
10840 * t4_read_cim_ibq - read the contents of a CIM inbound queue
10841 * @adap: the adapter
10842 * @qid: the queue index
10843 * @data: where to store the queue contents
10844 * @n: capacity of @data in 32-bit words
10845 *
10846 * Reads the contents of the selected CIM queue starting at address 0 up
10847 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
10848 * error and the number of 32-bit words actually read on success.
10849 */
t4_read_cim_ibq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)10850 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10851 {
10852 int i, err, attempts;
10853 unsigned int addr;
10854 const unsigned int nwords = CIM_IBQ_SIZE * 4;
10855
10856 if (qid > 5 || (n & 3))
10857 return -EINVAL;
10858
10859 addr = qid * nwords;
10860 if (n > nwords)
10861 n = nwords;
10862
10863 /* It might take 3-10ms before the IBQ debug read access is allowed.
10864 * Wait for 1 Sec with a delay of 1 usec.
10865 */
10866 attempts = 1000000;
10867
10868 for (i = 0; i < n; i++, addr++) {
10869 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
10870 F_IBQDBGEN);
10871 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
10872 attempts, 1);
10873 if (err)
10874 return err;
10875 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
10876 }
10877 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
10878 return i;
10879 }
10880
10881 /**
10882 * t4_read_cim_obq - read the contents of a CIM outbound queue
10883 * @adap: the adapter
10884 * @qid: the queue index
10885 * @data: where to store the queue contents
10886 * @n: capacity of @data in 32-bit words
10887 *
10888 * Reads the contents of the selected CIM queue starting at address 0 up
10889 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
10890 * error and the number of 32-bit words actually read on success.
10891 */
t4_read_cim_obq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)10892 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10893 {
10894 int i, err;
10895 unsigned int addr, v, nwords;
10896 int cim_num_obq = is_t4(adap->params.chip) ?
10897 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10898
10899 if ((qid > (cim_num_obq - 1)) || (n & 3))
10900 return -EINVAL;
10901
10902 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10903 V_QUENUMSELECT(qid));
10904 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10905
10906 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
10907 nwords = G_CIMQSIZE(v) * 64; /* same */
10908 if (n > nwords)
10909 n = nwords;
10910
10911 for (i = 0; i < n; i++, addr++) {
10912 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
10913 F_OBQDBGEN);
10914 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
10915 2, 1);
10916 if (err)
10917 return err;
10918 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
10919 }
10920 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
10921 return i;
10922 }
10923
10924 /**
10925 * t4_cim_read - read a block from CIM internal address space
10926 * @adap: the adapter
10927 * @addr: the start address within the CIM address space
10928 * @n: number of words to read
10929 * @valp: where to store the result
10930 *
 * Reads a block of 4-byte words from the CIM internal address space.
10932 */
t4_cim_read(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)10933 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
10934 unsigned int *valp)
10935 {
10936 int ret = 0;
10937
10938 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10939 return -EBUSY;
10940
10941 for ( ; !ret && n--; addr += 4) {
10942 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
10943 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10944 0, 5, 2);
10945 if (!ret)
10946 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
10947 }
10948 return ret;
10949 }
10950
10951 /**
10952 * t4_cim_write - write a block into CIM internal address space
10953 * @adap: the adapter
10954 * @addr: the start address within the CIM address space
10955 * @n: number of words to write
10956 * @valp: set of values to write
10957 *
 * Writes a block of 4-byte words into the CIM internal address space.
10959 */
t4_cim_write(struct adapter * adap,unsigned int addr,unsigned int n,const unsigned int * valp)10960 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
10961 const unsigned int *valp)
10962 {
10963 int ret = 0;
10964
10965 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10966 return -EBUSY;
10967
10968 for ( ; !ret && n--; addr += 4) {
10969 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
10970 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
10971 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10972 0, 5, 2);
10973 }
10974 return ret;
10975 }
10976
t4_cim_write1(struct adapter * adap,unsigned int addr,unsigned int val)10977 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
10978 unsigned int val)
10979 {
10980 return t4_cim_write(adap, addr, 1, &val);
10981 }
10982
10983 /**
10984 * t4_cim_read_la - read CIM LA capture buffer
10985 * @adap: the adapter
10986 * @la_buf: where to store the LA data
10987 * @wrptr: the HW write pointer within the capture buffer
10988 *
10989 * Reads the contents of the CIM LA buffer with the most recent entry at
10990 * the end of the returned data and with the entry at @wrptr first.
10991 * We try to leave the LA in the running state we find it in.
10992 */
t4_cim_read_la(struct adapter * adap,u32 * la_buf,unsigned int * wrptr)10993 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
10994 {
10995 int i, ret;
10996 unsigned int cfg, val, idx;
10997
10998 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
10999 if (ret)
11000 return ret;
11001
11002 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
11003 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
11004 if (ret)
11005 return ret;
11006 }
11007
11008 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
11009 if (ret)
11010 goto restart;
11011
11012 idx = G_UPDBGLAWRPTR(val);
11013 if (wrptr)
11014 *wrptr = idx;
11015
11016 for (i = 0; i < adap->params.cim_la_size; i++) {
11017 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
11018 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
11019 if (ret)
11020 break;
11021 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
11022 if (ret)
11023 break;
11024 if (val & F_UPDBGLARDEN) {
11025 ret = -ETIMEDOUT;
11026 break;
11027 }
11028 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
11029 if (ret)
11030 break;
11031
11032 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
11033 * identify the 32-bit portion of the full 312-bit data
11034 */
11035 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
11036 idx = (idx & 0xff0) + 0x10;
11037 else
11038 idx++;
11039 /* address can't exceed 0xfff */
11040 idx &= M_UPDBGLARDPTR;
11041 }
11042 restart:
11043 if (cfg & F_UPDBGLAEN) {
11044 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
11045 cfg & ~F_UPDBGLARDEN);
11046 if (!ret)
11047 ret = r;
11048 }
11049 return ret;
11050 }
11051
11052 /**
11053 * t4_tp_read_la - read TP LA capture buffer
11054 * @adap: the adapter
11055 * @la_buf: where to store the LA data
11056 * @wrptr: the HW write pointer within the capture buffer
11057 *
11058 * Reads the contents of the TP LA buffer with the most recent entry at
11059 * the end of the returned data and with the entry at @wrptr first.
11060 * We leave the LA in the running state we find it in.
11061 */
t4_tp_read_la(struct adapter * adap,u64 * la_buf,unsigned int * wrptr)11062 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
11063 {
11064 bool last_incomplete;
11065 unsigned int i, cfg, val, idx;
11066
11067 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
11068 if (cfg & F_DBGLAENABLE) /* freeze LA */
11069 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11070 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
11071
11072 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
11073 idx = G_DBGLAWPTR(val);
11074 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
11075 if (last_incomplete)
11076 idx = (idx + 1) & M_DBGLARPTR;
11077 if (wrptr)
11078 *wrptr = idx;
11079
11080 val &= 0xffff;
11081 val &= ~V_DBGLARPTR(M_DBGLARPTR);
11082 val |= adap->params.tp.la_mask;
11083
11084 for (i = 0; i < TPLA_SIZE; i++) {
11085 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
11086 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
11087 idx = (idx + 1) & M_DBGLARPTR;
11088 }
11089
11090 /* Wipe out last entry if it isn't valid */
11091 if (last_incomplete)
11092 la_buf[TPLA_SIZE - 1] = ~0ULL;
11093
11094 if (cfg & F_DBGLAENABLE) /* restore running state */
11095 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11096 cfg | adap->params.tp.la_mask);
11097 }
11098
11099 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
11100 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
11101 * state for more than the Warning Threshold then we'll issue a warning about
11102 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
11103 * appears to be hung every Warning Repeat second till the situation clears.
11104 * If the situation clears, we'll note that as well.
11105 */
11106 #define SGE_IDMA_WARN_THRESH 1
11107 #define SGE_IDMA_WARN_REPEAT 300
11108
11109 /**
11110 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
11111 * @adapter: the adapter
11112 * @idma: the adapter IDMA Monitor state
11113 *
11114 * Initialize the state of an SGE Ingress DMA Monitor.
11115 */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)11116 void t4_idma_monitor_init(struct adapter *adapter,
11117 struct sge_idma_monitor_state *idma)
11118 {
11119 /* Initialize the state variables for detecting an SGE Ingress DMA
11120 * hang. The SGE has internal counters which count up on each clock
11121 * tick whenever the SGE finds its Ingress DMA State Engines in the
11122 * same state they were on the previous clock tick. The clock used is
11123 * the Core Clock so we have a limit on the maximum "time" they can
11124 * record; typically a very small number of seconds. For instance,
11125 * with a 600MHz Core Clock, we can only count up to a bit more than
11126 * 7s. So we'll synthesize a larger counter in order to not run the
11127 * risk of having the "timers" overflow and give us the flexibility to
11128 * maintain a Hung SGE State Machine of our own which operates across
11129 * a longer time frame.
11130 */
11131 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
11132 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
11133 }
11134
11135 /**
11136 * t4_idma_monitor - monitor SGE Ingress DMA state
11137 * @adapter: the adapter
11138 * @idma: the adapter IDMA Monitor state
11139 * @hz: number of ticks/second
11140 * @ticks: number of ticks since the last IDMA Monitor call
11141 */
t4_idma_monitor(struct adapter * adapter,struct sge_idma_monitor_state * idma,int hz,int ticks)11142 void t4_idma_monitor(struct adapter *adapter,
11143 struct sge_idma_monitor_state *idma,
11144 int hz, int ticks)
11145 {
11146 int i, idma_same_state_cnt[2];
11147
11148 /* Read the SGE Debug Ingress DMA Same State Count registers. These
11149 * are counters inside the SGE which count up on each clock when the
11150 * SGE finds its Ingress DMA State Engines in the same states they
11151 * were in the previous clock. The counters will peg out at
11152 * 0xffffffff without wrapping around so once they pass the 1s
11153 * threshold they'll stay above that till the IDMA state changes.
11154 */
11155 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
11156 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
11157 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11158
11159 for (i = 0; i < 2; i++) {
11160 u32 debug0, debug11;
11161
11162 /* If the Ingress DMA Same State Counter ("timer") is less
11163 * than 1s, then we can reset our synthesized Stall Timer and
11164 * continue. If we have previously emitted warnings about a
11165 * potential stalled Ingress Queue, issue a note indicating
11166 * that the Ingress Queue has resumed forward progress.
11167 */
11168 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
11169 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
11170 CH_WARN(adapter, "SGE idma%d, queue %u, "
11171 "resumed after %d seconds\n",
11172 i, idma->idma_qid[i],
11173 idma->idma_stalled[i]/hz);
11174 idma->idma_stalled[i] = 0;
11175 continue;
11176 }
11177
11178 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
11179 * domain. The first time we get here it'll be because we
11180 * passed the 1s Threshold; each additional time it'll be
11181 * because the RX Timer Callback is being fired on its regular
11182 * schedule.
11183 *
11184 * If the stall is below our Potential Hung Ingress Queue
11185 * Warning Threshold, continue.
11186 */
11187 if (idma->idma_stalled[i] == 0) {
11188 idma->idma_stalled[i] = hz;
11189 idma->idma_warn[i] = 0;
11190 } else {
11191 idma->idma_stalled[i] += ticks;
11192 idma->idma_warn[i] -= ticks;
11193 }
11194
11195 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
11196 continue;
11197
11198 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
11199 */
11200 if (idma->idma_warn[i] > 0)
11201 continue;
11202 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
11203
11204 /* Read and save the SGE IDMA State and Queue ID information.
11205 * We do this every time in case it changes across time ...
11206 * can't be too careful ...
11207 */
11208 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
11209 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11210 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
11211
11212 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
11213 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11214 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
11215
11216 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
11217 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
11218 i, idma->idma_qid[i], idma->idma_state[i],
11219 idma->idma_stalled[i]/hz,
11220 debug0, debug11);
11221 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
11222 }
11223 }
11224
11225 /**
11226 * t4_set_vf_mac - Set MAC address for the specified VF
11227 * @adapter: The adapter
11228 * @vf: one of the VFs instantiated by the specified PF
11229 * @naddr: the number of MAC addresses
11230 * @addr: the MAC address(es) to be set to the specified VF
11231 */
t4_set_vf_mac_acl(struct adapter * adapter,unsigned int vf,unsigned int naddr,u8 * addr)11232 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
11233 unsigned int naddr, u8 *addr)
11234 {
11235 struct fw_acl_mac_cmd cmd;
11236
11237 memset(&cmd, 0, sizeof(cmd));
11238 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
11239 F_FW_CMD_REQUEST |
11240 F_FW_CMD_WRITE |
11241 V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
11242 V_FW_ACL_MAC_CMD_VFN(vf));
11243
11244 /* Note: Do not enable the ACL */
11245 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
11246 cmd.nmac = naddr;
11247
11248 switch (adapter->pf) {
11249 case 3:
11250 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
11251 break;
11252 case 2:
11253 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
11254 break;
11255 case 1:
11256 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
11257 break;
11258 case 0:
11259 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
11260 break;
11261 }
11262
11263 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
11264 }
11265
11266 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
11267 * functions
11268 */
11269
11270 /**
11271 * t4_read_pace_tbl - read the pace table
11272 * @adap: the adapter
11273 * @pace_vals: holds the returned values
11274 *
11275 * Returns the values of TP's pace table in microseconds.
11276 */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])11277 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
11278 {
11279 unsigned int i, v;
11280
11281 for (i = 0; i < NTX_SCHED; i++) {
11282 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
11283 v = t4_read_reg(adap, A_TP_PACE_TABLE);
11284 pace_vals[i] = dack_ticks_to_usec(adap, v);
11285 }
11286 }
11287
11288 /**
11289 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
11290 * @adap: the adapter
11291 * @sched: the scheduler index
11292 * @kbps: the byte rate in Kbps
11293 * @ipg: the interpacket delay in tenths of nanoseconds
11294 * @sleep_ok: if true we may sleep while awaiting command completion
11295 *
11296 * Return the current configuration of a HW Tx scheduler.
11297 */
t4_get_tx_sched(struct adapter * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg,bool sleep_ok)11298 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
11299 unsigned int *ipg, bool sleep_ok)
11300 {
11301 unsigned int v, addr, bpt, cpt;
11302
11303 if (kbps) {
11304 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
11305 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11306 if (sched & 1)
11307 v >>= 16;
11308 bpt = (v >> 8) & 0xff;
11309 cpt = v & 0xff;
11310 if (!cpt)
11311 *kbps = 0; /* scheduler disabled */
11312 else {
11313 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
11314 *kbps = (v * bpt) / 125;
11315 }
11316 }
11317 if (ipg) {
11318 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
11319 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11320 if (sched & 1)
11321 v >>= 16;
11322 v &= 0xffff;
11323 *ipg = (10000 * v) / core_ticks_per_usec(adap);
11324 }
11325 }
11326
11327 /**
11328 * t4_load_cfg - download config file
11329 * @adap: the adapter
11330 * @cfg_data: the cfg text file to write
11331 * @size: text file size
11332 *
11333 * Write the supplied config text file to the card's serial flash.
11334 */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11335 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
11336 {
11337 int ret, i, n, cfg_addr;
11338 unsigned int addr;
11339 unsigned int flash_cfg_start_sec;
11340 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11341
11342 cfg_addr = t4_flash_cfg_addr(adap);
11343 if (cfg_addr < 0)
11344 return cfg_addr;
11345
11346 addr = cfg_addr;
11347 flash_cfg_start_sec = addr / SF_SEC_SIZE;
11348
11349 if (size > FLASH_CFG_MAX_SIZE) {
11350 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
11351 FLASH_CFG_MAX_SIZE);
11352 return -EFBIG;
11353 }
11354
11355 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
11356 sf_sec_size);
11357 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11358 flash_cfg_start_sec + i - 1);
11359 /*
11360 * If size == 0 then we're simply erasing the FLASH sectors associated
11361 * with the on-adapter Firmware Configuration File.
11362 */
11363 if (ret || size == 0)
11364 goto out;
11365
11366 /* this will write to the flash up to SF_PAGE_SIZE at a time */
11367 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11368 if ( (size - i) < SF_PAGE_SIZE)
11369 n = size - i;
11370 else
11371 n = SF_PAGE_SIZE;
11372 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
11373 if (ret)
11374 goto out;
11375
11376 addr += SF_PAGE_SIZE;
11377 cfg_data += SF_PAGE_SIZE;
11378 }
11379
11380 out:
11381 if (ret)
11382 CH_ERR(adap, "config file %s failed %d\n",
11383 (size == 0 ? "clear" : "download"), ret);
11384 return ret;
11385 }
11386
11387 /**
11388 * t5_fw_init_extern_mem - initialize the external memory
11389 * @adap: the adapter
11390 *
11391 * Initializes the external memory on T5.
11392 */
t5_fw_init_extern_mem(struct adapter * adap)11393 int t5_fw_init_extern_mem(struct adapter *adap)
11394 {
11395 u32 params[1], val[1];
11396 int ret;
11397
11398 if (!is_t5(adap->params.chip))
11399 return 0;
11400
11401 val[0] = 0xff; /* Initialize all MCs */
11402 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11403 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
11404 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
11405 FW_CMD_MAX_TIMEOUT);
11406
11407 return ret;
11408 }
11409
11410 /* BIOS boot headers */
/* Standard PCI Expansion ROM header (see the PCI Firmware Specification). */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
11416
/* Legacy PCI Expansion ROM Header (x86/PC-AT compatible images) */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];	/* Entry point for INIT function */
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
11426
/* EFI PCI Expansion ROM Header (EFI driver images) */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];	/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];	/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
11445
/*
 * PCI Data Structure Format (PCI Firmware Specification).
 * Located inside each ROM image at the offset given by the image
 * header's pcir_offset field; identifies the image's vendor/device
 * and whether further images follow in the chain.
 */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t; /* PCI__DATA_STRUCTURE */
11469
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* maximum boot image size: 512KB */
	VENDOR_ID = 0x1425,        /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" signature, little-endian */
};
11480
11481 /*
11482 * modify_device_id - Modifies the device ID of the Boot BIOS image
11483 * @adatper: the device ID to write.
11484 * @boot_data: the boot image to modify.
11485 *
11486 * Write the supplied device ID to the boot BIOS image.
11487 */
modify_device_id(int device_id,u8 * boot_data)11488 static void modify_device_id(int device_id, u8 *boot_data)
11489 {
11490 legacy_pci_exp_rom_header_t *header;
11491 pcir_data_t *pcir_header;
11492 u32 cur_header = 0;
11493
11494 /*
11495 * Loop through all chained images and change the device ID's
11496 */
11497 while (1) {
11498 header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
11499 pcir_header = (pcir_data_t *) &boot_data[cur_header +
11500 le16_to_cpu(*(u16*)header->pcir_offset)];
11501
11502 /*
11503 * Only modify the Device ID if code type is Legacy or HP.
11504 * 0x00: Okay to modify
11505 * 0x01: FCODE. Do not be modify
11506 * 0x03: Okay to modify
11507 * 0x04-0xFF: Do not modify
11508 */
11509 if (pcir_header->code_type == 0x00) {
11510 u8 csum = 0;
11511 int i;
11512
11513 /*
11514 * Modify Device ID to match current adatper
11515 */
11516 *(u16*) pcir_header->device_id = device_id;
11517
11518 /*
11519 * Set checksum temporarily to 0.
11520 * We will recalculate it later.
11521 */
11522 header->cksum = 0x0;
11523
11524 /*
11525 * Calculate and update checksum
11526 */
11527 for (i = 0; i < (header->size512 * 512); i++)
11528 csum += (u8)boot_data[cur_header + i];
11529
11530 /*
11531 * Invert summed value to create the checksum
11532 * Writing new checksum value directly to the boot data
11533 */
11534 boot_data[cur_header + 7] = -csum;
11535
11536 } else if (pcir_header->code_type == 0x03) {
11537
11538 /*
11539 * Modify Device ID to match current adatper
11540 */
11541 *(u16*) pcir_header->device_id = device_id;
11542
11543 }
11544
11545
11546 /*
11547 * Check indicator element to identify if this is the last
11548 * image in the ROM.
11549 */
11550 if (pcir_header->indicator & 0x80)
11551 break;
11552
11553 /*
11554 * Move header pointer up to the next image in the ROM.
11555 */
11556 cur_header += header->size512 * 512;
11557 }
11558 }
11559
#ifdef CHELSIO_T4_DIAGS
/*
 * t4_erase_sf - Erase entire serial Flash region
 * @adapter: the adapter
 *
 * Clears every flash sector up to and including the firmware region
 * (see the flash layout in t4_hw.h), clamped to the number of sectors
 * the part actually has.
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int last_sec;
	int ret;

	last_sec = FLASH_END_SEC;
	if (last_sec > adap->params.sf_nsec)
		last_sec = adap->params.sf_nsec;

	ret = t4_flash_erase_sectors(adap, 0, last_sec - 1);
	if (ret != 0)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
	return ret;
}
#endif
11584
11585 /*
11586 * t4_load_boot - download boot flash
11587 * @adapter: the adapter
11588 * @boot_data: the boot image to write
11589 * @boot_addr: offset in flash to write boot_data
11590 * @size: image size
11591 *
11592 * Write the supplied boot image to the card's serial flash.
11593 * The boot image has the following sections: a 28-byte header and the
11594 * boot image.
11595 */
t4_load_boot(struct adapter * adap,u8 * boot_data,unsigned int boot_addr,unsigned int size)11596 int t4_load_boot(struct adapter *adap, u8 *boot_data,
11597 unsigned int boot_addr, unsigned int size)
11598 {
11599 pci_exp_rom_header_t *header;
11600 int pcir_offset ;
11601 pcir_data_t *pcir_header;
11602 int ret, addr;
11603 uint16_t device_id;
11604 unsigned int i;
11605 unsigned int boot_sector = (boot_addr * 1024 );
11606 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11607
11608 /*
11609 * Make sure the boot image does not encroach on the firmware region
11610 */
11611 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
11612 CH_ERR(adap, "boot image encroaching on firmware region\n");
11613 return -EFBIG;
11614 }
11615
11616 /*
11617 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
11618 * and Boot configuration data sections. These 3 boot sections span
11619 * sectors 0 to 7 in flash and live right before the FW image location.
11620 */
11621 i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
11622 sf_sec_size);
11623 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
11624 (boot_sector >> 16) + i - 1);
11625
11626 /*
11627 * If size == 0 then we're simply erasing the FLASH sectors associated
11628 * with the on-adapter option ROM file
11629 */
11630 if (ret || (size == 0))
11631 goto out;
11632
11633 /* Get boot header */
11634 header = (pci_exp_rom_header_t *)boot_data;
11635 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
11636 /* PCIR Data Structure */
11637 pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
11638
11639 /*
11640 * Perform some primitive sanity testing to avoid accidentally
11641 * writing garbage over the boot sectors. We ought to check for
11642 * more but it's not worth it for now ...
11643 */
11644 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
11645 CH_ERR(adap, "boot image too small/large\n");
11646 return -EFBIG;
11647 }
11648
11649 #ifndef CHELSIO_T4_DIAGS
11650 /*
11651 * Check BOOT ROM header signature
11652 */
11653 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
11654 CH_ERR(adap, "Boot image missing signature\n");
11655 return -EINVAL;
11656 }
11657
11658 /*
11659 * Check PCI header signature
11660 */
11661 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
11662 CH_ERR(adap, "PCI header missing signature\n");
11663 return -EINVAL;
11664 }
11665
11666 /*
11667 * Check Vendor ID matches Chelsio ID
11668 */
11669 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
11670 CH_ERR(adap, "Vendor ID missing signature\n");
11671 return -EINVAL;
11672 }
11673 #endif
11674
11675 /*
11676 * Retrieve adapter's device ID
11677 */
11678 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
11679 /* Want to deal with PF 0 so I strip off PF 4 indicator */
11680 device_id = device_id & 0xf0ff;
11681
11682 /*
11683 * Check PCIE Device ID
11684 */
11685 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
11686 /*
11687 * Change the device ID in the Boot BIOS image to match
11688 * the Device ID of the current adapter.
11689 */
11690 modify_device_id(device_id, boot_data);
11691 }
11692
11693 /*
11694 * Skip over the first SF_PAGE_SIZE worth of data and write it after
11695 * we finish copying the rest of the boot image. This will ensure
11696 * that the BIOS boot header will only be written if the boot image
11697 * was written in full.
11698 */
11699 addr = boot_sector;
11700 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
11701 addr += SF_PAGE_SIZE;
11702 boot_data += SF_PAGE_SIZE;
11703 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
11704 if (ret)
11705 goto out;
11706 }
11707
11708 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
11709 (const u8 *)header, 0);
11710
11711 out:
11712 if (ret)
11713 CH_ERR(adap, "boot image download failed, error %d\n", ret);
11714 return ret;
11715 }
11716
11717 /*
11718 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
11719 * @adapter: the adapter
11720 *
11721 * Return the address within the flash where the OptionROM Configuration
11722 * is stored, or an error if the device FLASH is too small to contain
11723 * a OptionROM Configuration.
11724 */
t4_flash_bootcfg_addr(struct adapter * adapter)11725 static int t4_flash_bootcfg_addr(struct adapter *adapter)
11726 {
11727 /*
11728 * If the device FLASH isn't large enough to hold a Firmware
11729 * Configuration File, return an error.
11730 */
11731 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
11732 return -ENOSPC;
11733
11734 return FLASH_BOOTCFG_START;
11735 }
11736
t4_load_bootcfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11737 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
11738 {
11739 int ret, i, n, cfg_addr;
11740 unsigned int addr;
11741 unsigned int flash_cfg_start_sec;
11742 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11743
11744 cfg_addr = t4_flash_bootcfg_addr(adap);
11745 if (cfg_addr < 0)
11746 return cfg_addr;
11747
11748 addr = cfg_addr;
11749 flash_cfg_start_sec = addr / SF_SEC_SIZE;
11750
11751 if (size > FLASH_BOOTCFG_MAX_SIZE) {
11752 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
11753 FLASH_BOOTCFG_MAX_SIZE);
11754 return -EFBIG;
11755 }
11756
11757 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
11758 sf_sec_size);
11759 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11760 flash_cfg_start_sec + i - 1);
11761
11762 /*
11763 * If size == 0 then we're simply erasing the FLASH sectors associated
11764 * with the on-adapter OptionROM Configuration File.
11765 */
11766 if (ret || size == 0)
11767 goto out;
11768
11769 /* this will write to the flash up to SF_PAGE_SIZE at a time */
11770 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11771 if ( (size - i) < SF_PAGE_SIZE)
11772 n = size - i;
11773 else
11774 n = SF_PAGE_SIZE;
11775 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
11776 if (ret)
11777 goto out;
11778
11779 addr += SF_PAGE_SIZE;
11780 cfg_data += SF_PAGE_SIZE;
11781 }
11782
11783 out:
11784 if (ret)
11785 CH_ERR(adap, "boot config data %s failed %d\n",
11786 (size == 0 ? "clear" : "download"), ret);
11787 return ret;
11788 }
11789
11790 /**
11791 * t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH
11792 * @adap: the adapter
11793 * @cfg_data: where to store the read OptionROM configuration data
11794 *
11795 * Read the current OptionROM configuration from FLASH and write to the
11796 * buffer @cfg_data supplied.
11797 */
t4_read_bootcfg(struct adapter * adap,u8 * cfg_data,unsigned int size)11798 int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size)
11799 {
11800 u32 *ptr = (u32 *)cfg_data;
11801 int i, n, cfg_addr;
11802 int ret = 0;
11803
11804 if (size > FLASH_BOOTCFG_MAX_SIZE) {
11805 CH_ERR(adap, "bootcfg file too big, max is %u bytes\n",
11806 FLASH_BOOTCFG_MAX_SIZE);
11807 return -EINVAL;
11808 }
11809
11810 cfg_addr = t4_flash_bootcfg_addr(adap);
11811 if (cfg_addr < 0)
11812 return cfg_addr;
11813
11814 size = size / sizeof (u32);
11815 for (i = 0; i < size; i += SF_PAGE_SIZE) {
11816 if ( (size - i) < SF_PAGE_SIZE)
11817 n = size - i;
11818 else
11819 n = SF_PAGE_SIZE;
11820
11821 ret = t4_read_flash(adap, cfg_addr, n, ptr, 0);
11822 if (ret)
11823 goto out;
11824
11825 cfg_addr += (n*4);
11826 ptr += n;
11827 }
11828
11829 out:
11830 return ret;
11831 }
11832
11833 /**
11834 * t4_set_filter_mode - configure the optional components of filter tuples
11835 * @adap: the adapter
11836 * @mode_map: a bitmap selcting which optional filter components to enable
11837 * @sleep_ok: if true we may sleep while awaiting command completion
11838 *
11839 * Sets the filter mode by selecting the optional components to enable
11840 * in filter tuples. Returns 0 on success and a negative error if the
11841 * requested mode needs more bits than are available for optional
11842 * components.
11843 */
t4_set_filter_mode(struct adapter * adap,unsigned int mode_map,bool sleep_ok)11844 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
11845 bool sleep_ok)
11846 {
11847 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
11848
11849 int i, nbits = 0;
11850
11851 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
11852 if (mode_map & (1 << i))
11853 nbits += width[i];
11854 if (nbits > FILTER_OPT_LEN)
11855 return -EINVAL;
11856
11857 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
11858
11859 return 0;
11860 }
11861
11862 /**
11863 * t4_clr_port_stats - clear port statistics
11864 * @adap: the adapter
11865 * @idx: the port index
11866 *
11867 * Clear HW statistics for the given port.
11868 */
t4_clr_port_stats(struct adapter * adap,int idx)11869 void t4_clr_port_stats(struct adapter *adap, int idx)
11870 {
11871 unsigned int i;
11872 u32 bgmap = t4_get_mps_bg_map(adap, idx);
11873 u32 port_base_addr;
11874
11875 if (is_t4(adap->params.chip))
11876 port_base_addr = PORT_BASE(idx);
11877 else
11878 port_base_addr = T5_PORT_BASE(idx);
11879
11880 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
11881 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
11882 t4_write_reg(adap, port_base_addr + i, 0);
11883 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
11884 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
11885 t4_write_reg(adap, port_base_addr + i, 0);
11886 for (i = 0; i < 4; i++)
11887 if (bgmap & (1 << i)) {
11888 t4_write_reg(adap,
11889 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
11890 t4_write_reg(adap,
11891 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
11892 }
11893 }
11894
11895 /**
11896 * t4_i2c_io - read/write I2C data from adapter
11897 * @adap: the adapter
11898 * @port: Port number if per-port device; <0 if not
11899 * @devid: per-port device ID or absolute device ID
11900 * @offset: byte offset into device I2C space
11901 * @len: byte length of I2C space data
11902 * @buf: buffer in which to return I2C data for read
11903 * buffer which holds the I2C data for write
11904 * @write: if true, do a write; else do a read
11905 * Reads/Writes the I2C data from/to the indicated device and location.
11906 */
t4_i2c_io(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf,bool write)11907 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
11908 int port, unsigned int devid,
11909 unsigned int offset, unsigned int len,
11910 u8 *buf, bool write)
11911 {
11912 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
11913 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
11914 int ret = 0;
11915
11916 if (len > I2C_PAGE_SIZE)
11917 return -EINVAL;
11918
11919 /* Dont allow reads that spans multiple pages */
11920 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
11921 return -EINVAL;
11922
11923 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
11924 ldst_cmd.op_to_addrspace =
11925 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
11926 F_FW_CMD_REQUEST |
11927 (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
11928 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
11929 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
11930 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
11931 ldst_cmd.u.i2c.did = devid;
11932
11933 while (len > 0) {
11934 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
11935
11936 ldst_cmd.u.i2c.boffset = offset;
11937 ldst_cmd.u.i2c.blen = i2c_len;
11938
11939 if (write)
11940 memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
11941
11942 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
11943 write ? NULL : &ldst_rpl);
11944 if (ret)
11945 break;
11946
11947 if (!write)
11948 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
11949 offset += i2c_len;
11950 buf += i2c_len;
11951 len -= i2c_len;
11952 }
11953
11954 return ret;
11955 }
11956
t4_i2c_rd(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf)11957 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
11958 int port, unsigned int devid,
11959 unsigned int offset, unsigned int len,
11960 u8 *buf)
11961 {
11962 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
11963 }
11964
t4_i2c_wr(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf)11965 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
11966 int port, unsigned int devid,
11967 unsigned int offset, unsigned int len,
11968 u8 *buf)
11969 {
11970 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
11971 }
11972
11973 /**
11974 * t4_sge_ctxt_rd - read an SGE context through FW
11975 * @adap: the adapter
11976 * @mbox: mailbox to use for the FW command
11977 * @cid: the context id
11978 * @ctype: the context type
11979 * @data: where to store the context data
11980 *
11981 * Issues a FW command through the given mailbox to read an SGE context.
11982 */
t4_sge_ctxt_rd(struct adapter * adap,unsigned int mbox,unsigned int cid,enum ctxt_type ctype,u32 * data)11983 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
11984 enum ctxt_type ctype, u32 *data)
11985 {
11986 int ret;
11987 struct fw_ldst_cmd c;
11988
11989 if (ctype == CTXT_EGRESS)
11990 ret = FW_LDST_ADDRSPC_SGE_EGRC;
11991 else if (ctype == CTXT_INGRESS)
11992 ret = FW_LDST_ADDRSPC_SGE_INGC;
11993 else if (ctype == CTXT_FLM)
11994 ret = FW_LDST_ADDRSPC_SGE_FLMC;
11995 else
11996 ret = FW_LDST_ADDRSPC_SGE_CONMC;
11997
11998 memset(&c, 0, sizeof(c));
11999 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
12000 F_FW_CMD_REQUEST | F_FW_CMD_READ |
12001 V_FW_LDST_CMD_ADDRSPACE(ret));
12002 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
12003 c.u.idctxt.physid = cpu_to_be32(cid);
12004
12005 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12006 if (ret == 0) {
12007 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
12008 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
12009 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
12010 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
12011 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
12012 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
12013 }
12014 return ret;
12015 }
12016
12017 /**
12018 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
12019 * @adap: the adapter
12020 * @cid: the context id
12021 * @ctype: the context type
12022 * @data: where to store the context data
12023 *
12024 * Reads an SGE context directly, bypassing FW. This is only for
12025 * debugging when FW is unavailable.
12026 */
t4_sge_ctxt_rd_bd(struct adapter * adap,unsigned int cid,enum ctxt_type ctype,u32 * data)12027 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
12028 u32 *data)
12029 {
12030 int i, ret;
12031
12032 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
12033 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
12034 if (!ret)
12035 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
12036 *data++ = t4_read_reg(adap, i);
12037 return ret;
12038 }
12039
t4_sched_config(struct adapter * adapter,int type,int minmaxen)12040 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
12041 {
12042 struct fw_sched_cmd cmd;
12043
12044 memset(&cmd, 0, sizeof(cmd));
12045 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12046 F_FW_CMD_REQUEST |
12047 F_FW_CMD_WRITE);
12048 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12049
12050 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
12051 cmd.u.config.type = type;
12052 cmd.u.config.minmaxen = minmaxen;
12053
12054 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12055 NULL, 1);
12056 }
12057
t4_sched_params(struct adapter * adapter,int channel,int cls,int level,int mode,int type,int rateunit,int ratemode,int minrate,int maxrate,int weight,int pktsize,int burstsize)12058 int t4_sched_params(struct adapter *adapter,
12059 int channel, int cls,
12060 int level, int mode, int type,
12061 int rateunit, int ratemode,
12062 int minrate, int maxrate, int weight,
12063 int pktsize, int burstsize)
12064 {
12065 struct fw_sched_cmd cmd;
12066
12067 memset(&cmd, 0, sizeof(cmd));
12068 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12069 F_FW_CMD_REQUEST |
12070 F_FW_CMD_WRITE);
12071 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12072
12073 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12074 cmd.u.params.type = type;
12075 cmd.u.params.level = level;
12076 cmd.u.params.mode = mode;
12077 cmd.u.params.ch = channel;
12078 cmd.u.params.cl = cls;
12079 cmd.u.params.unit = rateunit;
12080 cmd.u.params.rate = ratemode;
12081 cmd.u.params.min = cpu_to_be32(minrate);
12082 cmd.u.params.max = cpu_to_be32(maxrate);
12083 cmd.u.params.weight = cpu_to_be16(weight);
12084 cmd.u.params.pktsize = cpu_to_be16(pktsize);
12085 cmd.u.params.burstsize = cpu_to_be16(burstsize);
12086
12087 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12088 NULL, 1);
12089 }
12090
t4_read_sched_params(struct adapter * adapter,int channel,int cls,int * level,int * mode,int * type,int * rateunit,int * ratemode,int * minrate,int * maxrate,int * weight,int * pktsize,int * burstsize)12091 int t4_read_sched_params(struct adapter *adapter,
12092 int channel, int cls,
12093 int *level, int *mode, int *type,
12094 int *rateunit, int *ratemode,
12095 int *minrate, int *maxrate, int *weight,
12096 int *pktsize, int *burstsize)
12097 {
12098 struct fw_sched_cmd cmd;
12099 int ret = 0;
12100
12101 memset(&cmd, 0, sizeof(cmd));
12102 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12103 F_FW_CMD_REQUEST |
12104 F_FW_CMD_READ);
12105 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12106 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12107 cmd.u.params.ch = channel;
12108 cmd.u.params.cl = cls;
12109
12110 ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
12111 &cmd, 1);
12112 if (ret)
12113 goto out;
12114
12115 *level = cmd.u.params.level;
12116 *mode = cmd.u.params.mode;
12117 *type = cmd.u.params.type;
12118 *rateunit = cmd.u.params.unit;
12119 *ratemode = cmd.u.params.rate;
12120 *minrate = be32_to_cpu(cmd.u.params.min);
12121 *maxrate = be32_to_cpu(cmd.u.params.max);
12122 *weight = be16_to_cpu(cmd.u.params.weight);
12123 *pktsize = be16_to_cpu(cmd.u.params.pktsize);
12124 *burstsize = be16_to_cpu(cmd.u.params.burstsize);
12125
12126 out:
12127 return ret;
12128 }
12129
12130 /*
12131 * t4_config_watchdog - configure (enable/disable) a watchdog timer
12132 * @adapter: the adapter
12133 * @mbox: mailbox to use for the FW command
12134 * @pf: the PF owning the queue
12135 * @vf: the VF owning the queue
12136 * @timeout: watchdog timeout in ms
12137 * @action: watchdog timer / action
12138 *
12139 * There are separate watchdog timers for each possible watchdog
12140 * action. Configure one of the watchdog timers by setting a non-zero
12141 * timeout. Disable a watchdog timer by using a timeout of zero.
12142 */
t4_config_watchdog(struct adapter * adapter,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int timeout,unsigned int action)12143 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
12144 unsigned int pf, unsigned int vf,
12145 unsigned int timeout, unsigned int action)
12146 {
12147 struct fw_watchdog_cmd wdog;
12148 unsigned int ticks;
12149
12150 /*
12151 * The watchdog command expects a timeout in units of 10ms so we need
12152 * to convert it here (via rounding) and force a minimum of one 10ms
12153 * "tick" if the timeout is non-zero but the convertion results in 0
12154 * ticks.
12155 */
12156 ticks = (timeout + 5)/10;
12157 if (timeout && !ticks)
12158 ticks = 1;
12159
12160 memset(&wdog, 0, sizeof wdog);
12161 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
12162 F_FW_CMD_REQUEST |
12163 F_FW_CMD_WRITE |
12164 V_FW_PARAMS_CMD_PFN(pf) |
12165 V_FW_PARAMS_CMD_VFN(vf));
12166 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
12167 wdog.timeout = cpu_to_be32(ticks);
12168 wdog.action = cpu_to_be32(action);
12169
12170 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
12171 }
12172
t4_get_devlog_level(struct adapter * adapter,unsigned int * level)12173 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
12174 {
12175 struct fw_devlog_cmd devlog_cmd;
12176 int ret;
12177
12178 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12179 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12180 F_FW_CMD_REQUEST | F_FW_CMD_READ);
12181 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12182 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12183 sizeof(devlog_cmd), &devlog_cmd);
12184 if (ret)
12185 return ret;
12186
12187 *level = devlog_cmd.level;
12188 return 0;
12189 }
12190
t4_set_devlog_level(struct adapter * adapter,unsigned int level)12191 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
12192 {
12193 struct fw_devlog_cmd devlog_cmd;
12194
12195 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12196 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12197 F_FW_CMD_REQUEST |
12198 F_FW_CMD_WRITE);
12199 devlog_cmd.level = level;
12200 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12201 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12202 sizeof(devlog_cmd), &devlog_cmd);
12203 }
12204
t4_configure_add_smac(struct adapter * adap)12205 int t4_configure_add_smac(struct adapter *adap)
12206 {
12207 unsigned int param, val;
12208 int ret = 0;
12209
12210 adap->params.smac_add_support = 0;
12211 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12212 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
12213 /* Query FW to check if FW supports adding source mac address
12214 * to TCAM feature or not.
12215 * If FW returns 1, driver can use this feature and driver need to send
12216 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
12217 * enable adding smac to TCAM.
12218 */
12219 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12220 if (ret)
12221 return ret;
12222
12223 if (val == 1) {
12224 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
12225 ¶m, &val);
12226 if (!ret)
12227 /* Firmware allows adding explicit TCAM entries.
12228 * Save this internally.
12229 */
12230 adap->params.smac_add_support = 1;
12231 }
12232
12233 return ret;
12234 }
12235
t4_configure_ringbb(struct adapter * adap)12236 int t4_configure_ringbb(struct adapter *adap)
12237 {
12238 unsigned int param, val;
12239 int ret = 0;
12240
12241 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12242 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
12243 /* Query FW to check if FW supports ring switch feature or not.
12244 * If FW returns 1, driver can use this feature and driver need to send
12245 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
12246 * enable the ring backbone configuration.
12247 */
12248 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12249 if (ret < 0) {
12250 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
12251 ret);
12252 goto out;
12253 }
12254
12255 if (val != 1) {
12256 CH_ERR(adap, "FW doesnot support ringbackbone features\n");
12257 goto out;
12258 }
12259
12260 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12261 if (ret < 0) {
12262 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
12263 ret);
12264 goto out;
12265 }
12266
12267 out:
12268 return ret;
12269 }
12270
12271 /*
12272 * t4_set_vlan_acl - Set a VLAN id for the specified VF
12273 * @adapter: the adapter
12274 * @mbox: mailbox to use for the FW command
12275 * @vf: one of the VFs instantiated by the specified PF
12276 * @vlan: The vlanid to be set
12277 *
12278 */
t4_set_vlan_acl(struct adapter * adap,unsigned int mbox,unsigned int vf,u16 vlan)12279 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
12280 u16 vlan)
12281 {
12282 struct fw_acl_vlan_cmd vlan_cmd;
12283 unsigned int enable;
12284
12285 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
12286 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
12287 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
12288 F_FW_CMD_REQUEST |
12289 F_FW_CMD_WRITE |
12290 F_FW_CMD_EXEC |
12291 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
12292 V_FW_ACL_VLAN_CMD_VFN(vf));
12293 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
12294 /* Drop all packets that donot match vlan id */
12295 vlan_cmd.dropnovlan_fm = (enable
12296 ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
12297 F_FW_ACL_VLAN_CMD_FM)
12298 : 0);
12299 if (enable != 0) {
12300 vlan_cmd.nvlan = 1;
12301 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
12302 }
12303
12304 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
12305 }
12306
12307 /**
12308 * t4_del_mac - Removes the exact-match filter for a MAC address
12309 * @adap: the adapter
12310 * @mbox: mailbox to use for the FW command
12311 * @viid: the VI id
12312 * @addr: the MAC address value
12313 * @smac: if true, delete from only the smac region of MPS
12314 *
12315 * Modifies an exact-match filter and sets it to the new MAC address if
12316 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12317 * latter case the address is added persistently if @persist is %true.
12318 *
12319 * Returns a negative error number or the index of the filter with the new
12320 * MAC value. Note that this index may differ from @idx.
12321 */
t4_del_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,const u8 * addr,bool smac)12322 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12323 const u8 *addr, bool smac)
12324 {
12325 int ret;
12326 struct fw_vi_mac_cmd c;
12327 struct fw_vi_mac_exact *p = c.u.exact;
12328 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12329
12330 memset(&c, 0, sizeof(c));
12331 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12332 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12333 V_FW_VI_MAC_CMD_VIID(viid));
12334 c.freemacs_to_len16 = cpu_to_be32(
12335 V_FW_CMD_LEN16(1) |
12336 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12337
12338 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12339 p->valid_to_idx = cpu_to_be16(
12340 F_FW_VI_MAC_CMD_VALID |
12341 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
12342
12343 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12344 if (ret == 0) {
12345 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12346 if (ret < max_mac_addr)
12347 return -ENOMEM;
12348 }
12349
12350 return ret;
12351 }
12352
12353 /**
12354 * t4_add_mac - Adds an exact-match filter for a MAC address
12355 * @adap: the adapter
12356 * @mbox: mailbox to use for the FW command
12357 * @viid: the VI id
12358 * @idx: index of existing filter for old value of MAC address, or -1
12359 * @addr: the new MAC address value
12360 * @persist: whether a new MAC allocation should be persistent
12361 * @add_smt: if true also add the address to the HW SMT
12362 * @smac: if true, update only the smac region of MPS
12363 *
12364 * Modifies an exact-match filter and sets it to the new MAC address if
12365 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12366 * latter case the address is added persistently if @persist is %true.
12367 *
12368 * Returns a negative error number or the index of the filter with the new
12369 * MAC value. Note that this index may differ from @idx.
12370 */
t4_add_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,u8 * smt_idx,bool smac)12371 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12372 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
12373 {
12374 int ret, mode;
12375 struct fw_vi_mac_cmd c;
12376 struct fw_vi_mac_exact *p = c.u.exact;
12377 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12378
12379 if (idx < 0) /* new allocation */
12380 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
12381 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
12382
12383 memset(&c, 0, sizeof(c));
12384 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12385 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12386 V_FW_VI_MAC_CMD_VIID(viid));
12387 c.freemacs_to_len16 = cpu_to_be32(
12388 V_FW_CMD_LEN16(1) |
12389 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12390 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
12391 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
12392 V_FW_VI_MAC_CMD_IDX(idx));
12393 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12394
12395 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12396 if (ret == 0) {
12397 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12398 if (ret >= max_mac_addr)
12399 return -ENOMEM;
12400 if (smt_idx) {
12401 /* Does fw supports returning smt_idx? */
12402 if (adap->params.viid_smt_extn_support)
12403 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
12404 else {
12405 /* In T4/T5, SMT contains 256 SMAC entries
12406 * organized in 128 rows of 2 entries each.
12407 * In T6, SMT contains 256 SMAC entries in
12408 * 256 rows.
12409 */
12410 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
12411 *smt_idx = ((viid & M_FW_VIID_VIN) << 1);
12412 else
12413 *smt_idx = (viid & M_FW_VIID_VIN);
12414 }
12415 }
12416 }
12417
12418 return ret;
12419 }
12420
12421