1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14 *
15 * Copyright (C) 2003-2019 Chelsio Communications. All rights reserved.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 * release for licensing terms and conditions.
21 */
22
23 /*
24 * Copyright 2020 RackTop Systems, Inc.
25 */
26
27 #include "common.h"
28 #include "t4_regs.h"
29 #include "t4_regs_values.h"
30 #include "t4fw_interface.h"
31
/*
 * Acquire an adapter lock.  Thin wrapper mapping the driver's OS-neutral
 * lock type onto the illumos mutex_enter(9F) primitive.
 */
static inline void
t4_os_lock(t4_os_lock_t *lock)
{
	mutex_enter(lock);
}
37
/*
 * Release an adapter lock previously taken with t4_os_lock().
 */
static inline void
t4_os_unlock(t4_os_lock_t *lock)
{
	mutex_exit(lock);
}
43
/*
 * Read one byte from PCI config space at offset @reg into *@val,
 * via the adapter's cached config-space access handle.
 */
static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
	*val = pci_config_get8(sc->pci_regh, reg);
}
49
/*
 * Write one byte @val to PCI config space at offset @reg.
 */
static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
	pci_config_put8(sc->pci_regh, reg, val);
}
55
/*
 * Read a 16-bit word from PCI config space at offset @reg into *@val.
 */
static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
	*val = pci_config_get16(sc->pci_regh, reg);
}
61
/*
 * Write a 16-bit word @val to PCI config space at offset @reg.
 */
static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
	pci_config_put16(sc->pci_regh, reg, val);
}
67
/*
 * Read a 32-bit word from PCI config space at offset @reg into *@val.
 */
static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
	*val = pci_config_get32(sc->pci_regh, reg);
}
73
/*
 * Write a 32-bit word @val to PCI config space at offset @reg.
 */
static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
	pci_config_put32(sc->pci_regh, reg, val);
}
79
/*
 * Allocate @size bytes of kernel memory.  KM_SLEEP means this may block
 * until memory is available and never returns NULL; the caller owns the
 * buffer and must free it with the matching kmem interface.
 */
static inline void *
t4_os_alloc(size_t size)
{
	return (kmem_alloc(size, KM_SLEEP));
}
85
86 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
87 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
88
89 /*
90 * t4_os_pci_read_seeprom - read four bytes of SEEPROM/VPD contents
91 * @adapter: the adapter
92 * @addr: SEEPROM/VPD Address to read
93 * @valp: where to store the value read
94 *
95 * Read a 32-bit value from the given address in the SEEPROM/VPD. The address
96 * must be four-byte aligned. Returns 0 on success, a negative error number
97 * on failure.
98 */
t4_os_pci_read_seeprom(adapter_t * adapter,int addr,u32 * valp)99 static inline int t4_os_pci_read_seeprom(adapter_t *adapter, int addr,
100 u32 *valp)
101 {
102 const int ret = t4_seeprom_read(adapter, addr, valp);
103 return (ret >= 0 ? 0 : ret);
104 }
105
106 /*
107 * t4_os_pci_write_seeprom - write four bytes of SEEPROM/VPD contents
108 * @adapter: the adapter
109 * @addr: SEEPROM/VPD Address to write
110 * @val: the value write
111 *
112 * Write a 32-bit value to the given address in the SEEPROM/VPD. The address
113 * must be four-byte aligned. Returns 0 on success, a negative error number
114 * on failure.
115 */
t4_os_pci_write_seeprom(adapter_t * adapter,int addr,u32 val)116 static inline int t4_os_pci_write_seeprom(adapter_t *adapter, int addr, u32 val)
117 {
118 const int ret = t4_seeprom_write(adapter, addr, val);
119 return (ret >= 0 ? 0 : ret);
120 }
121
122
/*
 * t4_os_pci_set_vpd_size - inform the OS of the adapter's VPD size.
 * A no-op stub on illumos; always reports success.
 */
static inline int t4_os_pci_set_vpd_size(struct adapter *adapter, size_t len)
{
	/* Presently unused on illumos. */
	return (0);
}
128
129
130 /**
131 * t4_wait_op_done_val - wait until an operation is completed
132 * @adapter: the adapter performing the operation
133 * @reg: the register to check for completion
134 * @mask: a single-bit field within @reg that indicates completion
135 * @polarity: the value of the field when the operation is completed
136 * @attempts: number of check iterations
137 * @delay: delay in usecs between iterations
138 * @valp: where to store the value of the register at completion time
139 *
140 * Wait until an operation is completed by checking a bit in a register
141 * up to @attempts times. If @valp is not NULL the value of the register
142 * at the time it indicated completion is stored there. Returns 0 if the
143 * operation completes and -EAGAIN otherwise.
144 */
t4_wait_op_done_val(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay,u32 * valp)145 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
146 int polarity, int attempts, int delay, u32 *valp)
147 {
148 while (1) {
149 u32 val = t4_read_reg(adapter, reg);
150
151 if (!!(val & mask) == polarity) {
152 if (valp)
153 *valp = val;
154 return 0;
155 }
156 if (--attempts == 0)
157 return -EAGAIN;
158 if (delay)
159 udelay(delay);
160 }
161 }
162
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that do
 * not need the final register value.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
169
170 /**
171 * t4_set_reg_field - set a register field to a value
172 * @adapter: the adapter to program
173 * @addr: the register address
174 * @mask: specifies the portion of the register to modify
175 * @val: the new value for the register field
176 *
177 * Sets a register field specified by the supplied mask to the
178 * given value.
179 */
t4_set_reg_field(struct adapter * adapter,unsigned int addr,u32 mask,u32 val)180 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
181 u32 val)
182 {
183 u32 v = t4_read_reg(adapter, addr) & ~mask;
184
185 t4_write_reg(adapter, addr, v | val);
186 (void) t4_read_reg(adapter, addr); /* flush */
187 }
188
189 /**
190 * t4_read_indirect - read indirectly addressed registers
191 * @adap: the adapter
192 * @addr_reg: register holding the indirect address
193 * @data_reg: register holding the value of the indirect register
194 * @vals: where the read register values are stored
195 * @nregs: how many indirect registers to read
196 * @start_idx: index of first indirect register to read
197 *
198 * Reads registers that are accessed indirectly through an address/data
199 * register pair.
200 */
t4_read_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,u32 * vals,unsigned int nregs,unsigned int start_idx)201 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
202 unsigned int data_reg, u32 *vals,
203 unsigned int nregs, unsigned int start_idx)
204 {
205 while (nregs--) {
206 t4_write_reg(adap, addr_reg, start_idx);
207 *vals++ = t4_read_reg(adap, data_reg);
208 start_idx++;
209 }
210 }
211
212 /**
213 * t4_write_indirect - write indirectly addressed registers
214 * @adap: the adapter
215 * @addr_reg: register holding the indirect addresses
216 * @data_reg: register holding the value for the indirect registers
217 * @vals: values to write
218 * @nregs: how many indirect registers to write
219 * @start_idx: address of first indirect register to write
220 *
221 * Writes a sequential block of registers that are accessed indirectly
222 * through an address/data register pair.
223 */
t4_write_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,const u32 * vals,unsigned int nregs,unsigned int start_idx)224 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
225 unsigned int data_reg, const u32 *vals,
226 unsigned int nregs, unsigned int start_idx)
227 {
228 while (nregs--) {
229 t4_write_reg(adap, addr_reg, start_idx++);
230 t4_write_reg(adap, data_reg, *vals++);
231 }
232 }
233
234 /*
235 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
236 * mechanism. This guarantees that we get the real value even if we're
237 * operating within a Virtual Machine and the Hypervisor is trapping our
238 * Configuration Space accesses.
239 *
240 * N.B. This routine should only be used as a last resort: the firmware uses
241 * the backdoor registers on a regular basis and we can end up
242 * conflicting with it's uses!
243 */
t4_hw_pci_read_cfg4(struct adapter * adap,int reg,u32 * val)244 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
245 {
246 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
247
248 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
249 req |= F_ENABLE;
250 else
251 req |= F_T6_ENABLE;
252
253 if (is_t4(adap->params.chip))
254 req |= F_LOCALCFG;
255
256 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
257 *val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
258
259 /* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
260 * Configuration Space read. (None of the other fields matter when
261 * F_ENABLE is 0 so a simple register write is easier than a
262 * read-modify-write via t4_set_reg_field().)
263 */
264 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
265 }
266
267 /*
268 * t4_report_fw_error - report firmware error
269 * @adap: the adapter
270 *
271 * The adapter firmware can indicate error conditions to the host.
272 * If the firmware has indicated an error, print out the reason for
273 * the firmware error.
274 */
t4_report_fw_error(struct adapter * adap)275 static void t4_report_fw_error(struct adapter *adap)
276 {
277 static const char *const reason[] = {
278 "Crash", /* PCIE_FW_EVAL_CRASH */
279 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
280 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
281 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
282 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
283 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
284 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
285 "Reserved", /* reserved */
286 };
287 u32 pcie_fw;
288
289 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
290 if (pcie_fw & F_PCIE_FW_ERR) {
291 CH_ERR(adap, "Firmware reports adapter error: %s\n",
292 reason[G_PCIE_FW_EVAL(pcie_fw)]);
293 adap->flags &= ~FW_OK;
294 }
295 }
296
297 /*
298 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
299 */
get_mbox_rpl(struct adapter * adap,__be64 * rpl,int nflit,u32 mbox_addr)300 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
301 u32 mbox_addr)
302 {
303 for ( ; nflit; nflit--, mbox_addr += 8)
304 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
305 }
306
/*
 * Handle a FW assertion reported in a mailbox: log the firmware source
 * file, line, and the two assertion values carried in the FW_DEBUG_CMD
 * payload (all fields arrive big-endian from the firmware).
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
319
320 #define X_CIM_PF_NOACCESS 0xeeeeeeee
321
322 /*
323 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
324 * busy loops which don't sleep.
325 */
326 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
327 #define T4_OS_TOUCH_NMI_WATCHDOG() t4_os_touch_nmi_watchdog()
328 #else
329 #define T4_OS_TOUCH_NMI_WATCHDOG()
330 #endif
331
332 #ifdef T4_OS_LOG_MBOX_CMDS
333 /**
334 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
335 * @adapter: the adapter
336 * @cmd: the Firmware Mailbox Command or Reply
337 * @size: command length in bytes
338 * @access: the time (ms) needed to access the Firmware Mailbox
339 * @execute: the time (ms) the command spent being executed
340 */
t4_record_mbox(struct adapter * adapter,const __be64 * cmd,unsigned int size,int access,int execute)341 static void t4_record_mbox(struct adapter *adapter,
342 const __be64 *cmd, unsigned int size,
343 int access, int execute)
344 {
345 struct mbox_cmd_log *log = adapter->mbox_log;
346 struct mbox_cmd *entry;
347 int i;
348
349 entry = mbox_cmd_log_entry(log, log->cursor++);
350 if (log->cursor == log->size)
351 log->cursor = 0;
352
353 for (i = 0; i < size/8; i++)
354 entry->cmd[i] = be64_to_cpu(cmd[i]);
355 while (i < MBOX_LEN/8)
356 entry->cmd[i++] = 0;
357 entry->timestamp = t4_os_timestamp();
358 entry->seqno = log->seqno++;
359 entry->access = access;
360 entry->execute = execute;
361 }
362
363 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
364 t4_record_mbox(__adapter, __cmd, __size, __access, __execute)
365
366 #else /* !T4_OS_LOG_MBOX_CMDS */
367
368 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
369 /* nothing */
370
371 #endif /* !T4_OS_LOG_MBOX_CMDS */
372
/**
 * t4_record_mbox_marker - record a marker in the mailbox log
 * @adapter: the adapter
 * @marker: byte array marker
 * @size: marker size in bytes
 *
 *	We inject a "fake mailbox command" into the Firmware Mailbox Log
 *	using a known command token and then the bytes of the specified
 *	marker.  This lets debugging code inject markers into the log to
 *	help identify which commands are in response to higher level code.
 *	Compiles to an empty function when mailbox logging is disabled.
 */
void t4_record_mbox_marker(struct adapter *adapter,
			   const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	/* reserve the first flit for the ~0 marker token */
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* silently truncate markers larger than the log entry payload */
	if (size > max_marker)
		size = max_marker;

	/* all-ones first flit is the recognizable "fake command" token */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}
403
404 /*
405 * Delay time in microseconds to wait for mailbox access/fw reply
406 * to mailbox command
407 */
408 #define MIN_MBOX_CMD_DELAY 900
409 #define MBOX_CMD_DELAY 1000
410
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	u16 access = 0;
#endif /* T4_OS_LOG_MBOX_CMDS */
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	t4_mbox_waiter_t entry;
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_mbox_waiter_add(adap, &entry);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_mbox_waiter_remove(adap, &entry);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		if (t4_mbox_wait_owner(adap, MBOX_CMD_DELAY, sleep_ok)) {
			break;
		}
	}
#ifdef T4_OS_LOG_MBOX_CMDS
	/* remember how many iterations it took to reach the list head */
	access = i;
#endif /* T4_OS_LOG_MBOX_CMDS */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_mbox_waiter_remove(adap, &entry);
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* hand the mailbox to the firmware; the read flushes the doorbell */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* we own the mailbox again but there's no reply yet */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			t4_mbox_waiter_remove(adap, &entry);

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			/*
			 * A FW_DEBUG_CMD reply is an asynchronous firmware
			 * assertion rather than our command's reply.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	t4_mbox_waiter_remove(adap, &entry);

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command 0x%x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
626
627 #ifdef CONFIG_CUDBG
628 /*
629 * The maximum number of times to iterate for FW reply before
630 * issuing a mailbox timeout
631 */
632 #define FW_REPLY_WAIT_LOOP 6000000
633
/**
 *	t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 *	mailbox.  This function is a minimal version of t4_wr_mbox_meat_timeout()
 *	and is only invoked during a kernel crash.  Since this function is
 *	called through an atomic notifier chain, we cannot sleep awaiting a
 *	response from FW, hence repeatedly loop until we get a reply.
 *
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
			    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* hand the mailbox to the firmware; the read flushes the doorbell */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  No delay between polls: we cannot sleep here,
	 * so the bound is the fixed iteration count FW_REPLY_WAIT_LOOP.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* we own the mailbox again but there's no reply yet */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is an asynchronous firmware
			 * assertion rather than our command's reply.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
775 #endif
776
/*
 * Front end for t4_wr_mbox_meat_timeout() using the default command
 * timeout.  When built with CONFIG_CUDBG and the adapter is flagged as
 * being in a kernel-crash context, dispatch to the non-sleeping panic
 * variant instead.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
						     rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
					       sleep_ok, FW_CMD_MAX_TIMEOUT);

}
790
/*
 * t4_edc_err_read - dump EDC ECC error state for one EDC controller
 * @adap: the adapter
 * @idx: which EDC (MEM_EDC0 or MEM_EDC1)
 *
 * Logs the EDC ECC error address register and the BIST status data for
 * the selected EDC.  Not supported on T4 (uses the T5+ EDC register
 * layout); unsupported cases log a warning and return 0.  Always
 * returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
827
828 /**
829 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
830 * @adap: the adapter
831 * @win: PCI-E Memory Window to use
832 * @addr: address within adapter memory
833 * @len: amount of memory to transfer
834 * @hbuf: host memory buffer
835 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
836 *
837 * Reads/writes an [almost] arbitrary memory region in the firmware: the
838 * firmware memory address and host buffer must be aligned on 32-bit
839 * boudaries; the length may be arbitrary.
840 *
841 * NOTES:
842 * 1. The memory is transferred as a raw byte sequence from/to the
843 * firmware's memory. If this memory contains data structures which
844 * contain multi-byte integers, it's the caller's responsibility to
845 * perform appropriate byte order conversions.
846 *
847 * 2. It is the Caller's responsibility to ensure that no other code
848 * uses the specified PCI-E Memory Window while this routine is
849 * using it. This is typically done via the use of OS-specific
850 * locks, etc.
851 */
/*
 * t4_memory_rw_addr - read/write adapter memory via a PCI-E Memory Window
 * @adap: the adapter
 * @win: which PCI-E Memory Window to use
 * @addr: 32-bit-aligned address within adapter memory space
 * @len: number of bytes to transfer
 * @hbuf: 32-bit-aligned host buffer
 * @dir: T4_MEMORY_READ or T4_MEMORY_WRITE
 *
 * Moves @len bytes between adapter memory and the host buffer by sliding
 * PCI-E Memory Window @win across the adapter's address space.  Returns 0
 * on success, -EINVAL for misaligned arguments and -ENXIO if the adapter
 * appears dead.
 */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 cur_pos, cur_off, tail;
	u32 pf_field, base_reg, aperture, win_base;
	u32 *word_buf;

	/* Both the adapter address and the host buffer must be 32-bit
	 * aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	word_buf = (u32 *)hbuf;

	/* Lengths which aren't a multiple of 32 bits are common (e.g.
	 * firmware images), so strip any trailing bytes off here and
	 * transfer them separately at the end.
	 */
	tail = len & 0x3;
	len -= tail;

	/* Each window is programmed with a size ("aperture"), which sets
	 * the granularity of its mapping onto adapter memory, and with its
	 * base inside BAR0's address space.  On T4 that base is an absolute
	 * PCI-E bus address; on T5+ it is relative to BAR0.
	 */
	base_reg = t4_read_reg(adap,
			       PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						   win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (base_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
			win);
		return -ENXIO;
	}

	aperture = 1 << (G_WINDOW(base_reg) + X_WINDOW_SHIFT);
	win_base = G_PCIEOFST(base_reg) << X_PCIEOFST_SHIFT;
	if (is_t4(adap->params.chip))
		win_base -= adap->t4_bar0;
	pf_field = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Initial window position and our offset within that window. */
	cur_pos = addr & ~(aperture - 1);
	cur_off = addr - cur_pos;

	/* Point the window at the start of the transfer.  Read the register
	 * back so the new value is guaranteed to have propagated before we
	 * use it.
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     cur_pos | pf_field);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer whole 32-bit words first.
	 *
	 * Endianness: register accesses through the window apply the
	 * adapter's Big-Endian to PCI-E Link Little-Endian swizzle, so we
	 * undo that with le32_to_cpu()/cpu_to_le32() to keep the byte
	 * stream identical on both Big- and Little-Endian hosts.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*word_buf++ = le32_to_cpu(
			    (__force __le32)t4_read_reg(adap,
							win_base + cur_off));
		else
			t4_write_reg(adap, win_base + cur_off,
				     (__force u32)cpu_to_le32(*word_buf++));
		cur_off += sizeof(__be32);
		len -= sizeof(__be32);

		/* On reaching the end of the aperture, slide the window
		 * forward.  Doing this even when len has just hit 0 leaves
		 * the window positioned for the residual transfer below.
		 */
		if (cur_off == aperture) {
			cur_pos += aperture;
			cur_off = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(
					 A_PCIE_MEM_ACCESS_OFFSET, win),
				     cur_pos | pf_field);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(
					A_PCIE_MEM_ACCESS_OFFSET, win));
		}
	}

	/* Finish off any residual (non-32-bit-multiple) transfer; the
	 * window has already been positioned above if necessary.
	 */
	if (tail) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
			    (__force __le32)t4_read_reg(adap,
							win_base + cur_off));
			/* NOTE(review): this copies bytes [tail..3] rather
			 * than [0..tail-1].  Kept as-is to match the
			 * long-standing upstream behavior -- confirm before
			 * relying on partial-word reads.
			 */
			for (bp = (unsigned char *)word_buf, i = tail; i < 4;
			    i++)
				bp[i] = last.byte[i];
		} else {
			/* Zero-pad the bytes beyond the requested length. */
			last.word = *word_buf;
			for (i = tail; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, win_base + cur_off,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
1006
1007 /**
1008 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
1009 * @adap: the adapter
1010 * @win: PCI-E Memory Window to use
1011 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
1012 * @maddr: address within indicated memory type
1013 * @len: amount of memory to transfer
1014 * @hbuf: host memory buffer
1015 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
1016 *
1017 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
1018 * provides an (memory type, address withing memory type) interface.
1019 */
t4_memory_rw_mtype(struct adapter * adap,int win,int mtype,u32 maddr,u32 len,void * hbuf,int dir)1020 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
1021 u32 len, void *hbuf, int dir)
1022 {
1023 u32 mtype_offset;
1024 u32 edc_size, mc_size;
1025
1026 /* Offset into the region of memory which is being accessed
1027 * MEM_EDC0 = 0
1028 * MEM_EDC1 = 1
1029 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
1030 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
1031 * MEM_HMA = 4
1032 */
1033 edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
1034 if (mtype == MEM_HMA) {
1035 mtype_offset = 2 * (edc_size * 1024 * 1024);
1036 } else if (mtype != MEM_MC1)
1037 mtype_offset = (mtype * (edc_size * 1024 * 1024));
1038 else {
1039 mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
1040 A_MA_EXT_MEMORY0_BAR));
1041 mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
1042 }
1043
1044 return t4_memory_rw_addr(adap, win,
1045 mtype_offset + maddr, len,
1046 hbuf, dir);
1047 }
1048
1049 /*
1050 * Return the specified PCI-E Configuration Space register from our Physical
1051 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
1052 * since we prefer to let the firmware own all of these registers, but if that
1053 * fails we go for it directly ourselves.
1054 */
t4_read_pcie_cfg4(struct adapter * adap,int reg,int drv_fw_attach)1055 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
1056 {
1057 u32 val;
1058
1059 /*
1060 * If fw_attach != 0, construct and send the Firmware LDST Command to
1061 * retrieve the specified PCI-E Configuration Space register.
1062 */
1063 if (drv_fw_attach != 0) {
1064 struct fw_ldst_cmd ldst_cmd;
1065 int ret;
1066
1067 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1068 ldst_cmd.op_to_addrspace =
1069 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
1070 F_FW_CMD_REQUEST |
1071 F_FW_CMD_READ |
1072 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
1073 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
1074 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
1075 ldst_cmd.u.pcie.ctrl_to_fn =
1076 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
1077 ldst_cmd.u.pcie.r = reg;
1078
1079 /*
1080 * If the LDST Command succeeds, return the result, otherwise
1081 * fall through to reading it directly ourselves ...
1082 */
1083 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1084 &ldst_cmd);
1085 if (ret == 0)
1086 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1087
1088 CH_WARN(adap, "Firmware failed to return "
1089 "Configuration Space register %d, err = %d\n",
1090 reg, -ret);
1091 }
1092
1093 /*
1094 * Read the desired Configuration Space register via the PCI-E
1095 * Backdoor mechanism.
1096 */
1097 t4_hw_pci_read_cfg4(adap, reg, &val);
1098 return val;
1099 }
1100
1101 /*
1102 * Get the window based on base passed to it.
1103 * Window aperture is currently unhandled, but there is no use case for it
1104 * right now
1105 */
t4_get_window(struct adapter * adap,u64 pci_base,u64 pci_mask,u64 memwin_base,int drv_fw_attach)1106 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1107 {
1108 if (is_t4(adap->params.chip)) {
1109 u32 bar0;
1110
1111 /*
1112 * Truncation intentional: we only read the bottom 32-bits of
1113 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
1114 * mechanism to read BAR0 instead of using
1115 * pci_resource_start() because we could be operating from
1116 * within a Virtual Machine which is trapping our accesses to
1117 * our Configuration Space and we need to set up the PCI-E
1118 * Memory Window decoders with the actual addresses which will
1119 * be coming across the PCI-E link.
1120 */
1121 bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1122 bar0 &= pci_mask;
1123 adap->t4_bar0 = bar0;
1124
1125 return bar0 + memwin_base;
1126 } else {
1127 /* For T5, only relative offset inside the PCIe BAR is passed */
1128 return memwin_base;
1129 }
1130 }
1131
1132 /* Get the default utility window (win0) used by everyone */
t4_get_util_window(struct adapter * adap,int drv_fw_attach)1133 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1134 {
1135 return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1136 }
1137
1138 /*
1139 * Set up memory window for accessing adapter memory ranges. (Read
1140 * back MA register to ensure that changes propagate before we attempt
1141 * to use the new values.)
1142 */
t4_setup_memwin(struct adapter * adap,u32 memwin_base,u32 window)1143 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1144 {
1145 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1146 memwin_base | V_BIR(0) |
1147 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1148 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1149 }
1150
1151 /**
1152 * t4_get_regs_len - return the size of the chips register set
1153 * @adapter: the adapter
1154 *
1155 * Returns the size of the chip's BAR0 register space.
1156 */
t4_get_regs_len(struct adapter * adapter)1157 unsigned int t4_get_regs_len(struct adapter *adapter)
1158 {
1159 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1160
1161 switch (chip_version) {
1162 case CHELSIO_T4:
1163 return T4_REGMAP_SIZE;
1164
1165 case CHELSIO_T5:
1166 case CHELSIO_T6:
1167 return T5_REGMAP_SIZE;
1168 }
1169
1170 CH_ERR(adapter,
1171 "Unsupported chip version %d\n", chip_version);
1172 return 0;
1173 }
1174
1175 /**
1176 * t4_get_regs - read chip registers into provided buffer
1177 * @adap: the adapter
1178 * @buf: register buffer
1179 * @buf_size: size (in bytes) of register buffer
1180 *
1181 * If the provided register buffer isn't large enough for the chip's
1182 * full register range, the register dump will be truncated to the
1183 * register buffer's size.
1184 */
t4_get_regs(struct adapter * adap,void * buf,size_t buf_size)1185 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1186 {
1187 static const unsigned int t4_reg_ranges[] = {
1188 0x1008, 0x1108,
1189 0x1180, 0x1184,
1190 0x1190, 0x1194,
1191 0x11a0, 0x11a4,
1192 0x11b0, 0x11b4,
1193 0x11fc, 0x123c,
1194 0x1300, 0x173c,
1195 0x1800, 0x18fc,
1196 0x3000, 0x30d8,
1197 0x30e0, 0x30e4,
1198 0x30ec, 0x5910,
1199 0x5920, 0x5924,
1200 0x5960, 0x5960,
1201 0x5968, 0x5968,
1202 0x5970, 0x5970,
1203 0x5978, 0x5978,
1204 0x5980, 0x5980,
1205 0x5988, 0x5988,
1206 0x5990, 0x5990,
1207 0x5998, 0x5998,
1208 0x59a0, 0x59d4,
1209 0x5a00, 0x5ae0,
1210 0x5ae8, 0x5ae8,
1211 0x5af0, 0x5af0,
1212 0x5af8, 0x5af8,
1213 0x6000, 0x6098,
1214 0x6100, 0x6150,
1215 0x6200, 0x6208,
1216 0x6240, 0x6248,
1217 0x6280, 0x62b0,
1218 0x62c0, 0x6338,
1219 0x6370, 0x638c,
1220 0x6400, 0x643c,
1221 0x6500, 0x6524,
1222 0x6a00, 0x6a04,
1223 0x6a14, 0x6a38,
1224 0x6a60, 0x6a70,
1225 0x6a78, 0x6a78,
1226 0x6b00, 0x6b0c,
1227 0x6b1c, 0x6b84,
1228 0x6bf0, 0x6bf8,
1229 0x6c00, 0x6c0c,
1230 0x6c1c, 0x6c84,
1231 0x6cf0, 0x6cf8,
1232 0x6d00, 0x6d0c,
1233 0x6d1c, 0x6d84,
1234 0x6df0, 0x6df8,
1235 0x6e00, 0x6e0c,
1236 0x6e1c, 0x6e84,
1237 0x6ef0, 0x6ef8,
1238 0x6f00, 0x6f0c,
1239 0x6f1c, 0x6f84,
1240 0x6ff0, 0x6ff8,
1241 0x7000, 0x700c,
1242 0x701c, 0x7084,
1243 0x70f0, 0x70f8,
1244 0x7100, 0x710c,
1245 0x711c, 0x7184,
1246 0x71f0, 0x71f8,
1247 0x7200, 0x720c,
1248 0x721c, 0x7284,
1249 0x72f0, 0x72f8,
1250 0x7300, 0x730c,
1251 0x731c, 0x7384,
1252 0x73f0, 0x73f8,
1253 0x7400, 0x7450,
1254 0x7500, 0x7530,
1255 0x7600, 0x760c,
1256 0x7614, 0x761c,
1257 0x7680, 0x76cc,
1258 0x7700, 0x7798,
1259 0x77c0, 0x77fc,
1260 0x7900, 0x79fc,
1261 0x7b00, 0x7b58,
1262 0x7b60, 0x7b84,
1263 0x7b8c, 0x7c38,
1264 0x7d00, 0x7d38,
1265 0x7d40, 0x7d80,
1266 0x7d8c, 0x7ddc,
1267 0x7de4, 0x7e04,
1268 0x7e10, 0x7e1c,
1269 0x7e24, 0x7e38,
1270 0x7e40, 0x7e44,
1271 0x7e4c, 0x7e78,
1272 0x7e80, 0x7ea4,
1273 0x7eac, 0x7edc,
1274 0x7ee8, 0x7efc,
1275 0x8dc0, 0x8e04,
1276 0x8e10, 0x8e1c,
1277 0x8e30, 0x8e78,
1278 0x8ea0, 0x8eb8,
1279 0x8ec0, 0x8f6c,
1280 0x8fc0, 0x9008,
1281 0x9010, 0x9058,
1282 0x9060, 0x9060,
1283 0x9068, 0x9074,
1284 0x90fc, 0x90fc,
1285 0x9400, 0x9408,
1286 0x9410, 0x9458,
1287 0x9600, 0x9600,
1288 0x9608, 0x9638,
1289 0x9640, 0x96bc,
1290 0x9800, 0x9808,
1291 0x9820, 0x983c,
1292 0x9850, 0x9864,
1293 0x9c00, 0x9c6c,
1294 0x9c80, 0x9cec,
1295 0x9d00, 0x9d6c,
1296 0x9d80, 0x9dec,
1297 0x9e00, 0x9e6c,
1298 0x9e80, 0x9eec,
1299 0x9f00, 0x9f6c,
1300 0x9f80, 0x9fec,
1301 0xd004, 0xd004,
1302 0xd010, 0xd03c,
1303 0xdfc0, 0xdfe0,
1304 0xe000, 0xea7c,
1305 0xf000, 0x11110,
1306 0x11118, 0x11190,
1307 0x19040, 0x1906c,
1308 0x19078, 0x19080,
1309 0x1908c, 0x190e4,
1310 0x190f0, 0x190f8,
1311 0x19100, 0x19110,
1312 0x19120, 0x19124,
1313 0x19150, 0x19194,
1314 0x1919c, 0x191b0,
1315 0x191d0, 0x191e8,
1316 0x19238, 0x1924c,
1317 0x193f8, 0x1943c,
1318 0x1944c, 0x19474,
1319 0x19490, 0x194e0,
1320 0x194f0, 0x194f8,
1321 0x19800, 0x19c08,
1322 0x19c10, 0x19c90,
1323 0x19ca0, 0x19ce4,
1324 0x19cf0, 0x19d40,
1325 0x19d50, 0x19d94,
1326 0x19da0, 0x19de8,
1327 0x19df0, 0x19e40,
1328 0x19e50, 0x19e90,
1329 0x19ea0, 0x19f4c,
1330 0x1a000, 0x1a004,
1331 0x1a010, 0x1a06c,
1332 0x1a0b0, 0x1a0e4,
1333 0x1a0ec, 0x1a0f4,
1334 0x1a100, 0x1a108,
1335 0x1a114, 0x1a120,
1336 0x1a128, 0x1a130,
1337 0x1a138, 0x1a138,
1338 0x1a190, 0x1a1c4,
1339 0x1a1fc, 0x1a1fc,
1340 0x1e040, 0x1e04c,
1341 0x1e284, 0x1e28c,
1342 0x1e2c0, 0x1e2c0,
1343 0x1e2e0, 0x1e2e0,
1344 0x1e300, 0x1e384,
1345 0x1e3c0, 0x1e3c8,
1346 0x1e440, 0x1e44c,
1347 0x1e684, 0x1e68c,
1348 0x1e6c0, 0x1e6c0,
1349 0x1e6e0, 0x1e6e0,
1350 0x1e700, 0x1e784,
1351 0x1e7c0, 0x1e7c8,
1352 0x1e840, 0x1e84c,
1353 0x1ea84, 0x1ea8c,
1354 0x1eac0, 0x1eac0,
1355 0x1eae0, 0x1eae0,
1356 0x1eb00, 0x1eb84,
1357 0x1ebc0, 0x1ebc8,
1358 0x1ec40, 0x1ec4c,
1359 0x1ee84, 0x1ee8c,
1360 0x1eec0, 0x1eec0,
1361 0x1eee0, 0x1eee0,
1362 0x1ef00, 0x1ef84,
1363 0x1efc0, 0x1efc8,
1364 0x1f040, 0x1f04c,
1365 0x1f284, 0x1f28c,
1366 0x1f2c0, 0x1f2c0,
1367 0x1f2e0, 0x1f2e0,
1368 0x1f300, 0x1f384,
1369 0x1f3c0, 0x1f3c8,
1370 0x1f440, 0x1f44c,
1371 0x1f684, 0x1f68c,
1372 0x1f6c0, 0x1f6c0,
1373 0x1f6e0, 0x1f6e0,
1374 0x1f700, 0x1f784,
1375 0x1f7c0, 0x1f7c8,
1376 0x1f840, 0x1f84c,
1377 0x1fa84, 0x1fa8c,
1378 0x1fac0, 0x1fac0,
1379 0x1fae0, 0x1fae0,
1380 0x1fb00, 0x1fb84,
1381 0x1fbc0, 0x1fbc8,
1382 0x1fc40, 0x1fc4c,
1383 0x1fe84, 0x1fe8c,
1384 0x1fec0, 0x1fec0,
1385 0x1fee0, 0x1fee0,
1386 0x1ff00, 0x1ff84,
1387 0x1ffc0, 0x1ffc8,
1388 0x20000, 0x2002c,
1389 0x20100, 0x2013c,
1390 0x20190, 0x201a0,
1391 0x201a8, 0x201b8,
1392 0x201c4, 0x201c8,
1393 0x20200, 0x20318,
1394 0x20400, 0x204b4,
1395 0x204c0, 0x20528,
1396 0x20540, 0x20614,
1397 0x21000, 0x21040,
1398 0x2104c, 0x21060,
1399 0x210c0, 0x210ec,
1400 0x21200, 0x21268,
1401 0x21270, 0x21284,
1402 0x212fc, 0x21388,
1403 0x21400, 0x21404,
1404 0x21500, 0x21500,
1405 0x21510, 0x21518,
1406 0x2152c, 0x21530,
1407 0x2153c, 0x2153c,
1408 0x21550, 0x21554,
1409 0x21600, 0x21600,
1410 0x21608, 0x2161c,
1411 0x21624, 0x21628,
1412 0x21630, 0x21634,
1413 0x2163c, 0x2163c,
1414 0x21700, 0x2171c,
1415 0x21780, 0x2178c,
1416 0x21800, 0x21818,
1417 0x21820, 0x21828,
1418 0x21830, 0x21848,
1419 0x21850, 0x21854,
1420 0x21860, 0x21868,
1421 0x21870, 0x21870,
1422 0x21878, 0x21898,
1423 0x218a0, 0x218a8,
1424 0x218b0, 0x218c8,
1425 0x218d0, 0x218d4,
1426 0x218e0, 0x218e8,
1427 0x218f0, 0x218f0,
1428 0x218f8, 0x21a18,
1429 0x21a20, 0x21a28,
1430 0x21a30, 0x21a48,
1431 0x21a50, 0x21a54,
1432 0x21a60, 0x21a68,
1433 0x21a70, 0x21a70,
1434 0x21a78, 0x21a98,
1435 0x21aa0, 0x21aa8,
1436 0x21ab0, 0x21ac8,
1437 0x21ad0, 0x21ad4,
1438 0x21ae0, 0x21ae8,
1439 0x21af0, 0x21af0,
1440 0x21af8, 0x21c18,
1441 0x21c20, 0x21c20,
1442 0x21c28, 0x21c30,
1443 0x21c38, 0x21c38,
1444 0x21c80, 0x21c98,
1445 0x21ca0, 0x21ca8,
1446 0x21cb0, 0x21cc8,
1447 0x21cd0, 0x21cd4,
1448 0x21ce0, 0x21ce8,
1449 0x21cf0, 0x21cf0,
1450 0x21cf8, 0x21d7c,
1451 0x21e00, 0x21e04,
1452 0x22000, 0x2202c,
1453 0x22100, 0x2213c,
1454 0x22190, 0x221a0,
1455 0x221a8, 0x221b8,
1456 0x221c4, 0x221c8,
1457 0x22200, 0x22318,
1458 0x22400, 0x224b4,
1459 0x224c0, 0x22528,
1460 0x22540, 0x22614,
1461 0x23000, 0x23040,
1462 0x2304c, 0x23060,
1463 0x230c0, 0x230ec,
1464 0x23200, 0x23268,
1465 0x23270, 0x23284,
1466 0x232fc, 0x23388,
1467 0x23400, 0x23404,
1468 0x23500, 0x23500,
1469 0x23510, 0x23518,
1470 0x2352c, 0x23530,
1471 0x2353c, 0x2353c,
1472 0x23550, 0x23554,
1473 0x23600, 0x23600,
1474 0x23608, 0x2361c,
1475 0x23624, 0x23628,
1476 0x23630, 0x23634,
1477 0x2363c, 0x2363c,
1478 0x23700, 0x2371c,
1479 0x23780, 0x2378c,
1480 0x23800, 0x23818,
1481 0x23820, 0x23828,
1482 0x23830, 0x23848,
1483 0x23850, 0x23854,
1484 0x23860, 0x23868,
1485 0x23870, 0x23870,
1486 0x23878, 0x23898,
1487 0x238a0, 0x238a8,
1488 0x238b0, 0x238c8,
1489 0x238d0, 0x238d4,
1490 0x238e0, 0x238e8,
1491 0x238f0, 0x238f0,
1492 0x238f8, 0x23a18,
1493 0x23a20, 0x23a28,
1494 0x23a30, 0x23a48,
1495 0x23a50, 0x23a54,
1496 0x23a60, 0x23a68,
1497 0x23a70, 0x23a70,
1498 0x23a78, 0x23a98,
1499 0x23aa0, 0x23aa8,
1500 0x23ab0, 0x23ac8,
1501 0x23ad0, 0x23ad4,
1502 0x23ae0, 0x23ae8,
1503 0x23af0, 0x23af0,
1504 0x23af8, 0x23c18,
1505 0x23c20, 0x23c20,
1506 0x23c28, 0x23c30,
1507 0x23c38, 0x23c38,
1508 0x23c80, 0x23c98,
1509 0x23ca0, 0x23ca8,
1510 0x23cb0, 0x23cc8,
1511 0x23cd0, 0x23cd4,
1512 0x23ce0, 0x23ce8,
1513 0x23cf0, 0x23cf0,
1514 0x23cf8, 0x23d7c,
1515 0x23e00, 0x23e04,
1516 0x24000, 0x2402c,
1517 0x24100, 0x2413c,
1518 0x24190, 0x241a0,
1519 0x241a8, 0x241b8,
1520 0x241c4, 0x241c8,
1521 0x24200, 0x24318,
1522 0x24400, 0x244b4,
1523 0x244c0, 0x24528,
1524 0x24540, 0x24614,
1525 0x25000, 0x25040,
1526 0x2504c, 0x25060,
1527 0x250c0, 0x250ec,
1528 0x25200, 0x25268,
1529 0x25270, 0x25284,
1530 0x252fc, 0x25388,
1531 0x25400, 0x25404,
1532 0x25500, 0x25500,
1533 0x25510, 0x25518,
1534 0x2552c, 0x25530,
1535 0x2553c, 0x2553c,
1536 0x25550, 0x25554,
1537 0x25600, 0x25600,
1538 0x25608, 0x2561c,
1539 0x25624, 0x25628,
1540 0x25630, 0x25634,
1541 0x2563c, 0x2563c,
1542 0x25700, 0x2571c,
1543 0x25780, 0x2578c,
1544 0x25800, 0x25818,
1545 0x25820, 0x25828,
1546 0x25830, 0x25848,
1547 0x25850, 0x25854,
1548 0x25860, 0x25868,
1549 0x25870, 0x25870,
1550 0x25878, 0x25898,
1551 0x258a0, 0x258a8,
1552 0x258b0, 0x258c8,
1553 0x258d0, 0x258d4,
1554 0x258e0, 0x258e8,
1555 0x258f0, 0x258f0,
1556 0x258f8, 0x25a18,
1557 0x25a20, 0x25a28,
1558 0x25a30, 0x25a48,
1559 0x25a50, 0x25a54,
1560 0x25a60, 0x25a68,
1561 0x25a70, 0x25a70,
1562 0x25a78, 0x25a98,
1563 0x25aa0, 0x25aa8,
1564 0x25ab0, 0x25ac8,
1565 0x25ad0, 0x25ad4,
1566 0x25ae0, 0x25ae8,
1567 0x25af0, 0x25af0,
1568 0x25af8, 0x25c18,
1569 0x25c20, 0x25c20,
1570 0x25c28, 0x25c30,
1571 0x25c38, 0x25c38,
1572 0x25c80, 0x25c98,
1573 0x25ca0, 0x25ca8,
1574 0x25cb0, 0x25cc8,
1575 0x25cd0, 0x25cd4,
1576 0x25ce0, 0x25ce8,
1577 0x25cf0, 0x25cf0,
1578 0x25cf8, 0x25d7c,
1579 0x25e00, 0x25e04,
1580 0x26000, 0x2602c,
1581 0x26100, 0x2613c,
1582 0x26190, 0x261a0,
1583 0x261a8, 0x261b8,
1584 0x261c4, 0x261c8,
1585 0x26200, 0x26318,
1586 0x26400, 0x264b4,
1587 0x264c0, 0x26528,
1588 0x26540, 0x26614,
1589 0x27000, 0x27040,
1590 0x2704c, 0x27060,
1591 0x270c0, 0x270ec,
1592 0x27200, 0x27268,
1593 0x27270, 0x27284,
1594 0x272fc, 0x27388,
1595 0x27400, 0x27404,
1596 0x27500, 0x27500,
1597 0x27510, 0x27518,
1598 0x2752c, 0x27530,
1599 0x2753c, 0x2753c,
1600 0x27550, 0x27554,
1601 0x27600, 0x27600,
1602 0x27608, 0x2761c,
1603 0x27624, 0x27628,
1604 0x27630, 0x27634,
1605 0x2763c, 0x2763c,
1606 0x27700, 0x2771c,
1607 0x27780, 0x2778c,
1608 0x27800, 0x27818,
1609 0x27820, 0x27828,
1610 0x27830, 0x27848,
1611 0x27850, 0x27854,
1612 0x27860, 0x27868,
1613 0x27870, 0x27870,
1614 0x27878, 0x27898,
1615 0x278a0, 0x278a8,
1616 0x278b0, 0x278c8,
1617 0x278d0, 0x278d4,
1618 0x278e0, 0x278e8,
1619 0x278f0, 0x278f0,
1620 0x278f8, 0x27a18,
1621 0x27a20, 0x27a28,
1622 0x27a30, 0x27a48,
1623 0x27a50, 0x27a54,
1624 0x27a60, 0x27a68,
1625 0x27a70, 0x27a70,
1626 0x27a78, 0x27a98,
1627 0x27aa0, 0x27aa8,
1628 0x27ab0, 0x27ac8,
1629 0x27ad0, 0x27ad4,
1630 0x27ae0, 0x27ae8,
1631 0x27af0, 0x27af0,
1632 0x27af8, 0x27c18,
1633 0x27c20, 0x27c20,
1634 0x27c28, 0x27c30,
1635 0x27c38, 0x27c38,
1636 0x27c80, 0x27c98,
1637 0x27ca0, 0x27ca8,
1638 0x27cb0, 0x27cc8,
1639 0x27cd0, 0x27cd4,
1640 0x27ce0, 0x27ce8,
1641 0x27cf0, 0x27cf0,
1642 0x27cf8, 0x27d7c,
1643 0x27e00, 0x27e04,
1644 };
1645
1646 static const unsigned int t5_reg_ranges[] = {
1647 0x1008, 0x10c0,
1648 0x10cc, 0x10f8,
1649 0x1100, 0x1100,
1650 0x110c, 0x1148,
1651 0x1180, 0x1184,
1652 0x1190, 0x1194,
1653 0x11a0, 0x11a4,
1654 0x11b0, 0x11b4,
1655 0x11fc, 0x123c,
1656 0x1280, 0x173c,
1657 0x1800, 0x18fc,
1658 0x3000, 0x3028,
1659 0x3060, 0x30b0,
1660 0x30b8, 0x30d8,
1661 0x30e0, 0x30fc,
1662 0x3140, 0x357c,
1663 0x35a8, 0x35cc,
1664 0x35ec, 0x35ec,
1665 0x3600, 0x5624,
1666 0x56cc, 0x56ec,
1667 0x56f4, 0x5720,
1668 0x5728, 0x575c,
1669 0x580c, 0x5814,
1670 0x5890, 0x589c,
1671 0x58a4, 0x58ac,
1672 0x58b8, 0x58bc,
1673 0x5940, 0x59c8,
1674 0x59d0, 0x59dc,
1675 0x59fc, 0x5a18,
1676 0x5a60, 0x5a70,
1677 0x5a80, 0x5a9c,
1678 0x5b94, 0x5bfc,
1679 0x6000, 0x6020,
1680 0x6028, 0x6040,
1681 0x6058, 0x609c,
1682 0x60a8, 0x614c,
1683 0x7700, 0x7798,
1684 0x77c0, 0x78fc,
1685 0x7b00, 0x7b58,
1686 0x7b60, 0x7b84,
1687 0x7b8c, 0x7c54,
1688 0x7d00, 0x7d38,
1689 0x7d40, 0x7d80,
1690 0x7d8c, 0x7ddc,
1691 0x7de4, 0x7e04,
1692 0x7e10, 0x7e1c,
1693 0x7e24, 0x7e38,
1694 0x7e40, 0x7e44,
1695 0x7e4c, 0x7e78,
1696 0x7e80, 0x7edc,
1697 0x7ee8, 0x7efc,
1698 0x8dc0, 0x8de0,
1699 0x8df8, 0x8e04,
1700 0x8e10, 0x8e84,
1701 0x8ea0, 0x8f84,
1702 0x8fc0, 0x9058,
1703 0x9060, 0x9060,
1704 0x9068, 0x90f8,
1705 0x9400, 0x9408,
1706 0x9410, 0x9470,
1707 0x9600, 0x9600,
1708 0x9608, 0x9638,
1709 0x9640, 0x96f4,
1710 0x9800, 0x9808,
1711 0x9820, 0x983c,
1712 0x9850, 0x9864,
1713 0x9c00, 0x9c6c,
1714 0x9c80, 0x9cec,
1715 0x9d00, 0x9d6c,
1716 0x9d80, 0x9dec,
1717 0x9e00, 0x9e6c,
1718 0x9e80, 0x9eec,
1719 0x9f00, 0x9f6c,
1720 0x9f80, 0xa020,
1721 0xd004, 0xd004,
1722 0xd010, 0xd03c,
1723 0xdfc0, 0xdfe0,
1724 0xe000, 0x1106c,
1725 0x11074, 0x11088,
1726 0x1109c, 0x1117c,
1727 0x11190, 0x11204,
1728 0x19040, 0x1906c,
1729 0x19078, 0x19080,
1730 0x1908c, 0x190e8,
1731 0x190f0, 0x190f8,
1732 0x19100, 0x19110,
1733 0x19120, 0x19124,
1734 0x19150, 0x19194,
1735 0x1919c, 0x191b0,
1736 0x191d0, 0x191e8,
1737 0x19238, 0x19290,
1738 0x193f8, 0x19428,
1739 0x19430, 0x19444,
1740 0x1944c, 0x1946c,
1741 0x19474, 0x19474,
1742 0x19490, 0x194cc,
1743 0x194f0, 0x194f8,
1744 0x19c00, 0x19c08,
1745 0x19c10, 0x19c60,
1746 0x19c94, 0x19ce4,
1747 0x19cf0, 0x19d40,
1748 0x19d50, 0x19d94,
1749 0x19da0, 0x19de8,
1750 0x19df0, 0x19e10,
1751 0x19e50, 0x19e90,
1752 0x19ea0, 0x19f24,
1753 0x19f34, 0x19f34,
1754 0x19f40, 0x19f50,
1755 0x19f90, 0x19fb4,
1756 0x19fc4, 0x19fe4,
1757 0x1a000, 0x1a004,
1758 0x1a010, 0x1a06c,
1759 0x1a0b0, 0x1a0e4,
1760 0x1a0ec, 0x1a0f8,
1761 0x1a100, 0x1a108,
1762 0x1a114, 0x1a120,
1763 0x1a128, 0x1a130,
1764 0x1a138, 0x1a138,
1765 0x1a190, 0x1a1c4,
1766 0x1a1fc, 0x1a1fc,
1767 0x1e008, 0x1e00c,
1768 0x1e040, 0x1e044,
1769 0x1e04c, 0x1e04c,
1770 0x1e284, 0x1e290,
1771 0x1e2c0, 0x1e2c0,
1772 0x1e2e0, 0x1e2e0,
1773 0x1e300, 0x1e384,
1774 0x1e3c0, 0x1e3c8,
1775 0x1e408, 0x1e40c,
1776 0x1e440, 0x1e444,
1777 0x1e44c, 0x1e44c,
1778 0x1e684, 0x1e690,
1779 0x1e6c0, 0x1e6c0,
1780 0x1e6e0, 0x1e6e0,
1781 0x1e700, 0x1e784,
1782 0x1e7c0, 0x1e7c8,
1783 0x1e808, 0x1e80c,
1784 0x1e840, 0x1e844,
1785 0x1e84c, 0x1e84c,
1786 0x1ea84, 0x1ea90,
1787 0x1eac0, 0x1eac0,
1788 0x1eae0, 0x1eae0,
1789 0x1eb00, 0x1eb84,
1790 0x1ebc0, 0x1ebc8,
1791 0x1ec08, 0x1ec0c,
1792 0x1ec40, 0x1ec44,
1793 0x1ec4c, 0x1ec4c,
1794 0x1ee84, 0x1ee90,
1795 0x1eec0, 0x1eec0,
1796 0x1eee0, 0x1eee0,
1797 0x1ef00, 0x1ef84,
1798 0x1efc0, 0x1efc8,
1799 0x1f008, 0x1f00c,
1800 0x1f040, 0x1f044,
1801 0x1f04c, 0x1f04c,
1802 0x1f284, 0x1f290,
1803 0x1f2c0, 0x1f2c0,
1804 0x1f2e0, 0x1f2e0,
1805 0x1f300, 0x1f384,
1806 0x1f3c0, 0x1f3c8,
1807 0x1f408, 0x1f40c,
1808 0x1f440, 0x1f444,
1809 0x1f44c, 0x1f44c,
1810 0x1f684, 0x1f690,
1811 0x1f6c0, 0x1f6c0,
1812 0x1f6e0, 0x1f6e0,
1813 0x1f700, 0x1f784,
1814 0x1f7c0, 0x1f7c8,
1815 0x1f808, 0x1f80c,
1816 0x1f840, 0x1f844,
1817 0x1f84c, 0x1f84c,
1818 0x1fa84, 0x1fa90,
1819 0x1fac0, 0x1fac0,
1820 0x1fae0, 0x1fae0,
1821 0x1fb00, 0x1fb84,
1822 0x1fbc0, 0x1fbc8,
1823 0x1fc08, 0x1fc0c,
1824 0x1fc40, 0x1fc44,
1825 0x1fc4c, 0x1fc4c,
1826 0x1fe84, 0x1fe90,
1827 0x1fec0, 0x1fec0,
1828 0x1fee0, 0x1fee0,
1829 0x1ff00, 0x1ff84,
1830 0x1ffc0, 0x1ffc8,
1831 0x30000, 0x30030,
1832 0x30100, 0x30144,
1833 0x30190, 0x301a0,
1834 0x301a8, 0x301b8,
1835 0x301c4, 0x301c8,
1836 0x301d0, 0x301d0,
1837 0x30200, 0x30318,
1838 0x30400, 0x304b4,
1839 0x304c0, 0x3052c,
1840 0x30540, 0x3061c,
1841 0x30800, 0x30828,
1842 0x30834, 0x30834,
1843 0x308c0, 0x30908,
1844 0x30910, 0x309ac,
1845 0x30a00, 0x30a14,
1846 0x30a1c, 0x30a2c,
1847 0x30a44, 0x30a50,
1848 0x30a74, 0x30a74,
1849 0x30a7c, 0x30afc,
1850 0x30b08, 0x30c24,
1851 0x30d00, 0x30d00,
1852 0x30d08, 0x30d14,
1853 0x30d1c, 0x30d20,
1854 0x30d3c, 0x30d3c,
1855 0x30d48, 0x30d50,
1856 0x31200, 0x3120c,
1857 0x31220, 0x31220,
1858 0x31240, 0x31240,
1859 0x31600, 0x3160c,
1860 0x31a00, 0x31a1c,
1861 0x31e00, 0x31e20,
1862 0x31e38, 0x31e3c,
1863 0x31e80, 0x31e80,
1864 0x31e88, 0x31ea8,
1865 0x31eb0, 0x31eb4,
1866 0x31ec8, 0x31ed4,
1867 0x31fb8, 0x32004,
1868 0x32200, 0x32200,
1869 0x32208, 0x32240,
1870 0x32248, 0x32280,
1871 0x32288, 0x322c0,
1872 0x322c8, 0x322fc,
1873 0x32600, 0x32630,
1874 0x32a00, 0x32abc,
1875 0x32b00, 0x32b10,
1876 0x32b20, 0x32b30,
1877 0x32b40, 0x32b50,
1878 0x32b60, 0x32b70,
1879 0x33000, 0x33028,
1880 0x33030, 0x33048,
1881 0x33060, 0x33068,
1882 0x33070, 0x3309c,
1883 0x330f0, 0x33128,
1884 0x33130, 0x33148,
1885 0x33160, 0x33168,
1886 0x33170, 0x3319c,
1887 0x331f0, 0x33238,
1888 0x33240, 0x33240,
1889 0x33248, 0x33250,
1890 0x3325c, 0x33264,
1891 0x33270, 0x332b8,
1892 0x332c0, 0x332e4,
1893 0x332f8, 0x33338,
1894 0x33340, 0x33340,
1895 0x33348, 0x33350,
1896 0x3335c, 0x33364,
1897 0x33370, 0x333b8,
1898 0x333c0, 0x333e4,
1899 0x333f8, 0x33428,
1900 0x33430, 0x33448,
1901 0x33460, 0x33468,
1902 0x33470, 0x3349c,
1903 0x334f0, 0x33528,
1904 0x33530, 0x33548,
1905 0x33560, 0x33568,
1906 0x33570, 0x3359c,
1907 0x335f0, 0x33638,
1908 0x33640, 0x33640,
1909 0x33648, 0x33650,
1910 0x3365c, 0x33664,
1911 0x33670, 0x336b8,
1912 0x336c0, 0x336e4,
1913 0x336f8, 0x33738,
1914 0x33740, 0x33740,
1915 0x33748, 0x33750,
1916 0x3375c, 0x33764,
1917 0x33770, 0x337b8,
1918 0x337c0, 0x337e4,
1919 0x337f8, 0x337fc,
1920 0x33814, 0x33814,
1921 0x3382c, 0x3382c,
1922 0x33880, 0x3388c,
1923 0x338e8, 0x338ec,
1924 0x33900, 0x33928,
1925 0x33930, 0x33948,
1926 0x33960, 0x33968,
1927 0x33970, 0x3399c,
1928 0x339f0, 0x33a38,
1929 0x33a40, 0x33a40,
1930 0x33a48, 0x33a50,
1931 0x33a5c, 0x33a64,
1932 0x33a70, 0x33ab8,
1933 0x33ac0, 0x33ae4,
1934 0x33af8, 0x33b10,
1935 0x33b28, 0x33b28,
1936 0x33b3c, 0x33b50,
1937 0x33bf0, 0x33c10,
1938 0x33c28, 0x33c28,
1939 0x33c3c, 0x33c50,
1940 0x33cf0, 0x33cfc,
1941 0x34000, 0x34030,
1942 0x34100, 0x34144,
1943 0x34190, 0x341a0,
1944 0x341a8, 0x341b8,
1945 0x341c4, 0x341c8,
1946 0x341d0, 0x341d0,
1947 0x34200, 0x34318,
1948 0x34400, 0x344b4,
1949 0x344c0, 0x3452c,
1950 0x34540, 0x3461c,
1951 0x34800, 0x34828,
1952 0x34834, 0x34834,
1953 0x348c0, 0x34908,
1954 0x34910, 0x349ac,
1955 0x34a00, 0x34a14,
1956 0x34a1c, 0x34a2c,
1957 0x34a44, 0x34a50,
1958 0x34a74, 0x34a74,
1959 0x34a7c, 0x34afc,
1960 0x34b08, 0x34c24,
1961 0x34d00, 0x34d00,
1962 0x34d08, 0x34d14,
1963 0x34d1c, 0x34d20,
1964 0x34d3c, 0x34d3c,
1965 0x34d48, 0x34d50,
1966 0x35200, 0x3520c,
1967 0x35220, 0x35220,
1968 0x35240, 0x35240,
1969 0x35600, 0x3560c,
1970 0x35a00, 0x35a1c,
1971 0x35e00, 0x35e20,
1972 0x35e38, 0x35e3c,
1973 0x35e80, 0x35e80,
1974 0x35e88, 0x35ea8,
1975 0x35eb0, 0x35eb4,
1976 0x35ec8, 0x35ed4,
1977 0x35fb8, 0x36004,
1978 0x36200, 0x36200,
1979 0x36208, 0x36240,
1980 0x36248, 0x36280,
1981 0x36288, 0x362c0,
1982 0x362c8, 0x362fc,
1983 0x36600, 0x36630,
1984 0x36a00, 0x36abc,
1985 0x36b00, 0x36b10,
1986 0x36b20, 0x36b30,
1987 0x36b40, 0x36b50,
1988 0x36b60, 0x36b70,
1989 0x37000, 0x37028,
1990 0x37030, 0x37048,
1991 0x37060, 0x37068,
1992 0x37070, 0x3709c,
1993 0x370f0, 0x37128,
1994 0x37130, 0x37148,
1995 0x37160, 0x37168,
1996 0x37170, 0x3719c,
1997 0x371f0, 0x37238,
1998 0x37240, 0x37240,
1999 0x37248, 0x37250,
2000 0x3725c, 0x37264,
2001 0x37270, 0x372b8,
2002 0x372c0, 0x372e4,
2003 0x372f8, 0x37338,
2004 0x37340, 0x37340,
2005 0x37348, 0x37350,
2006 0x3735c, 0x37364,
2007 0x37370, 0x373b8,
2008 0x373c0, 0x373e4,
2009 0x373f8, 0x37428,
2010 0x37430, 0x37448,
2011 0x37460, 0x37468,
2012 0x37470, 0x3749c,
2013 0x374f0, 0x37528,
2014 0x37530, 0x37548,
2015 0x37560, 0x37568,
2016 0x37570, 0x3759c,
2017 0x375f0, 0x37638,
2018 0x37640, 0x37640,
2019 0x37648, 0x37650,
2020 0x3765c, 0x37664,
2021 0x37670, 0x376b8,
2022 0x376c0, 0x376e4,
2023 0x376f8, 0x37738,
2024 0x37740, 0x37740,
2025 0x37748, 0x37750,
2026 0x3775c, 0x37764,
2027 0x37770, 0x377b8,
2028 0x377c0, 0x377e4,
2029 0x377f8, 0x377fc,
2030 0x37814, 0x37814,
2031 0x3782c, 0x3782c,
2032 0x37880, 0x3788c,
2033 0x378e8, 0x378ec,
2034 0x37900, 0x37928,
2035 0x37930, 0x37948,
2036 0x37960, 0x37968,
2037 0x37970, 0x3799c,
2038 0x379f0, 0x37a38,
2039 0x37a40, 0x37a40,
2040 0x37a48, 0x37a50,
2041 0x37a5c, 0x37a64,
2042 0x37a70, 0x37ab8,
2043 0x37ac0, 0x37ae4,
2044 0x37af8, 0x37b10,
2045 0x37b28, 0x37b28,
2046 0x37b3c, 0x37b50,
2047 0x37bf0, 0x37c10,
2048 0x37c28, 0x37c28,
2049 0x37c3c, 0x37c50,
2050 0x37cf0, 0x37cfc,
2051 0x38000, 0x38030,
2052 0x38100, 0x38144,
2053 0x38190, 0x381a0,
2054 0x381a8, 0x381b8,
2055 0x381c4, 0x381c8,
2056 0x381d0, 0x381d0,
2057 0x38200, 0x38318,
2058 0x38400, 0x384b4,
2059 0x384c0, 0x3852c,
2060 0x38540, 0x3861c,
2061 0x38800, 0x38828,
2062 0x38834, 0x38834,
2063 0x388c0, 0x38908,
2064 0x38910, 0x389ac,
2065 0x38a00, 0x38a14,
2066 0x38a1c, 0x38a2c,
2067 0x38a44, 0x38a50,
2068 0x38a74, 0x38a74,
2069 0x38a7c, 0x38afc,
2070 0x38b08, 0x38c24,
2071 0x38d00, 0x38d00,
2072 0x38d08, 0x38d14,
2073 0x38d1c, 0x38d20,
2074 0x38d3c, 0x38d3c,
2075 0x38d48, 0x38d50,
2076 0x39200, 0x3920c,
2077 0x39220, 0x39220,
2078 0x39240, 0x39240,
2079 0x39600, 0x3960c,
2080 0x39a00, 0x39a1c,
2081 0x39e00, 0x39e20,
2082 0x39e38, 0x39e3c,
2083 0x39e80, 0x39e80,
2084 0x39e88, 0x39ea8,
2085 0x39eb0, 0x39eb4,
2086 0x39ec8, 0x39ed4,
2087 0x39fb8, 0x3a004,
2088 0x3a200, 0x3a200,
2089 0x3a208, 0x3a240,
2090 0x3a248, 0x3a280,
2091 0x3a288, 0x3a2c0,
2092 0x3a2c8, 0x3a2fc,
2093 0x3a600, 0x3a630,
2094 0x3aa00, 0x3aabc,
2095 0x3ab00, 0x3ab10,
2096 0x3ab20, 0x3ab30,
2097 0x3ab40, 0x3ab50,
2098 0x3ab60, 0x3ab70,
2099 0x3b000, 0x3b028,
2100 0x3b030, 0x3b048,
2101 0x3b060, 0x3b068,
2102 0x3b070, 0x3b09c,
2103 0x3b0f0, 0x3b128,
2104 0x3b130, 0x3b148,
2105 0x3b160, 0x3b168,
2106 0x3b170, 0x3b19c,
2107 0x3b1f0, 0x3b238,
2108 0x3b240, 0x3b240,
2109 0x3b248, 0x3b250,
2110 0x3b25c, 0x3b264,
2111 0x3b270, 0x3b2b8,
2112 0x3b2c0, 0x3b2e4,
2113 0x3b2f8, 0x3b338,
2114 0x3b340, 0x3b340,
2115 0x3b348, 0x3b350,
2116 0x3b35c, 0x3b364,
2117 0x3b370, 0x3b3b8,
2118 0x3b3c0, 0x3b3e4,
2119 0x3b3f8, 0x3b428,
2120 0x3b430, 0x3b448,
2121 0x3b460, 0x3b468,
2122 0x3b470, 0x3b49c,
2123 0x3b4f0, 0x3b528,
2124 0x3b530, 0x3b548,
2125 0x3b560, 0x3b568,
2126 0x3b570, 0x3b59c,
2127 0x3b5f0, 0x3b638,
2128 0x3b640, 0x3b640,
2129 0x3b648, 0x3b650,
2130 0x3b65c, 0x3b664,
2131 0x3b670, 0x3b6b8,
2132 0x3b6c0, 0x3b6e4,
2133 0x3b6f8, 0x3b738,
2134 0x3b740, 0x3b740,
2135 0x3b748, 0x3b750,
2136 0x3b75c, 0x3b764,
2137 0x3b770, 0x3b7b8,
2138 0x3b7c0, 0x3b7e4,
2139 0x3b7f8, 0x3b7fc,
2140 0x3b814, 0x3b814,
2141 0x3b82c, 0x3b82c,
2142 0x3b880, 0x3b88c,
2143 0x3b8e8, 0x3b8ec,
2144 0x3b900, 0x3b928,
2145 0x3b930, 0x3b948,
2146 0x3b960, 0x3b968,
2147 0x3b970, 0x3b99c,
2148 0x3b9f0, 0x3ba38,
2149 0x3ba40, 0x3ba40,
2150 0x3ba48, 0x3ba50,
2151 0x3ba5c, 0x3ba64,
2152 0x3ba70, 0x3bab8,
2153 0x3bac0, 0x3bae4,
2154 0x3baf8, 0x3bb10,
2155 0x3bb28, 0x3bb28,
2156 0x3bb3c, 0x3bb50,
2157 0x3bbf0, 0x3bc10,
2158 0x3bc28, 0x3bc28,
2159 0x3bc3c, 0x3bc50,
2160 0x3bcf0, 0x3bcfc,
2161 0x3c000, 0x3c030,
2162 0x3c100, 0x3c144,
2163 0x3c190, 0x3c1a0,
2164 0x3c1a8, 0x3c1b8,
2165 0x3c1c4, 0x3c1c8,
2166 0x3c1d0, 0x3c1d0,
2167 0x3c200, 0x3c318,
2168 0x3c400, 0x3c4b4,
2169 0x3c4c0, 0x3c52c,
2170 0x3c540, 0x3c61c,
2171 0x3c800, 0x3c828,
2172 0x3c834, 0x3c834,
2173 0x3c8c0, 0x3c908,
2174 0x3c910, 0x3c9ac,
2175 0x3ca00, 0x3ca14,
2176 0x3ca1c, 0x3ca2c,
2177 0x3ca44, 0x3ca50,
2178 0x3ca74, 0x3ca74,
2179 0x3ca7c, 0x3cafc,
2180 0x3cb08, 0x3cc24,
2181 0x3cd00, 0x3cd00,
2182 0x3cd08, 0x3cd14,
2183 0x3cd1c, 0x3cd20,
2184 0x3cd3c, 0x3cd3c,
2185 0x3cd48, 0x3cd50,
2186 0x3d200, 0x3d20c,
2187 0x3d220, 0x3d220,
2188 0x3d240, 0x3d240,
2189 0x3d600, 0x3d60c,
2190 0x3da00, 0x3da1c,
2191 0x3de00, 0x3de20,
2192 0x3de38, 0x3de3c,
2193 0x3de80, 0x3de80,
2194 0x3de88, 0x3dea8,
2195 0x3deb0, 0x3deb4,
2196 0x3dec8, 0x3ded4,
2197 0x3dfb8, 0x3e004,
2198 0x3e200, 0x3e200,
2199 0x3e208, 0x3e240,
2200 0x3e248, 0x3e280,
2201 0x3e288, 0x3e2c0,
2202 0x3e2c8, 0x3e2fc,
2203 0x3e600, 0x3e630,
2204 0x3ea00, 0x3eabc,
2205 0x3eb00, 0x3eb10,
2206 0x3eb20, 0x3eb30,
2207 0x3eb40, 0x3eb50,
2208 0x3eb60, 0x3eb70,
2209 0x3f000, 0x3f028,
2210 0x3f030, 0x3f048,
2211 0x3f060, 0x3f068,
2212 0x3f070, 0x3f09c,
2213 0x3f0f0, 0x3f128,
2214 0x3f130, 0x3f148,
2215 0x3f160, 0x3f168,
2216 0x3f170, 0x3f19c,
2217 0x3f1f0, 0x3f238,
2218 0x3f240, 0x3f240,
2219 0x3f248, 0x3f250,
2220 0x3f25c, 0x3f264,
2221 0x3f270, 0x3f2b8,
2222 0x3f2c0, 0x3f2e4,
2223 0x3f2f8, 0x3f338,
2224 0x3f340, 0x3f340,
2225 0x3f348, 0x3f350,
2226 0x3f35c, 0x3f364,
2227 0x3f370, 0x3f3b8,
2228 0x3f3c0, 0x3f3e4,
2229 0x3f3f8, 0x3f428,
2230 0x3f430, 0x3f448,
2231 0x3f460, 0x3f468,
2232 0x3f470, 0x3f49c,
2233 0x3f4f0, 0x3f528,
2234 0x3f530, 0x3f548,
2235 0x3f560, 0x3f568,
2236 0x3f570, 0x3f59c,
2237 0x3f5f0, 0x3f638,
2238 0x3f640, 0x3f640,
2239 0x3f648, 0x3f650,
2240 0x3f65c, 0x3f664,
2241 0x3f670, 0x3f6b8,
2242 0x3f6c0, 0x3f6e4,
2243 0x3f6f8, 0x3f738,
2244 0x3f740, 0x3f740,
2245 0x3f748, 0x3f750,
2246 0x3f75c, 0x3f764,
2247 0x3f770, 0x3f7b8,
2248 0x3f7c0, 0x3f7e4,
2249 0x3f7f8, 0x3f7fc,
2250 0x3f814, 0x3f814,
2251 0x3f82c, 0x3f82c,
2252 0x3f880, 0x3f88c,
2253 0x3f8e8, 0x3f8ec,
2254 0x3f900, 0x3f928,
2255 0x3f930, 0x3f948,
2256 0x3f960, 0x3f968,
2257 0x3f970, 0x3f99c,
2258 0x3f9f0, 0x3fa38,
2259 0x3fa40, 0x3fa40,
2260 0x3fa48, 0x3fa50,
2261 0x3fa5c, 0x3fa64,
2262 0x3fa70, 0x3fab8,
2263 0x3fac0, 0x3fae4,
2264 0x3faf8, 0x3fb10,
2265 0x3fb28, 0x3fb28,
2266 0x3fb3c, 0x3fb50,
2267 0x3fbf0, 0x3fc10,
2268 0x3fc28, 0x3fc28,
2269 0x3fc3c, 0x3fc50,
2270 0x3fcf0, 0x3fcfc,
2271 0x40000, 0x4000c,
2272 0x40040, 0x40050,
2273 0x40060, 0x40068,
2274 0x4007c, 0x4008c,
2275 0x40094, 0x400b0,
2276 0x400c0, 0x40144,
2277 0x40180, 0x4018c,
2278 0x40200, 0x40254,
2279 0x40260, 0x40264,
2280 0x40270, 0x40288,
2281 0x40290, 0x40298,
2282 0x402ac, 0x402c8,
2283 0x402d0, 0x402e0,
2284 0x402f0, 0x402f0,
2285 0x40300, 0x4033c,
2286 0x403f8, 0x403fc,
2287 0x41304, 0x413c4,
2288 0x41400, 0x4140c,
2289 0x41414, 0x4141c,
2290 0x41480, 0x414d0,
2291 0x44000, 0x44054,
2292 0x4405c, 0x44078,
2293 0x440c0, 0x44174,
2294 0x44180, 0x441ac,
2295 0x441b4, 0x441b8,
2296 0x441c0, 0x44254,
2297 0x4425c, 0x44278,
2298 0x442c0, 0x44374,
2299 0x44380, 0x443ac,
2300 0x443b4, 0x443b8,
2301 0x443c0, 0x44454,
2302 0x4445c, 0x44478,
2303 0x444c0, 0x44574,
2304 0x44580, 0x445ac,
2305 0x445b4, 0x445b8,
2306 0x445c0, 0x44654,
2307 0x4465c, 0x44678,
2308 0x446c0, 0x44774,
2309 0x44780, 0x447ac,
2310 0x447b4, 0x447b8,
2311 0x447c0, 0x44854,
2312 0x4485c, 0x44878,
2313 0x448c0, 0x44974,
2314 0x44980, 0x449ac,
2315 0x449b4, 0x449b8,
2316 0x449c0, 0x449fc,
2317 0x45000, 0x45004,
2318 0x45010, 0x45030,
2319 0x45040, 0x45060,
2320 0x45068, 0x45068,
2321 0x45080, 0x45084,
2322 0x450a0, 0x450b0,
2323 0x45200, 0x45204,
2324 0x45210, 0x45230,
2325 0x45240, 0x45260,
2326 0x45268, 0x45268,
2327 0x45280, 0x45284,
2328 0x452a0, 0x452b0,
2329 0x460c0, 0x460e4,
2330 0x47000, 0x4703c,
2331 0x47044, 0x4708c,
2332 0x47200, 0x47250,
2333 0x47400, 0x47408,
2334 0x47414, 0x47420,
2335 0x47600, 0x47618,
2336 0x47800, 0x47814,
2337 0x48000, 0x4800c,
2338 0x48040, 0x48050,
2339 0x48060, 0x48068,
2340 0x4807c, 0x4808c,
2341 0x48094, 0x480b0,
2342 0x480c0, 0x48144,
2343 0x48180, 0x4818c,
2344 0x48200, 0x48254,
2345 0x48260, 0x48264,
2346 0x48270, 0x48288,
2347 0x48290, 0x48298,
2348 0x482ac, 0x482c8,
2349 0x482d0, 0x482e0,
2350 0x482f0, 0x482f0,
2351 0x48300, 0x4833c,
2352 0x483f8, 0x483fc,
2353 0x49304, 0x493c4,
2354 0x49400, 0x4940c,
2355 0x49414, 0x4941c,
2356 0x49480, 0x494d0,
2357 0x4c000, 0x4c054,
2358 0x4c05c, 0x4c078,
2359 0x4c0c0, 0x4c174,
2360 0x4c180, 0x4c1ac,
2361 0x4c1b4, 0x4c1b8,
2362 0x4c1c0, 0x4c254,
2363 0x4c25c, 0x4c278,
2364 0x4c2c0, 0x4c374,
2365 0x4c380, 0x4c3ac,
2366 0x4c3b4, 0x4c3b8,
2367 0x4c3c0, 0x4c454,
2368 0x4c45c, 0x4c478,
2369 0x4c4c0, 0x4c574,
2370 0x4c580, 0x4c5ac,
2371 0x4c5b4, 0x4c5b8,
2372 0x4c5c0, 0x4c654,
2373 0x4c65c, 0x4c678,
2374 0x4c6c0, 0x4c774,
2375 0x4c780, 0x4c7ac,
2376 0x4c7b4, 0x4c7b8,
2377 0x4c7c0, 0x4c854,
2378 0x4c85c, 0x4c878,
2379 0x4c8c0, 0x4c974,
2380 0x4c980, 0x4c9ac,
2381 0x4c9b4, 0x4c9b8,
2382 0x4c9c0, 0x4c9fc,
2383 0x4d000, 0x4d004,
2384 0x4d010, 0x4d030,
2385 0x4d040, 0x4d060,
2386 0x4d068, 0x4d068,
2387 0x4d080, 0x4d084,
2388 0x4d0a0, 0x4d0b0,
2389 0x4d200, 0x4d204,
2390 0x4d210, 0x4d230,
2391 0x4d240, 0x4d260,
2392 0x4d268, 0x4d268,
2393 0x4d280, 0x4d284,
2394 0x4d2a0, 0x4d2b0,
2395 0x4e0c0, 0x4e0e4,
2396 0x4f000, 0x4f03c,
2397 0x4f044, 0x4f08c,
2398 0x4f200, 0x4f250,
2399 0x4f400, 0x4f408,
2400 0x4f414, 0x4f420,
2401 0x4f600, 0x4f618,
2402 0x4f800, 0x4f814,
2403 0x50000, 0x50084,
2404 0x50090, 0x500cc,
2405 0x50400, 0x50400,
2406 0x50800, 0x50884,
2407 0x50890, 0x508cc,
2408 0x50c00, 0x50c00,
2409 0x51000, 0x5101c,
2410 0x51300, 0x51308,
2411 };
2412
2413 static const unsigned int t6_reg_ranges[] = {
2414 0x1008, 0x101c,
2415 0x1024, 0x10a8,
2416 0x10b4, 0x10f8,
2417 0x1100, 0x1114,
2418 0x111c, 0x112c,
2419 0x1138, 0x113c,
2420 0x1144, 0x114c,
2421 0x1180, 0x1184,
2422 0x1190, 0x1194,
2423 0x11a0, 0x11a4,
2424 0x11b0, 0x11c4,
2425 0x11fc, 0x1274,
2426 0x1280, 0x133c,
2427 0x1800, 0x18fc,
2428 0x3000, 0x302c,
2429 0x3060, 0x30b0,
2430 0x30b8, 0x30d8,
2431 0x30e0, 0x30fc,
2432 0x3140, 0x357c,
2433 0x35a8, 0x35cc,
2434 0x35ec, 0x35ec,
2435 0x3600, 0x5624,
2436 0x56cc, 0x56ec,
2437 0x56f4, 0x5720,
2438 0x5728, 0x575c,
2439 0x580c, 0x5814,
2440 0x5890, 0x589c,
2441 0x58a4, 0x58ac,
2442 0x58b8, 0x58bc,
2443 0x5940, 0x595c,
2444 0x5980, 0x598c,
2445 0x59b0, 0x59c8,
2446 0x59d0, 0x59dc,
2447 0x59fc, 0x5a18,
2448 0x5a60, 0x5a6c,
2449 0x5a80, 0x5a8c,
2450 0x5a94, 0x5a9c,
2451 0x5b94, 0x5bfc,
2452 0x5c10, 0x5e48,
2453 0x5e50, 0x5e94,
2454 0x5ea0, 0x5eb0,
2455 0x5ec0, 0x5ec0,
2456 0x5ec8, 0x5ed0,
2457 0x5ee0, 0x5ee0,
2458 0x5ef0, 0x5ef0,
2459 0x5f00, 0x5f00,
2460 0x6000, 0x6020,
2461 0x6028, 0x6040,
2462 0x6058, 0x609c,
2463 0x60a8, 0x619c,
2464 0x7700, 0x7798,
2465 0x77c0, 0x7880,
2466 0x78cc, 0x78fc,
2467 0x7b00, 0x7b58,
2468 0x7b60, 0x7b84,
2469 0x7b8c, 0x7c54,
2470 0x7d00, 0x7d38,
2471 0x7d40, 0x7d84,
2472 0x7d8c, 0x7ddc,
2473 0x7de4, 0x7e04,
2474 0x7e10, 0x7e1c,
2475 0x7e24, 0x7e38,
2476 0x7e40, 0x7e44,
2477 0x7e4c, 0x7e78,
2478 0x7e80, 0x7edc,
2479 0x7ee8, 0x7efc,
2480 0x8dc0, 0x8de0,
2481 0x8df8, 0x8e04,
2482 0x8e10, 0x8e84,
2483 0x8ea0, 0x8f88,
2484 0x8fb8, 0x9058,
2485 0x9060, 0x9060,
2486 0x9068, 0x90f8,
2487 0x9100, 0x9124,
2488 0x9400, 0x9470,
2489 0x9600, 0x9600,
2490 0x9608, 0x9638,
2491 0x9640, 0x9704,
2492 0x9710, 0x971c,
2493 0x9800, 0x9808,
2494 0x9820, 0x983c,
2495 0x9850, 0x9864,
2496 0x9c00, 0x9c6c,
2497 0x9c80, 0x9cec,
2498 0x9d00, 0x9d6c,
2499 0x9d80, 0x9dec,
2500 0x9e00, 0x9e6c,
2501 0x9e80, 0x9eec,
2502 0x9f00, 0x9f6c,
2503 0x9f80, 0xa020,
2504 0xd004, 0xd03c,
2505 0xd100, 0xd118,
2506 0xd200, 0xd214,
2507 0xd220, 0xd234,
2508 0xd240, 0xd254,
2509 0xd260, 0xd274,
2510 0xd280, 0xd294,
2511 0xd2a0, 0xd2b4,
2512 0xd2c0, 0xd2d4,
2513 0xd2e0, 0xd2f4,
2514 0xd300, 0xd31c,
2515 0xdfc0, 0xdfe0,
2516 0xe000, 0xf008,
2517 0xf010, 0xf018,
2518 0xf020, 0xf028,
2519 0x11000, 0x11014,
2520 0x11048, 0x1106c,
2521 0x11074, 0x11088,
2522 0x11098, 0x11120,
2523 0x1112c, 0x1117c,
2524 0x11190, 0x112e0,
2525 0x11300, 0x1130c,
2526 0x12000, 0x1206c,
2527 0x19040, 0x1906c,
2528 0x19078, 0x19080,
2529 0x1908c, 0x190e8,
2530 0x190f0, 0x190f8,
2531 0x19100, 0x19110,
2532 0x19120, 0x19124,
2533 0x19150, 0x19194,
2534 0x1919c, 0x191b0,
2535 0x191d0, 0x191e8,
2536 0x19238, 0x19290,
2537 0x192a4, 0x192b0,
2538 0x19348, 0x1934c,
2539 0x193f8, 0x19418,
2540 0x19420, 0x19428,
2541 0x19430, 0x19444,
2542 0x1944c, 0x1946c,
2543 0x19474, 0x19474,
2544 0x19490, 0x194cc,
2545 0x194f0, 0x194f8,
2546 0x19c00, 0x19c48,
2547 0x19c50, 0x19c80,
2548 0x19c94, 0x19c98,
2549 0x19ca0, 0x19cbc,
2550 0x19ce4, 0x19ce4,
2551 0x19cf0, 0x19cf8,
2552 0x19d00, 0x19d28,
2553 0x19d50, 0x19d78,
2554 0x19d94, 0x19d98,
2555 0x19da0, 0x19de0,
2556 0x19df0, 0x19e10,
2557 0x19e50, 0x19e6c,
2558 0x19ea0, 0x19ebc,
2559 0x19ec4, 0x19ef4,
2560 0x19f04, 0x19f2c,
2561 0x19f34, 0x19f34,
2562 0x19f40, 0x19f50,
2563 0x19f90, 0x19fac,
2564 0x19fc4, 0x19fc8,
2565 0x19fd0, 0x19fe4,
2566 0x1a000, 0x1a004,
2567 0x1a010, 0x1a06c,
2568 0x1a0b0, 0x1a0e4,
2569 0x1a0ec, 0x1a0f8,
2570 0x1a100, 0x1a108,
2571 0x1a114, 0x1a120,
2572 0x1a128, 0x1a130,
2573 0x1a138, 0x1a138,
2574 0x1a190, 0x1a1c4,
2575 0x1a1fc, 0x1a1fc,
2576 0x1e008, 0x1e00c,
2577 0x1e040, 0x1e044,
2578 0x1e04c, 0x1e04c,
2579 0x1e284, 0x1e290,
2580 0x1e2c0, 0x1e2c0,
2581 0x1e2e0, 0x1e2e0,
2582 0x1e300, 0x1e384,
2583 0x1e3c0, 0x1e3c8,
2584 0x1e408, 0x1e40c,
2585 0x1e440, 0x1e444,
2586 0x1e44c, 0x1e44c,
2587 0x1e684, 0x1e690,
2588 0x1e6c0, 0x1e6c0,
2589 0x1e6e0, 0x1e6e0,
2590 0x1e700, 0x1e784,
2591 0x1e7c0, 0x1e7c8,
2592 0x1e808, 0x1e80c,
2593 0x1e840, 0x1e844,
2594 0x1e84c, 0x1e84c,
2595 0x1ea84, 0x1ea90,
2596 0x1eac0, 0x1eac0,
2597 0x1eae0, 0x1eae0,
2598 0x1eb00, 0x1eb84,
2599 0x1ebc0, 0x1ebc8,
2600 0x1ec08, 0x1ec0c,
2601 0x1ec40, 0x1ec44,
2602 0x1ec4c, 0x1ec4c,
2603 0x1ee84, 0x1ee90,
2604 0x1eec0, 0x1eec0,
2605 0x1eee0, 0x1eee0,
2606 0x1ef00, 0x1ef84,
2607 0x1efc0, 0x1efc8,
2608 0x1f008, 0x1f00c,
2609 0x1f040, 0x1f044,
2610 0x1f04c, 0x1f04c,
2611 0x1f284, 0x1f290,
2612 0x1f2c0, 0x1f2c0,
2613 0x1f2e0, 0x1f2e0,
2614 0x1f300, 0x1f384,
2615 0x1f3c0, 0x1f3c8,
2616 0x1f408, 0x1f40c,
2617 0x1f440, 0x1f444,
2618 0x1f44c, 0x1f44c,
2619 0x1f684, 0x1f690,
2620 0x1f6c0, 0x1f6c0,
2621 0x1f6e0, 0x1f6e0,
2622 0x1f700, 0x1f784,
2623 0x1f7c0, 0x1f7c8,
2624 0x1f808, 0x1f80c,
2625 0x1f840, 0x1f844,
2626 0x1f84c, 0x1f84c,
2627 0x1fa84, 0x1fa90,
2628 0x1fac0, 0x1fac0,
2629 0x1fae0, 0x1fae0,
2630 0x1fb00, 0x1fb84,
2631 0x1fbc0, 0x1fbc8,
2632 0x1fc08, 0x1fc0c,
2633 0x1fc40, 0x1fc44,
2634 0x1fc4c, 0x1fc4c,
2635 0x1fe84, 0x1fe90,
2636 0x1fec0, 0x1fec0,
2637 0x1fee0, 0x1fee0,
2638 0x1ff00, 0x1ff84,
2639 0x1ffc0, 0x1ffc8,
2640 0x30000, 0x30030,
2641 0x30100, 0x30168,
2642 0x30190, 0x301a0,
2643 0x301a8, 0x301b8,
2644 0x301c4, 0x301c8,
2645 0x301d0, 0x301d0,
2646 0x30200, 0x30320,
2647 0x30400, 0x304b4,
2648 0x304c0, 0x3052c,
2649 0x30540, 0x3061c,
2650 0x30800, 0x308a0,
2651 0x308c0, 0x30908,
2652 0x30910, 0x309b8,
2653 0x30a00, 0x30a04,
2654 0x30a0c, 0x30a14,
2655 0x30a1c, 0x30a2c,
2656 0x30a44, 0x30a50,
2657 0x30a74, 0x30a74,
2658 0x30a7c, 0x30afc,
2659 0x30b08, 0x30c24,
2660 0x30d00, 0x30d14,
2661 0x30d1c, 0x30d3c,
2662 0x30d44, 0x30d4c,
2663 0x30d54, 0x30d74,
2664 0x30d7c, 0x30d7c,
2665 0x30de0, 0x30de0,
2666 0x30e00, 0x30ed4,
2667 0x30f00, 0x30fa4,
2668 0x30fc0, 0x30fc4,
2669 0x31000, 0x31004,
2670 0x31080, 0x310fc,
2671 0x31208, 0x31220,
2672 0x3123c, 0x31254,
2673 0x31300, 0x31300,
2674 0x31308, 0x3131c,
2675 0x31338, 0x3133c,
2676 0x31380, 0x31380,
2677 0x31388, 0x313a8,
2678 0x313b4, 0x313b4,
2679 0x31400, 0x31420,
2680 0x31438, 0x3143c,
2681 0x31480, 0x31480,
2682 0x314a8, 0x314a8,
2683 0x314b0, 0x314b4,
2684 0x314c8, 0x314d4,
2685 0x31a40, 0x31a4c,
2686 0x31af0, 0x31b20,
2687 0x31b38, 0x31b3c,
2688 0x31b80, 0x31b80,
2689 0x31ba8, 0x31ba8,
2690 0x31bb0, 0x31bb4,
2691 0x31bc8, 0x31bd4,
2692 0x32140, 0x3218c,
2693 0x321f0, 0x321f4,
2694 0x32200, 0x32200,
2695 0x32218, 0x32218,
2696 0x32400, 0x32400,
2697 0x32408, 0x3241c,
2698 0x32618, 0x32620,
2699 0x32664, 0x32664,
2700 0x326a8, 0x326a8,
2701 0x326ec, 0x326ec,
2702 0x32a00, 0x32abc,
2703 0x32b00, 0x32b18,
2704 0x32b20, 0x32b38,
2705 0x32b40, 0x32b58,
2706 0x32b60, 0x32b78,
2707 0x32c00, 0x32c00,
2708 0x32c08, 0x32c3c,
2709 0x33000, 0x3302c,
2710 0x33034, 0x33050,
2711 0x33058, 0x33058,
2712 0x33060, 0x3308c,
2713 0x3309c, 0x330ac,
2714 0x330c0, 0x330c0,
2715 0x330c8, 0x330d0,
2716 0x330d8, 0x330e0,
2717 0x330ec, 0x3312c,
2718 0x33134, 0x33150,
2719 0x33158, 0x33158,
2720 0x33160, 0x3318c,
2721 0x3319c, 0x331ac,
2722 0x331c0, 0x331c0,
2723 0x331c8, 0x331d0,
2724 0x331d8, 0x331e0,
2725 0x331ec, 0x33290,
2726 0x33298, 0x332c4,
2727 0x332e4, 0x33390,
2728 0x33398, 0x333c4,
2729 0x333e4, 0x3342c,
2730 0x33434, 0x33450,
2731 0x33458, 0x33458,
2732 0x33460, 0x3348c,
2733 0x3349c, 0x334ac,
2734 0x334c0, 0x334c0,
2735 0x334c8, 0x334d0,
2736 0x334d8, 0x334e0,
2737 0x334ec, 0x3352c,
2738 0x33534, 0x33550,
2739 0x33558, 0x33558,
2740 0x33560, 0x3358c,
2741 0x3359c, 0x335ac,
2742 0x335c0, 0x335c0,
2743 0x335c8, 0x335d0,
2744 0x335d8, 0x335e0,
2745 0x335ec, 0x33690,
2746 0x33698, 0x336c4,
2747 0x336e4, 0x33790,
2748 0x33798, 0x337c4,
2749 0x337e4, 0x337fc,
2750 0x33814, 0x33814,
2751 0x33854, 0x33868,
2752 0x33880, 0x3388c,
2753 0x338c0, 0x338d0,
2754 0x338e8, 0x338ec,
2755 0x33900, 0x3392c,
2756 0x33934, 0x33950,
2757 0x33958, 0x33958,
2758 0x33960, 0x3398c,
2759 0x3399c, 0x339ac,
2760 0x339c0, 0x339c0,
2761 0x339c8, 0x339d0,
2762 0x339d8, 0x339e0,
2763 0x339ec, 0x33a90,
2764 0x33a98, 0x33ac4,
2765 0x33ae4, 0x33b10,
2766 0x33b24, 0x33b28,
2767 0x33b38, 0x33b50,
2768 0x33bf0, 0x33c10,
2769 0x33c24, 0x33c28,
2770 0x33c38, 0x33c50,
2771 0x33cf0, 0x33cfc,
2772 0x34000, 0x34030,
2773 0x34100, 0x34168,
2774 0x34190, 0x341a0,
2775 0x341a8, 0x341b8,
2776 0x341c4, 0x341c8,
2777 0x341d0, 0x341d0,
2778 0x34200, 0x34320,
2779 0x34400, 0x344b4,
2780 0x344c0, 0x3452c,
2781 0x34540, 0x3461c,
2782 0x34800, 0x348a0,
2783 0x348c0, 0x34908,
2784 0x34910, 0x349b8,
2785 0x34a00, 0x34a04,
2786 0x34a0c, 0x34a14,
2787 0x34a1c, 0x34a2c,
2788 0x34a44, 0x34a50,
2789 0x34a74, 0x34a74,
2790 0x34a7c, 0x34afc,
2791 0x34b08, 0x34c24,
2792 0x34d00, 0x34d14,
2793 0x34d1c, 0x34d3c,
2794 0x34d44, 0x34d4c,
2795 0x34d54, 0x34d74,
2796 0x34d7c, 0x34d7c,
2797 0x34de0, 0x34de0,
2798 0x34e00, 0x34ed4,
2799 0x34f00, 0x34fa4,
2800 0x34fc0, 0x34fc4,
2801 0x35000, 0x35004,
2802 0x35080, 0x350fc,
2803 0x35208, 0x35220,
2804 0x3523c, 0x35254,
2805 0x35300, 0x35300,
2806 0x35308, 0x3531c,
2807 0x35338, 0x3533c,
2808 0x35380, 0x35380,
2809 0x35388, 0x353a8,
2810 0x353b4, 0x353b4,
2811 0x35400, 0x35420,
2812 0x35438, 0x3543c,
2813 0x35480, 0x35480,
2814 0x354a8, 0x354a8,
2815 0x354b0, 0x354b4,
2816 0x354c8, 0x354d4,
2817 0x35a40, 0x35a4c,
2818 0x35af0, 0x35b20,
2819 0x35b38, 0x35b3c,
2820 0x35b80, 0x35b80,
2821 0x35ba8, 0x35ba8,
2822 0x35bb0, 0x35bb4,
2823 0x35bc8, 0x35bd4,
2824 0x36140, 0x3618c,
2825 0x361f0, 0x361f4,
2826 0x36200, 0x36200,
2827 0x36218, 0x36218,
2828 0x36400, 0x36400,
2829 0x36408, 0x3641c,
2830 0x36618, 0x36620,
2831 0x36664, 0x36664,
2832 0x366a8, 0x366a8,
2833 0x366ec, 0x366ec,
2834 0x36a00, 0x36abc,
2835 0x36b00, 0x36b18,
2836 0x36b20, 0x36b38,
2837 0x36b40, 0x36b58,
2838 0x36b60, 0x36b78,
2839 0x36c00, 0x36c00,
2840 0x36c08, 0x36c3c,
2841 0x37000, 0x3702c,
2842 0x37034, 0x37050,
2843 0x37058, 0x37058,
2844 0x37060, 0x3708c,
2845 0x3709c, 0x370ac,
2846 0x370c0, 0x370c0,
2847 0x370c8, 0x370d0,
2848 0x370d8, 0x370e0,
2849 0x370ec, 0x3712c,
2850 0x37134, 0x37150,
2851 0x37158, 0x37158,
2852 0x37160, 0x3718c,
2853 0x3719c, 0x371ac,
2854 0x371c0, 0x371c0,
2855 0x371c8, 0x371d0,
2856 0x371d8, 0x371e0,
2857 0x371ec, 0x37290,
2858 0x37298, 0x372c4,
2859 0x372e4, 0x37390,
2860 0x37398, 0x373c4,
2861 0x373e4, 0x3742c,
2862 0x37434, 0x37450,
2863 0x37458, 0x37458,
2864 0x37460, 0x3748c,
2865 0x3749c, 0x374ac,
2866 0x374c0, 0x374c0,
2867 0x374c8, 0x374d0,
2868 0x374d8, 0x374e0,
2869 0x374ec, 0x3752c,
2870 0x37534, 0x37550,
2871 0x37558, 0x37558,
2872 0x37560, 0x3758c,
2873 0x3759c, 0x375ac,
2874 0x375c0, 0x375c0,
2875 0x375c8, 0x375d0,
2876 0x375d8, 0x375e0,
2877 0x375ec, 0x37690,
2878 0x37698, 0x376c4,
2879 0x376e4, 0x37790,
2880 0x37798, 0x377c4,
2881 0x377e4, 0x377fc,
2882 0x37814, 0x37814,
2883 0x37854, 0x37868,
2884 0x37880, 0x3788c,
2885 0x378c0, 0x378d0,
2886 0x378e8, 0x378ec,
2887 0x37900, 0x3792c,
2888 0x37934, 0x37950,
2889 0x37958, 0x37958,
2890 0x37960, 0x3798c,
2891 0x3799c, 0x379ac,
2892 0x379c0, 0x379c0,
2893 0x379c8, 0x379d0,
2894 0x379d8, 0x379e0,
2895 0x379ec, 0x37a90,
2896 0x37a98, 0x37ac4,
2897 0x37ae4, 0x37b10,
2898 0x37b24, 0x37b28,
2899 0x37b38, 0x37b50,
2900 0x37bf0, 0x37c10,
2901 0x37c24, 0x37c28,
2902 0x37c38, 0x37c50,
2903 0x37cf0, 0x37cfc,
2904 0x40040, 0x40040,
2905 0x40080, 0x40084,
2906 0x40100, 0x40100,
2907 0x40140, 0x401bc,
2908 0x40200, 0x40214,
2909 0x40228, 0x40228,
2910 0x40240, 0x40258,
2911 0x40280, 0x40280,
2912 0x40304, 0x40304,
2913 0x40330, 0x4033c,
2914 0x41304, 0x413c8,
2915 0x413d0, 0x413dc,
2916 0x413f0, 0x413f0,
2917 0x41400, 0x4140c,
2918 0x41414, 0x4141c,
2919 0x41480, 0x414d0,
2920 0x44000, 0x4407c,
2921 0x440c0, 0x441ac,
2922 0x441b4, 0x4427c,
2923 0x442c0, 0x443ac,
2924 0x443b4, 0x4447c,
2925 0x444c0, 0x445ac,
2926 0x445b4, 0x4467c,
2927 0x446c0, 0x447ac,
2928 0x447b4, 0x4487c,
2929 0x448c0, 0x449ac,
2930 0x449b4, 0x44a7c,
2931 0x44ac0, 0x44bac,
2932 0x44bb4, 0x44c7c,
2933 0x44cc0, 0x44dac,
2934 0x44db4, 0x44e7c,
2935 0x44ec0, 0x44fac,
2936 0x44fb4, 0x4507c,
2937 0x450c0, 0x451ac,
2938 0x451b4, 0x451fc,
2939 0x45800, 0x45804,
2940 0x45810, 0x45830,
2941 0x45840, 0x45860,
2942 0x45868, 0x45868,
2943 0x45880, 0x45884,
2944 0x458a0, 0x458b0,
2945 0x45a00, 0x45a04,
2946 0x45a10, 0x45a30,
2947 0x45a40, 0x45a60,
2948 0x45a68, 0x45a68,
2949 0x45a80, 0x45a84,
2950 0x45aa0, 0x45ab0,
2951 0x460c0, 0x460e4,
2952 0x47000, 0x4703c,
2953 0x47044, 0x4708c,
2954 0x47200, 0x47250,
2955 0x47400, 0x47408,
2956 0x47414, 0x47420,
2957 0x47600, 0x47618,
2958 0x47800, 0x47814,
2959 0x47820, 0x4782c,
2960 0x50000, 0x50084,
2961 0x50090, 0x500cc,
2962 0x50300, 0x50384,
2963 0x50400, 0x50400,
2964 0x50800, 0x50884,
2965 0x50890, 0x508cc,
2966 0x50b00, 0x50b84,
2967 0x50c00, 0x50c00,
2968 0x51000, 0x51020,
2969 0x51028, 0x510b0,
2970 0x51300, 0x51324,
2971 };
2972
2973 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2974 const unsigned int *reg_ranges;
2975 int reg_ranges_size, range;
2976 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2977
2978 /* Select the right set of register ranges to dump depending on the
2979 * adapter chip type.
2980 */
2981 switch (chip_version) {
2982 case CHELSIO_T4:
2983 reg_ranges = t4_reg_ranges;
2984 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2985 break;
2986
2987 case CHELSIO_T5:
2988 reg_ranges = t5_reg_ranges;
2989 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2990 break;
2991
2992 case CHELSIO_T6:
2993 reg_ranges = t6_reg_ranges;
2994 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2995 break;
2996
2997 default:
2998 CH_ERR(adap,
2999 "Unsupported chip version %d\n", chip_version);
3000 return;
3001 }
3002
3003 /* Clear the register buffer and insert the appropriate register
3004 * values selected by the above register ranges.
3005 */
3006 memset(buf, 0, buf_size);
3007 for (range = 0; range < reg_ranges_size; range += 2) {
3008 unsigned int reg = reg_ranges[range];
3009 unsigned int last_reg = reg_ranges[range + 1];
3010 u32 *bufp = (u32 *)((char *)buf + reg);
3011
3012 /* Iterate across the register range filling in the register
3013 * buffer but don't write past the end of the register buffer.
3014 */
3015 while (reg <= last_reg && bufp < buf_end) {
3016 *bufp++ = t4_read_reg(adap, reg);
3017 reg += sizeof(u32);
3018 }
3019 }
3020 }
3021
3022 /*
3023 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
3024 */
3025 #define EEPROM_DELAY 10 // 10us per poll spin
3026 #define EEPROM_MAX_POLL 5000 // x 5000 == 50ms
3027
3028 #define EEPROM_STAT_ADDR 0x7bfc
3029 #define VPD_SIZE 0x800
3030 #define VPD_BASE 0x400
3031 #define VPD_BASE_OLD 0
3032 #define VPD_LEN 1024
3033 #define VPD_INFO_FLD_HDR_SIZE 3
3034 #define CHELSIO_VPD_UNIQUE_ID 0x82
3035
3036 /*
3037 * Small utility function to wait till any outstanding VPD Access is complete.
3038 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
3039 * VPD Access in flight. This allows us to handle the problem of having a
3040 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
3042 */
t4_seeprom_wait(struct adapter * adapter)3043 static int t4_seeprom_wait(struct adapter *adapter)
3044 {
3045 unsigned int base = adapter->params.pci.vpd_cap_addr;
3046 int max_poll;
3047
3048 /*
3049 * If no VPD Access is in flight, we can just return success right
3050 * away.
3051 */
3052 if (!adapter->vpd_busy)
3053 return 0;
3054
3055 /*
3056 * Poll the VPD Capability Address/Flag register waiting for it
3057 * to indicate that the operation is complete.
3058 */
3059 max_poll = EEPROM_MAX_POLL;
3060 do {
3061 u16 val;
3062
3063 udelay(EEPROM_DELAY);
3064 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
3065
3066 /*
3067 * If the operation is complete, mark the VPD as no longer
3068 * busy and return success.
3069 */
3070 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
3071 adapter->vpd_busy = 0;
3072 return 0;
3073 }
3074 } while (--max_poll);
3075
3076 /*
3077 * Failure! Note that we leave the VPD Busy status set in order to
3078 * avoid pushing a new VPD Access request into the VPD Capability till
3079 * the current operation eventually succeeds. It's a bug to issue a
3080 * new request when an existing request is in flight and will result
3081 * in corrupt hardware state.
3082 */
3083 return -ETIMEDOUT;
3084 }
3085
3086 /**
3087 * t4_seeprom_read - read a serial EEPROM location
3088 * @adapter: adapter to read
3089 * @addr: EEPROM virtual address
3090 * @data: where to store the read data
3091 *
3092 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
3093 * VPD capability. Note that this function must be called with a virtual
3094 * address.
3095 */
t4_seeprom_read(struct adapter * adapter,u32 addr,u32 * data)3096 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
3097 {
3098 unsigned int base = adapter->params.pci.vpd_cap_addr;
3099 int ret;
3100
3101 /*
3102 * VPD Accesses must alway be 4-byte aligned!
3103 */
3104 if (addr >= EEPROMVSIZE || (addr & 3))
3105 return -EINVAL;
3106
3107 /*
3108 * Wait for any previous operation which may still be in flight to
3109 * complete.
3110 */
3111 ret = t4_seeprom_wait(adapter);
3112 if (ret) {
3113 CH_ERR(adapter, "VPD still busy from previous operation\n");
3114 return ret;
3115 }
3116
3117 /*
3118 * Issue our new VPD Read request, mark the VPD as being busy and wait
3119 * for our request to complete. If it doesn't complete, note the
3120 * error and return it to our caller. Note that we do not reset the
3121 * VPD Busy status!
3122 */
3123 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
3124 adapter->vpd_busy = 1;
3125 adapter->vpd_flag = PCI_VPD_ADDR_F;
3126 ret = t4_seeprom_wait(adapter);
3127 if (ret) {
3128 CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
3129 return ret;
3130 }
3131
3132 /*
3133 * Grab the returned data, swizzle it into our endianess and
3134 * return success.
3135 */
3136 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
3137 *data = le32_to_cpu(*data);
3138 return 0;
3139 }
3140
3141 /**
3142 * t4_seeprom_write - write a serial EEPROM location
3143 * @adapter: adapter to write
3144 * @addr: virtual EEPROM address
3145 * @data: value to write
3146 *
3147 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
3148 * VPD capability. Note that this function must be called with a virtual
3149 * address.
3150 */
t4_seeprom_write(struct adapter * adapter,u32 addr,u32 data)3151 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
3152 {
3153 unsigned int base = adapter->params.pci.vpd_cap_addr;
3154 int ret;
3155 u32 stats_reg;
3156 int max_poll;
3157
3158 /*
3159 * VPD Accesses must alway be 4-byte aligned!
3160 */
3161 if (addr >= EEPROMVSIZE || (addr & 3))
3162 return -EINVAL;
3163
3164 /*
3165 * Wait for any previous operation which may still be in flight to
3166 * complete.
3167 */
3168 ret = t4_seeprom_wait(adapter);
3169 if (ret) {
3170 CH_ERR(adapter, "VPD still busy from previous operation\n");
3171 return ret;
3172 }
3173
3174 /*
3175 * Issue our new VPD Read request, mark the VPD as being busy and wait
3176 * for our request to complete. If it doesn't complete, note the
3177 * error and return it to our caller. Note that we do not reset the
3178 * VPD Busy status!
3179 */
3180 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
3181 cpu_to_le32(data));
3182 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
3183 (u16)addr | PCI_VPD_ADDR_F);
3184 adapter->vpd_busy = 1;
3185 adapter->vpd_flag = 0;
3186 ret = t4_seeprom_wait(adapter);
3187 if (ret) {
3188 CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
3189 return ret;
3190 }
3191
3192 /*
3193 * Reset PCI_VPD_DATA register after a transaction and wait for our
3194 * request to complete. If it doesn't complete, return error.
3195 */
3196 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
3197 max_poll = EEPROM_MAX_POLL;
3198 do {
3199 udelay(EEPROM_DELAY);
3200 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
3201 } while ((stats_reg & 0x1) && --max_poll);
3202 if (!max_poll)
3203 return -ETIMEDOUT;
3204
3205 /* Return success! */
3206 return 0;
3207 }
3208
3209 /**
3210 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
3211 * @phys_addr: the physical EEPROM address
3212 * @fn: the PCI function number
3213 * @sz: size of function-specific area
3214 *
3215 * Translate a physical EEPROM address to virtual. The first 1K is
3216 * accessed through virtual addresses starting at 31K, the rest is
3217 * accessed through virtual addresses starting at 0.
3218 *
3219 * The mapping is as follows:
3220 * [0..1K) -> [31K..32K)
3221 * [1K..1K+A) -> [ES-A..ES)
3222 * [1K+A..ES) -> [0..ES-A-1K)
3223 *
3224 * where A = @fn * @sz, and ES = EEPROM size.
3225 */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)3226 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3227 {
3228 fn *= sz;
3229 if (phys_addr < 1024)
3230 return phys_addr + (31 << 10);
3231 if (phys_addr < 1024 + fn)
3232 return EEPROMSIZE - fn + phys_addr - 1024;
3233 if (phys_addr < EEPROMSIZE)
3234 return phys_addr - 1024 - fn;
3235 return -EINVAL;
3236 }
3237
3238 /**
3239 * t4_seeprom_wp - enable/disable EEPROM write protection
3240 * @adapter: the adapter
3241 * @enable: whether to enable or disable write protection
3242 *
3243 * Enables or disables write protection on the serial EEPROM.
3244 */
t4_seeprom_wp(struct adapter * adapter,int enable)3245 int t4_seeprom_wp(struct adapter *adapter, int enable)
3246 {
3247 return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3248 }
3249
3250 /**
3251 * get_vpd_keyword_val - Locates an information field keyword in the VPD
3252 * @v: Pointer to buffered vpd data structure
3253 * @kw: The keyword to search for
3254 *
3255 * Returns the value of the information field keyword or
3256 * -ENOENT otherwise.
3257 */
get_vpd_keyword_val(const struct t4_vpd_hdr * v,const char * kw)3258 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3259 {
3260 int i;
3261 unsigned int offset , len;
3262 const u8 *buf = (const u8 *)v;
3263 const u8 *vpdr_len = &v->vpdr_len[0];
3264 offset = sizeof(struct t4_vpd_hdr);
3265 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3266
3267 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3268 return -ENOENT;
3269 }
3270
3271 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3272 if(memcmp(buf + i , kw , 2) == 0){
3273 i += VPD_INFO_FLD_HDR_SIZE;
3274 return i;
3275 }
3276
3277 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3278 }
3279
3280 return -ENOENT;
3281 }
3282
3283 /*
3284 * str_strip
3285 * Removes trailing whitespaces from string "s"
3286 * Based on strstrip() implementation in string.c
3287 */
str_strip(char * s)3288 static void str_strip(char *s)
3289 {
3290 size_t size;
3291 char *end;
3292
3293 size = strlen(s);
3294 if (!size)
3295 return;
3296
3297 end = s + size - 1;
3298 while (end >= s && isspace(*end))
3299 end--;
3300 *(end + 1) = '\0';
3301 }
3302
3303 /**
3304 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3305 * @adapter: adapter to read
3306 * @p: where to store the parameters
3307 *
3308 * Reads card parameters stored in VPD EEPROM.
3309 */
t4_get_raw_vpd_params(struct adapter * adapter,struct vpd_params * p)3310 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
3311 {
3312 int i, ret = 0, addr;
3313 int ec, sn, pn, na;
3314 u8 *vpd, csum;
3315 const struct t4_vpd_hdr *v;
3316
3317 vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
3318 if (!vpd)
3319 return -ENOMEM;
3320
3321 /* We have two VPD data structures stored in the adapter VPD area.
3322 * By default, Linux calculates the size of the VPD area by traversing
3323 * the first VPD area at offset 0x0, so we need to tell the OS what
3324 * our real VPD size is.
3325 */
3326 ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
3327 if (ret < 0)
3328 goto out;
3329
3330 /* Card information normally starts at VPD_BASE but early cards had
3331 * it at 0.
3332 */
3333 ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
3334 if (ret)
3335 goto out;
3336
3337 /* The VPD shall have a unique identifier specified by the PCI SIG.
3338 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
3339 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
3340 * is expected to automatically put this entry at the
3341 * beginning of the VPD.
3342 */
3343 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
3344
3345 for (i = 0; i < VPD_LEN; i += 4) {
3346 ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
3347 if (ret)
3348 goto out;
3349 }
3350 v = (const struct t4_vpd_hdr *)vpd;
3351
3352 #define FIND_VPD_KW(var,name) do { \
3353 var = get_vpd_keyword_val(v , name); \
3354 if (var < 0) { \
3355 CH_ERR(adapter, "missing VPD keyword " name "\n"); \
3356 ret = -EINVAL; \
3357 goto out; \
3358 } \
3359 } while (0)
3360
3361 FIND_VPD_KW(i, "RV");
3362 for (csum = 0; i >= 0; i--)
3363 csum += vpd[i];
3364
3365 if (csum) {
3366 CH_ERR(adapter,
3367 "corrupted VPD EEPROM, actual csum %u\n", csum);
3368 ret = -EINVAL;
3369 goto out;
3370 }
3371
3372 FIND_VPD_KW(ec, "EC");
3373 FIND_VPD_KW(sn, "SN");
3374 FIND_VPD_KW(pn, "PN");
3375 FIND_VPD_KW(na, "NA");
3376 #undef FIND_VPD_KW
3377
3378 memcpy(p->id, v->id_data, ID_LEN);
3379 str_strip((char *)p->id);
3380 memcpy(p->ec, vpd + ec, EC_LEN);
3381 str_strip((char *)p->ec);
3382 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3383 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3384 str_strip((char *)p->sn);
3385 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3386 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3387 str_strip((char *)p->pn);
3388 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3389 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3390 str_strip((char *)p->na);
3391
3392 out:
3393 kmem_free(vpd, sizeof(u8) * VPD_LEN);
3394 return ret < 0 ? ret : 0;
3395 }
3396
3397 /**
3398 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3399 * @adapter: adapter to read
3400 * @p: where to store the parameters
3401 *
3402 * Reads card parameters stored in VPD EEPROM and retrieves the Core
3403 * Clock. This can only be called after a connection to the firmware
3404 * is established.
3405 */
t4_get_vpd_params(struct adapter * adapter,struct vpd_params * p)3406 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3407 {
3408 u32 cclk_param, cclk_val;
3409 int ret;
3410
3411 /*
3412 * Grab the raw VPD parameters.
3413 */
3414 ret = t4_get_raw_vpd_params(adapter, p);
3415 if (ret)
3416 return ret;
3417
3418 /*
3419 * Ask firmware for the Core Clock since it knows how to translate the
3420 * Reference Clock ('V2') VPD field into a Core Clock value ...
3421 */
3422 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3423 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3424 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3425 1, &cclk_param, &cclk_val);
3426
3427 if (ret)
3428 return ret;
3429 p->cclk = cclk_val;
3430
3431 return 0;
3432 }
3433
/**
 * t4_get_pfres - retrieve PF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a physical
 * function.  The results are stored in @adapter->params.pfres.  Note
 * that despite the FW_PFVF_CMD name, the query is issued with VFN 0,
 * i.e. for the PF itself, not for a VF.
 */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_PFVF_CMD_PFN(adapter->pf) |
				    V_FW_PFVF_CMD_VFN(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Extract PF resource limits from the reply (each field is packed
	 * into a big-endian 32-bit word) and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
	pfres->pmask = G_FW_PFVF_CMD_PMASK(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = G_FW_PFVF_CMD_TC(word);
	pfres->nvi = G_FW_PFVF_CMD_NVI(word);
	pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
	pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);

	return 0;
}
3485
/*
 * Serial flash and firmware constants and flash config file constants.
 * The opcodes below match the common SPI NOR command set -- presumably
 * standard JEDEC opcodes (WREN 0x06, RDSR 0x05, PP 0x02, FAST_READ 0x0B,
 * SE 0xD8); confirm against the flash part's datasheet.
 */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};
3499
3500 /**
3501 * sf1_read - read data from the serial flash
3502 * @adapter: the adapter
3503 * @byte_cnt: number of bytes to read
3504 * @cont: whether another operation will be chained
3505 * @lock: whether to lock SF for PL access only
3506 * @valp: where to store the read data
3507 *
3508 * Reads up to 4 bytes of data from the serial flash. The location of
3509 * the read needs to be specified prior to calling this by issuing the
3510 * appropriate commands to the serial flash.
3511 */
sf1_read(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 * valp)3512 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3513 int lock, u32 *valp)
3514 {
3515 int ret;
3516
3517 if (!byte_cnt || byte_cnt > 4)
3518 return -EINVAL;
3519 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3520 return -EBUSY;
3521 t4_write_reg(adapter, A_SF_OP,
3522 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3523 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3524 if (!ret)
3525 *valp = t4_read_reg(adapter, A_SF_DATA);
3526 return ret;
3527 }
3528
3529 /**
3530 * sf1_write - write data to the serial flash
3531 * @adapter: the adapter
3532 * @byte_cnt: number of bytes to write
3533 * @cont: whether another operation will be chained
3534 * @lock: whether to lock SF for PL access only
3535 * @val: value to write
3536 *
3537 * Writes up to 4 bytes of data to the serial flash. The location of
3538 * the write needs to be specified prior to calling this by issuing the
3539 * appropriate commands to the serial flash.
3540 */
sf1_write(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 val)3541 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3542 int lock, u32 val)
3543 {
3544 if (!byte_cnt || byte_cnt > 4)
3545 return -EINVAL;
3546 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3547 return -EBUSY;
3548 t4_write_reg(adapter, A_SF_DATA, val);
3549 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3550 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3551 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3552 }
3553
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @ch_delay: delay between polls in ms (0 = no delay between polls)
 *
 * Wait for a flash operation to complete by polling the status register.
 * Returns 0 on completion, -EAGAIN if @attempts polls were exhausted, or
 * the error from the underlying SF read/write.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/*
		 * Issue RD_STATUS (one command byte out, one status byte
		 * back); bit 0 set means the operation is still in progress.
		 */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* in crash-dump context we can't sleep; busy-wait */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3585
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read (must be 32-bit aligned)
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie entirely within the flash and be word aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/*
	 * Build the FAST READ command word: byte-swap the 24-bit address
	 * so it goes out big-endian, with the opcode in the low byte.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/*
	 * Send the command, then consume one byte -- presumably the dummy
	 * byte required by the FAST READ opcode; confirm against the flash
	 * part's datasheet.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained (cont) until the final word. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3624
/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 * If @byte_oriented is set the write data is stored as byte stream
 * (i.e. matches what on disk), otherwise in big-endian.  After writing,
 * the page is read back and compared against @data to verify the write.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];
	/* offset of the write within its 256-byte page */
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then issue the PROGRAM PAGE command + address. */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out 4 bytes at a time, chained until the end. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced by n in the loop above; data - n is the start. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
		       "failed to correctly write the flash page at %#x\n",
		       addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
3690
3691 /**
3692 * t4_get_fw_version - read the firmware version
3693 * @adapter: the adapter
3694 * @vers: where to place the version
3695 *
3696 * Reads the FW version from flash.
3697 */
t4_get_fw_version(struct adapter * adapter,u32 * vers)3698 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3699 {
3700 return t4_read_flash(adapter, FLASH_FW_START +
3701 offsetof(struct fw_hdr, fw_ver), 1,
3702 vers, 0);
3703 }
3704
3705 /**
3706 * t4_get_bs_version - read the firmware bootstrap version
3707 * @adapter: the adapter
3708 * @vers: where to place the version
3709 *
3710 * Reads the FW Bootstrap version from flash.
3711 */
t4_get_bs_version(struct adapter * adapter,u32 * vers)3712 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3713 {
3714 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3715 offsetof(struct fw_hdr, fw_ver), 1,
3716 vers, 0);
3717 }
3718
3719 /**
3720 * t4_get_tp_version - read the TP microcode version
3721 * @adapter: the adapter
3722 * @vers: where to place the version
3723 *
3724 * Reads the TP microcode version from flash.
3725 */
t4_get_tp_version(struct adapter * adapter,u32 * vers)3726 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3727 {
3728 return t4_read_flash(adapter, FLASH_FW_START +
3729 offsetof(struct fw_hdr, tp_microcode_ver),
3730 1, vers, 0);
3731 }
3732
3733 /**
3734 * t4_get_exprom_version - return the Expansion ROM version (if any)
3735 * @adapter: the adapter
3736 * @vers: where to place the version
3737 *
3738 * Reads the Expansion ROM header from FLASH and returns the version
3739 * number (if present) through the @vers return value pointer. We return
3740 * this in the Firmware Version Format since it's convenient. Return
3741 * 0 on success, -ENOENT if no Expansion ROM is present.
3742 */
t4_get_exprom_version(struct adapter * adapter,u32 * vers)3743 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
3744 {
3745 struct exprom_header {
3746 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3747 unsigned char hdr_ver[4]; /* Expansion ROM version */
3748 } *hdr;
3749 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3750 sizeof(u32))];
3751 int ret;
3752
3753 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
3754 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3755 0);
3756 if (ret)
3757 return ret;
3758
3759 hdr = (struct exprom_header *)exprom_header_buf;
3760 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3761 return -ENOENT;
3762
3763 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3764 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3765 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3766 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3767 return 0;
3768 }
3769
3770 /**
3771 * t4_get_scfg_version - return the Serial Configuration version
3772 * @adapter: the adapter
3773 * @vers: where to place the version
3774 *
3775 * Reads the Serial Configuration Version via the Firmware interface
3776 * (thus this can only be called once we're ready to issue Firmware
3777 * commands). The format of the Serial Configuration version is
3778 * adapter specific. Returns 0 on success, an error on failure.
3779 *
3780 * Note that early versions of the Firmware didn't include the ability
3781 * to retrieve the Serial Configuration version, so we zero-out the
3782 * return-value parameter in that case to avoid leaving it with
3783 * garbage in it.
3784 *
3785 * Also note that the Firmware will return its cached copy of the Serial
3786 * Initialization Revision ID, not the actual Revision ID as written in
3787 * the Serial EEPROM. This is only an issue if a new VPD has been written
3788 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3789 * it's best to defer calling this routine till after a FW_RESET_CMD has
3790 * been issued if the Host Driver will be performing a full adapter
3791 * initialization.
3792 */
t4_get_scfg_version(struct adapter * adapter,u32 * vers)3793 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3794 {
3795 u32 scfgrev_param;
3796 int ret;
3797
3798 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3799 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3800 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3801 1, &scfgrev_param, vers);
3802 if (ret)
3803 *vers = 0;
3804 return ret;
3805 }
3806
3807 /**
3808 * t4_get_vpd_version - return the VPD version
3809 * @adapter: the adapter
3810 * @vers: where to place the version
3811 *
3812 * Reads the VPD via the Firmware interface (thus this can only be called
3813 * once we're ready to issue Firmware commands). The format of the
3814 * VPD version is adapter specific. Returns 0 on success, an error on
3815 * failure.
3816 *
3817 * Note that early versions of the Firmware didn't include the ability
3818 * to retrieve the VPD version, so we zero-out the return-value parameter
3819 * in that case to avoid leaving it with garbage in it.
3820 *
3821 * Also note that the Firmware will return its cached copy of the VPD
3822 * Revision ID, not the actual Revision ID as written in the Serial
3823 * EEPROM. This is only an issue if a new VPD has been written and the
3824 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3825 * to defer calling this routine till after a FW_RESET_CMD has been issued
3826 * if the Host Driver will be performing a full adapter initialization.
3827 */
t4_get_vpd_version(struct adapter * adapter,u32 * vers)3828 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3829 {
3830 u32 vpdrev_param;
3831 int ret;
3832
3833 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3834 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3835 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3836 1, &vpdrev_param, vers);
3837 if (ret)
3838 *vers = 0;
3839 return ret;
3840 }
3841
3842 /**
3843 * t4_get_version_info - extract various chip/firmware version information
3844 * @adapter: the adapter
3845 *
3846 * Reads various chip/firmware version numbers and stores them into the
3847 * adapter Adapter Parameters structure. If any of the efforts fails
3848 * the first failure will be returned, but all of the version numbers
3849 * will be read.
3850 */
t4_get_version_info(struct adapter * adapter)3851 int t4_get_version_info(struct adapter *adapter)
3852 {
3853 int ret = 0;
3854
3855 #define FIRST_RET(__getvinfo) \
3856 do { \
3857 int __ret = __getvinfo; \
3858 if (__ret && !ret) \
3859 ret = __ret; \
3860 } while (0)
3861
3862 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3863 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3864 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3865 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3866 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3867 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3868
3869 #undef FIRST_RET
3870
3871 return ret;
3872 }
3873
/**
 * t4_dump_version_info - dump all of the adapter configuration IDs
 * @adapter: the adapter
 *
 * Dumps all of the various bits of adapter configuration version/revision
 * IDs information.  This is typically called at some point after
 * t4_get_version_info() has been called.  A zero version word is treated
 * as "not loaded" for FW, Bootstrap, TP and Expansion ROM.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3956
3957 /**
3958 * t4_check_fw_version - check if the FW is supported with this driver
3959 * @adap: the adapter
3960 *
3961 * Checks if an adapter's FW is compatible with the driver. Returns 0
3962 * if there's exact match, a negative error if the version could not be
3963 * read or there's a major version mismatch
3964 */
t4_check_fw_version(struct adapter * adap)3965 int t4_check_fw_version(struct adapter *adap)
3966 {
3967 int ret, major, minor, micro;
3968 int exp_major, exp_minor, exp_micro;
3969 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3970
3971 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3972 if (ret)
3973 return ret;
3974
3975 major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3976 minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3977 micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3978
3979 switch (chip_version) {
3980 case CHELSIO_T4:
3981 exp_major = T4FW_MIN_VERSION_MAJOR;
3982 exp_minor = T4FW_MIN_VERSION_MINOR;
3983 exp_micro = T4FW_MIN_VERSION_MICRO;
3984 break;
3985 case CHELSIO_T5:
3986 exp_major = T5FW_MIN_VERSION_MAJOR;
3987 exp_minor = T5FW_MIN_VERSION_MINOR;
3988 exp_micro = T5FW_MIN_VERSION_MICRO;
3989 break;
3990 case CHELSIO_T6:
3991 exp_major = T6FW_MIN_VERSION_MAJOR;
3992 exp_minor = T6FW_MIN_VERSION_MINOR;
3993 exp_micro = T6FW_MIN_VERSION_MICRO;
3994 break;
3995 default:
3996 CH_ERR(adap, "Unsupported chip type, %x\n",
3997 adap->params.chip);
3998 return -EINVAL;
3999 }
4000
4001 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
4002 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
4003 CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
4004 "supported firmware is %u.%u.%u.\n", major, minor,
4005 micro, exp_major, exp_minor, exp_micro);
4006 return -EFAULT;
4007 }
4008 return 0;
4009 }
4010
4011 /* Is the given firmware API compatible with the one the driver was compiled
4012 * with?
4013 */
fw_compatible(const struct fw_hdr * hdr1,const struct fw_hdr * hdr2)4014 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
4015 {
4016
4017 /* short circuit if it's the exact same firmware version */
4018 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
4019 return 1;
4020
4021 /*
4022 * XXX: Is this too conservative? Perhaps I should limit this to the
4023 * features that are supported in the driver.
4024 */
4025 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
4026 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
4027 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
4028 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
4029 return 1;
4030 #undef SAME_INTF
4031
4032 return 0;
4033 }
4034
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 *
 * @k is the filesystem firmware's version word and @c the card firmware's
 * version word (both host byte order).  @t4_fw_install is the install
 * policy: 0 = never install, 2 = install whenever the versions differ;
 * otherwise install only when the filesystem firmware is newer or the
 * card firmware is unusable.  Returns 1 to install, 0 to keep the card's.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c, int t4_fw_install)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	if (t4_fw_install == 2 && k != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	return 0;

install:
	/* Policy 0 means we may complain but never flash new firmware. */
	if (t4_fw_install == 0) {
		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
		       "but the driver is prohibited from installing a "
		       "different firmware on the card.\n",
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       reason);

		return (0);
	}

	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
	       "installing firmware %u.%u.%u.%u on card.\n",
	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));

	return 1;
}
4082
/*
 * t4_prep_fw - decide which firmware to run and install it if necessary
 *
 * Compares the firmware on the card, the firmware image supplied by the
 * caller (@fw_data/@fw_size, may be NULL), and the interface version the
 * driver was compiled against (@fw_info->fw_hdr).  If the filesystem
 * image should be installed (see should_install_fs_fw()) and the device
 * is uninitialized, it is flashed via t4_fw_upgrade() and *@reset is
 * cleared since the upgrade already reset the chip.  On success the
 * adapter's cached fw/tp version fields are updated from @card_fw.
 * Returns 0 on success or a positive errno (note: errors from
 * t4_read_flash/t4_fw_upgrade are negated into positive values here).
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
		       "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* Check the caller-supplied (filesystem) image, if any. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					t4_fw_install)) {

		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
			       "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* driver / card / filesystem version words for the message */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
		       "fw_install %d, chip state %d, "
		       "driver compiled with %d.%d.%d.%d, "
		       "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
		       t4_fw_install, state,
		       G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		       G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4173
4174 /**
4175 * t4_flash_erase_sectors - erase a range of flash sectors
4176 * @adapter: the adapter
4177 * @start: the first sector to erase
4178 * @end: the last sector to erase
4179 *
4180 * Erases the sectors in the given inclusive range.
4181 */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)4182 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4183 {
4184 int ret = 0;
4185
4186 if (end >= adapter->params.sf_nsec)
4187 return -EINVAL;
4188
4189 while (start <= end) {
4190 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4191 (ret = sf1_write(adapter, 4, 0, 1,
4192 SF_ERASE_SECTOR | (start << 8))) != 0 ||
4193 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4194 CH_ERR(adapter,
4195 "erase of flash sector %d failed, error %d\n",
4196 start, ret);
4197 break;
4198 }
4199 start++;
4200 }
4201 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4202 return ret;
4203 }
4204
4205 /**
4206 * t4_flash_cfg_addr - return the address of the flash configuration file
4207 * @adapter: the adapter
4208 *
4209 * Return the address within the flash where the Firmware Configuration
4210 * File is stored, or an error if the device FLASH is too small to contain
4211 * a Firmware Configuration File.
4212 */
t4_flash_cfg_addr(struct adapter * adapter)4213 int t4_flash_cfg_addr(struct adapter *adapter)
4214 {
4215 /*
4216 * If the device FLASH isn't large enough to hold a Firmware
4217 * Configuration File, return an error.
4218 */
4219 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4220 return -ENOSPC;
4221
4222 return FLASH_CFG_START;
4223 }
4224
4225 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
4226 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
4227 * and emit an error message for mismatched firmware to save our caller the
4228 * effort ...
4229 */
t4_fw_matches_chip(const struct adapter * adap,const struct fw_hdr * hdr)4230 static int t4_fw_matches_chip(const struct adapter *adap,
4231 const struct fw_hdr *hdr)
4232 {
4233 /*
4234 * The expression below will return FALSE for any unsupported adapter
4235 * which will keep us "honest" in the future ...
4236 */
4237 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4238 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4239 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4240 return 1;
4241
4242 CH_ERR(adap,
4243 "FW image (%d) is not suitable for this adapter (%d)\n",
4244 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4245 return 0;
4246 }
4247
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 * @bootstrap: indicates if the binary is a bootstrap fw
 *
 * Write the supplied firmware image to the card's serial flash after
 * validating its size, header length field, chip match, and checksum.
 * The header's version word is written last so that a partially
 * completed download leaves a visibly bad version on the flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap and regular firmware live in different flash regions. */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
		       "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
		       "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The image's 32-bit word sum must come to all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
		       "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, write the real version word into the header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	else {
		/* Refresh the cached version from what's now on flash. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4352
4353 /**
4354 * t4_phy_fw_ver - return current PHY firmware version
4355 * @adap: the adapter
4356 * @phy_fw_ver: return value buffer for PHY firmware version
4357 *
4358 * Returns the current version of external PHY firmware on the
4359 * adapter.
4360 */
t4_phy_fw_ver(struct adapter * adap,int * phy_fw_ver)4361 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4362 {
4363 u32 param, val;
4364 int ret;
4365
4366 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4367 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4368 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4369 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4370 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4371 ¶m, &val);
4372 if (ret < 0)
4373 return ret;
4374 *phy_fw_ver = val;
4375 return 0;
4376 }
4377
4378 /**
4379 * t4_load_phy_fw - download port PHY firmware
4380 * @adap: the adapter
4381 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
4382 * @lock: the lock to use to guard the memory copy
4383 * @phy_fw_version: function to check PHY firmware versions
4384 * @phy_fw_data: the PHY firmware image to write
4385 * @phy_fw_size: image size
4386 *
4387 * Transfer the specified PHY firmware to the adapter. If a non-NULL
4388 * @phy_fw_version is supplied, then it will be used to determine if
4389 * it's necessary to perform the transfer by comparing the version
4390 * of any existing adapter PHY firmware with that of the passed in
4391 * PHY firmware image. If @lock is non-NULL then it will be used
4392 * around the call to t4_memory_rw() which transfers the PHY firmware
4393 * to the adapter.
4394 *
4395 * A negative error number will be returned if an error occurs. If
4396 * version number support is available and there's no need to upgrade
4397 * the firmware, 0 will be returned. If firmware is successfully
4398 * transferred to the adapter, 1 will be retured.
4399 *
4400 * NOTE: some adapters only have local RAM to store the PHY firmware. As
4401 * a result, a RESET of the adapter would cause that RAM to lose its
4402 * contents. Thus, loading PHY firmware on such adapters must happen after any
4403 * FW_RESET_CMDs ...
4404 */
t4_load_phy_fw(struct adapter * adap,int win,t4_os_lock_t * lock,int (* phy_fw_version)(const u8 *,size_t),const u8 * phy_fw_data,size_t phy_fw_size)4405 int t4_load_phy_fw(struct adapter *adap,
4406 int win, t4_os_lock_t *lock,
4407 int (*phy_fw_version)(const u8 *, size_t),
4408 const u8 *phy_fw_data, size_t phy_fw_size)
4409 {
4410 unsigned long mtype = 0, maddr = 0;
4411 u32 param, val;
4412 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4413 int ret;
4414
4415 /*
4416 * If we have version number support, then check to see if the adapter
4417 * already has up-to-date PHY firmware loaded.
4418 */
4419 if (phy_fw_version) {
4420 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4421 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4422 if (ret < 0)
4423 return ret;;
4424
4425 if (cur_phy_fw_ver >= new_phy_fw_vers) {
4426 CH_WARN(adap, "PHY Firmware already up-to-date, "
4427 "version %#x\n", cur_phy_fw_ver);
4428 return 0;
4429 }
4430 }
4431
4432 /*
4433 * Ask the firmware where it wants us to copy the PHY firmware image.
4434 * The size of the file requires a special version of the READ coommand
4435 * which will pass the file size via the values field in PARAMS_CMD and
4436 * retreive the return value from firmware and place it in the same
4437 * buffer values
4438 */
4439 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4440 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4441 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4442 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4443 val = phy_fw_size;
4444 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4445 ¶m, &val, 1, true);
4446 if (ret < 0)
4447 return ret;
4448 mtype = val >> 8;
4449 maddr = (val & 0xff) << 16;
4450
4451 /*
4452 * Copy the supplied PHY Firmware image to the adapter memory location
4453 * allocated by the adapter firmware.
4454 */
4455 if (lock)
4456 t4_os_lock(lock);
4457 ret = t4_memory_rw(adap, win, mtype, maddr,
4458 phy_fw_size, (__be32*)phy_fw_data,
4459 T4_MEMORY_WRITE);
4460 if (lock)
4461 t4_os_unlock(lock);
4462 if (ret)
4463 return ret;
4464
4465 /*
4466 * Tell the firmware that the PHY firmware image has been written to
4467 * RAM and it can now start copying it over to the PHYs. The chip
4468 * firmware will RESET the affected PHYs as part of this operation
4469 * leaving them running the new PHY firmware image.
4470 */
4471 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4472 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4473 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4474 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4475 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4476 ¶m, &val, 30000);
4477
4478 /*
4479 * If we have version number support, then check to see that the new
4480 * firmware got loaded properly.
4481 */
4482 if (phy_fw_version) {
4483 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4484 if (ret < 0)
4485 return ret;
4486
4487 if (cur_phy_fw_ver != new_phy_fw_vers) {
4488 CH_WARN(adap, "PHY Firmware did not update: "
4489 "version on adapter %#x, "
4490 "version flashed %#x\n",
4491 cur_phy_fw_ver, new_phy_fw_vers);
4492 return -ENXIO;
4493 }
4494 }
4495
4496 return 1;
4497 }
4498
4499 /**
4500 * t4_fwcache - firmware cache operation
4501 * @adap: the adapter
4502 * @op : the operation (flush or flush and invalidate)
4503 */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)4504 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4505 {
4506 struct fw_params_cmd c;
4507
4508 memset(&c, 0, sizeof(c));
4509 c.op_to_vfn =
4510 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4511 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4512 V_FW_PARAMS_CMD_PFN(adap->pf) |
4513 V_FW_PARAMS_CMD_VFN(0));
4514 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4515 c.param[0].mnem =
4516 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4517 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4518 c.param[0].val = (__force __be32)op;
4519
4520 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4521 }
4522
t4_cim_read_pif_la(struct adapter * adap,u32 * pif_req,u32 * pif_rsp,unsigned int * pif_req_wrptr,unsigned int * pif_rsp_wrptr)4523 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
4524 unsigned int *pif_req_wrptr,
4525 unsigned int *pif_rsp_wrptr)
4526 {
4527 int i, j;
4528 u32 cfg, val, req, rsp;
4529
4530 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
4531 if (cfg & F_LADBGEN)
4532 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
4533
4534 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
4535 req = G_POLADBGWRPTR(val);
4536 rsp = G_PILADBGWRPTR(val);
4537 if (pif_req_wrptr)
4538 *pif_req_wrptr = req;
4539 if (pif_rsp_wrptr)
4540 *pif_rsp_wrptr = rsp;
4541
4542 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
4543 for (j = 0; j < 6; j++) {
4544 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
4545 V_PILADBGRDPTR(rsp));
4546 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
4547 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
4548 req++;
4549 rsp++;
4550 }
4551 req = (req + 2) & M_POLADBGRDPTR;
4552 rsp = (rsp + 2) & M_PILADBGRDPTR;
4553 }
4554 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
4555 }
4556
t4_cim_read_ma_la(struct adapter * adap,u32 * ma_req,u32 * ma_rsp)4557 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
4558 {
4559 u32 cfg;
4560 int i, j, idx;
4561
4562 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
4563 if (cfg & F_LADBGEN)
4564 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
4565
4566 for (i = 0; i < CIM_MALA_SIZE; i++) {
4567 for (j = 0; j < 5; j++) {
4568 idx = 8 * i + j;
4569 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
4570 V_PILADBGRDPTR(idx));
4571 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
4572 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
4573 }
4574 }
4575 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
4576 }
4577
t4_ulprx_read_la(struct adapter * adap,u32 * la_buf)4578 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
4579 {
4580 unsigned int i, j;
4581
4582 for (i = 0; i < 8; i++) {
4583 u32 *p = la_buf + i;
4584
4585 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
4586 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
4587 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
4588 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
4589 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
4590 }
4591 }
4592
/* Platform-specific handler invoked when a matching interrupt bit is set. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt dispatch table; tables are
 * terminated by an entry with mask == 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;	/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4602
4603 /**
4604 * t4_handle_intr_status - table driven interrupt handler
4605 * @adapter: the adapter that generated the interrupt
4606 * @reg: the interrupt status register to process
4607 * @acts: table of interrupt actions
4608 *
4609 * A table driven interrupt handler that applies a set of masks to an
4610 * interrupt status word and performs the corresponding actions if the
4611 * interrupts described by the mask have occurred. The actions include
4612 * optionally emitting a warning or alert message. The table is terminated
4613 * by an entry specifying mask 0. Returns the number of fatal interrupt
4614 * conditions.
4615 */
t4_handle_intr_status(struct adapter * adapter,unsigned int reg,const struct intr_info * acts)4616 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4617 const struct intr_info *acts)
4618 {
4619 int fatal = 0;
4620 unsigned int mask = 0;
4621 unsigned int status = t4_read_reg(adapter, reg);
4622
4623 for ( ; acts->mask; ++acts) {
4624 if (!(status & acts->mask))
4625 continue;
4626 if (acts->fatal) {
4627 fatal++;
4628 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4629 status & acts->mask);
4630 } else if (acts->msg)
4631 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4632 status & acts->mask);
4633 if (acts->int_handler)
4634 acts->int_handler(adapter);
4635 mask |= acts->mask;
4636 }
4637 status &= mask;
4638 if (status) /* clear processed interrupts */
4639 t4_write_reg(adapter, reg, status);
4640 return fatal;
4641 }
4642
4643 /*
4644 * Interrupt handler for the PCIE module.
4645 */
pcie_intr_handler(struct adapter * adapter)4646 static void pcie_intr_handler(struct adapter *adapter)
4647 {
4648 static const struct intr_info sysbus_intr_info[] = {
4649 { F_RNPP, "RXNP array parity error", -1, 1 },
4650 { F_RPCP, "RXPC array parity error", -1, 1 },
4651 { F_RCIP, "RXCIF array parity error", -1, 1 },
4652 { F_RCCP, "Rx completions control array parity error", -1, 1 },
4653 { F_RFTP, "RXFT array parity error", -1, 1 },
4654 { 0 }
4655 };
4656 static const struct intr_info pcie_port_intr_info[] = {
4657 { F_TPCP, "TXPC array parity error", -1, 1 },
4658 { F_TNPP, "TXNP array parity error", -1, 1 },
4659 { F_TFTP, "TXFT array parity error", -1, 1 },
4660 { F_TCAP, "TXCA array parity error", -1, 1 },
4661 { F_TCIP, "TXCIF array parity error", -1, 1 },
4662 { F_RCAP, "RXCA array parity error", -1, 1 },
4663 { F_OTDD, "outbound request TLP discarded", -1, 1 },
4664 { F_RDPE, "Rx data parity error", -1, 1 },
4665 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
4666 { 0 }
4667 };
4668 static const struct intr_info pcie_intr_info[] = {
4669 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
4670 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
4671 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
4672 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4673 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4674 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4675 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4676 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
4677 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
4678 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4679 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
4680 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4681 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4682 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
4683 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4684 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4685 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
4686 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4687 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4688 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4689 { F_FIDPERR, "PCI FID parity error", -1, 1 },
4690 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
4691 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
4692 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4693 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
4694 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
4695 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
4696 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
4697 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
4698 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
4699 0 },
4700 { 0 }
4701 };
4702
4703 static struct intr_info t5_pcie_intr_info[] = {
4704 { F_MSTGRPPERR, "Master Response Read Queue parity error",
4705 -1, 1 },
4706 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
4707 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
4708 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4709 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4710 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4711 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4712 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
4713 -1, 1 },
4714 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
4715 -1, 1 },
4716 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4717 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
4718 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4719 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4720 { F_DREQWRPERR, "PCI DMA channel write request parity error",
4721 -1, 1 },
4722 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4723 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4724 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
4725 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4726 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4727 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4728 { F_FIDPERR, "PCI FID parity error", -1, 1 },
4729 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
4730 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
4731 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4732 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
4733 -1, 1 },
4734 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
4735 -1, 1 },
4736 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
4737 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
4738 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4739 { F_READRSPERR, "Outbound read error", -1,
4740 0 },
4741 { 0 }
4742 };
4743
4744 int fat;
4745
4746 if (is_t4(adapter->params.chip))
4747 fat = t4_handle_intr_status(adapter,
4748 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4749 sysbus_intr_info) +
4750 t4_handle_intr_status(adapter,
4751 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4752 pcie_port_intr_info) +
4753 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4754 pcie_intr_info);
4755 else
4756 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4757 t5_pcie_intr_info);
4758 if (fat)
4759 t4_fatal_err(adapter);
4760 }
4761
4762 /*
4763 * TP interrupt handler.
4764 */
tp_intr_handler(struct adapter * adapter)4765 static void tp_intr_handler(struct adapter *adapter)
4766 {
4767 static const struct intr_info tp_intr_info[] = {
4768 { 0x3fffffff, "TP parity error", -1, 1 },
4769 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4770 { 0 }
4771 };
4772
4773 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4774 t4_fatal_err(adapter);
4775 }
4776
4777 /*
4778 * SGE interrupt handler.
4779 */
sge_intr_handler(struct adapter * adapter)4780 static void sge_intr_handler(struct adapter *adapter)
4781 {
4782 u32 v = 0, perr;
4783 u32 err;
4784
4785 static const struct intr_info sge_intr_info[] = {
4786 { F_ERR_CPL_EXCEED_IQE_SIZE,
4787 "SGE received CPL exceeding IQE size", -1, 1 },
4788 { F_ERR_INVALID_CIDX_INC,
4789 "SGE GTS CIDX increment too large", -1, 0 },
4790 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
4791 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
4792 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4793 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4794 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
4795 0 },
4796 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
4797 0 },
4798 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
4799 0 },
4800 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
4801 0 },
4802 { F_ERR_ING_CTXT_PRIO,
4803 "SGE too many priority ingress contexts", -1, 0 },
4804 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
4805 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
4806 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
4807 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
4808 "SGE PCIe error for a DBP thread", -1, 0 },
4809 { 0 }
4810 };
4811
4812 static struct intr_info t4t5_sge_intr_info[] = {
4813 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
4814 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
4815 { F_ERR_EGR_CTXT_PRIO,
4816 "SGE too many priority egress contexts", -1, 0 },
4817 { 0 }
4818 };
4819
4820 /*
4821 * For now, treat below interrupts as fatal so that we disable SGE and
4822 * get better debug */
4823 static struct intr_info t6_sge_intr_info[] = {
4824 { F_FATAL_WRE_LEN,
4825 "SGE Actual WRE packet is less than advertized length",
4826 -1, 1 },
4827 { 0 }
4828 };
4829
4830 perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
4831 if (perr) {
4832 v |= perr;
4833 CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
4834 }
4835 perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
4836 if (perr) {
4837 v |= perr;
4838 CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
4839 }
4840 if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
4841 perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
4842 if (perr) {
4843 v |= perr;
4844 CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
4845 }
4846 }
4847
4848 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4849 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4850 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4851 t4t5_sge_intr_info);
4852 else
4853 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4854 t6_sge_intr_info);
4855
4856 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4857 if (err & F_ERROR_QID_VALID) {
4858 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4859 if (err & F_UNCAPTURED_ERROR)
4860 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4861 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4862 F_UNCAPTURED_ERROR);
4863 }
4864
4865 if (v != 0)
4866 t4_fatal_err(adapter);
4867 }
4868
/* Aggregate parity-error masks covering all CIM outbound / inbound queues. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	/* Decode of A_CIM_HOST_INT_CAUSE. */
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* Decode of A_CIM_HOST_UPACC_INT_CAUSE (uP access errors). */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash. So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			/* Spurious Timer0: clear it before the table walk so
			 * it isn't counted as a fatal condition below. */
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4948
4949 /*
4950 * ULP RX interrupt handler.
4951 */
ulprx_intr_handler(struct adapter * adapter)4952 static void ulprx_intr_handler(struct adapter *adapter)
4953 {
4954 static const struct intr_info ulprx_intr_info[] = {
4955 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4956 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4957 { 0x7fffff, "ULPRX parity error", -1, 1 },
4958 { 0 }
4959 };
4960
4961 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4962 t4_fatal_err(adapter);
4963 }
4964
4965 /*
4966 * ULP TX interrupt handler.
4967 */
ulptx_intr_handler(struct adapter * adapter)4968 static void ulptx_intr_handler(struct adapter *adapter)
4969 {
4970 static const struct intr_info ulptx_intr_info[] = {
4971 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4972 0 },
4973 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4974 0 },
4975 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4976 0 },
4977 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4978 0 },
4979 { 0xfffffff, "ULPTX parity error", -1, 1 },
4980 { 0 }
4981 };
4982
4983 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4984 t4_fatal_err(adapter);
4985 }
4986
4987 /*
4988 * PM TX interrupt handler.
4989 */
pmtx_intr_handler(struct adapter * adapter)4990 static void pmtx_intr_handler(struct adapter *adapter)
4991 {
4992 static const struct intr_info pmtx_intr_info[] = {
4993 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4994 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4995 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4996 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4997 { 0xffffff0, "PMTX framing error", -1, 1 },
4998 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4999 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
5000 1 },
5001 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
5002 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
5003 { 0 }
5004 };
5005
5006 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
5007 t4_fatal_err(adapter);
5008 }
5009
5010 /*
5011 * PM RX interrupt handler.
5012 */
pmrx_intr_handler(struct adapter * adapter)5013 static void pmrx_intr_handler(struct adapter *adapter)
5014 {
5015 static const struct intr_info pmrx_intr_info[] = {
5016 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
5017 { 0x3ffff0, "PMRX framing error", -1, 1 },
5018 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
5019 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
5020 1 },
5021 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
5022 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
5023 { 0 }
5024 };
5025
5026 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
5027 t4_fatal_err(adapter);
5028 }
5029
5030 /*
5031 * CPL switch interrupt handler.
5032 */
cplsw_intr_handler(struct adapter * adapter)5033 static void cplsw_intr_handler(struct adapter *adapter)
5034 {
5035 static const struct intr_info cplsw_intr_info[] = {
5036 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
5037 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
5038 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
5039 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
5040 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
5041 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
5042 { 0 }
5043 };
5044
5045 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
5046 t4_fatal_err(adapter);
5047 }
5048
5049 /*
5050 * LE interrupt handler.
5051 */
le_intr_handler(struct adapter * adap)5052 static void le_intr_handler(struct adapter *adap)
5053 {
5054 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
5055 static const struct intr_info le_intr_info[] = {
5056 { F_LIPMISS, "LE LIP miss", -1, 0 },
5057 { F_LIP0, "LE 0 LIP error", -1, 0 },
5058 { F_PARITYERR, "LE parity error", -1, 1 },
5059 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
5060 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
5061 { 0 }
5062 };
5063
5064 static struct intr_info t6_le_intr_info[] = {
5065 /* log an error for HASHTBLMEMCRCERR and clear the bit */
5066 { F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
5067 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
5068 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
5069 { F_TCAMINTPERR, "LE parity error", -1, 1 },
5070 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
5071 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
5072 { 0 }
5073 };
5074
5075 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
5076 (chip_ver <= CHELSIO_T5) ?
5077 le_intr_info : t6_le_intr_info))
5078 t4_fatal_err(adap);
5079 }
5080
5081 /*
5082 * MPS interrupt handler.
5083 */
mps_intr_handler(struct adapter * adapter)5084 static void mps_intr_handler(struct adapter *adapter)
5085 {
5086 static const struct intr_info mps_rx_intr_info[] = {
5087 { 0xffffff, "MPS Rx parity error", -1, 1 },
5088 { 0 }
5089 };
5090 static const struct intr_info mps_tx_intr_info[] = {
5091 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5092 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5093 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5094 -1, 1 },
5095 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5096 -1, 1 },
5097 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
5098 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5099 { F_FRMERR, "MPS Tx framing error", -1, 1 },
5100 { 0 }
5101 };
5102 static const struct intr_info t6_mps_tx_intr_info[] = {
5103 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5104 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5105 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5106 -1, 1 },
5107 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5108 -1, 1 },
5109 /* MPS Tx Bubble is normal for T6 */
5110 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5111 { F_FRMERR, "MPS Tx framing error", -1, 1 },
5112 { 0 }
5113 };
5114 static const struct intr_info mps_trc_intr_info[] = {
5115 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
5116 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
5117 1 },
5118 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
5119 { 0 }
5120 };
5121 static const struct intr_info mps_stat_sram_intr_info[] = {
5122 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
5123 { 0 }
5124 };
5125 static const struct intr_info mps_stat_tx_intr_info[] = {
5126 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
5127 { 0 }
5128 };
5129 static const struct intr_info mps_stat_rx_intr_info[] = {
5130 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
5131 { 0 }
5132 };
5133 static const struct intr_info mps_cls_intr_info[] = {
5134 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
5135 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
5136 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
5137 { 0 }
5138 };
5139
5140 int fat;
5141
5142 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
5143 mps_rx_intr_info) +
5144 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
5145 is_t6(adapter->params.chip)
5146 ? t6_mps_tx_intr_info
5147 : mps_tx_intr_info) +
5148 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
5149 mps_trc_intr_info) +
5150 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5151 mps_stat_sram_intr_info) +
5152 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5153 mps_stat_tx_intr_info) +
5154 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5155 mps_stat_rx_intr_info) +
5156 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
5157 mps_cls_intr_info);
5158
5159 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
5160 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
5161 if (fat)
5162 t4_fatal_err(adapter);
5163 }
5164
/* Memory-controller causes we act on: parity, correctable and
 * uncorrectable ECC. */
#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by idx: MEM_EDC0, MEM_EDC1, MEM_MC (MC0 on >= T5), MC1. */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status register pair for this memory instance. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			 name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/* Reset the correctable-error counter after reporting it. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	/* Parity and uncorrectable ECC are fatal; correctable ECC is not. */
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
5216
5217 /*
5218 * MA interrupt handler.
5219 */
ma_intr_handler(struct adapter * adapter)5220 static void ma_intr_handler(struct adapter *adapter)
5221 {
5222 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
5223
5224 if (status & F_MEM_PERR_INT_CAUSE) {
5225 CH_ALERT(adapter,
5226 "MA parity error, parity status %#x\n",
5227 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
5228 if (is_t5(adapter->params.chip))
5229 CH_ALERT(adapter,
5230 "MA parity error, parity status %#x\n",
5231 t4_read_reg(adapter,
5232 A_MA_PARITY_ERROR_STATUS2));
5233 }
5234 if (status & F_MEM_WRAP_INT_CAUSE) {
5235 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
5236 CH_ALERT(adapter, "MA address wrap-around error by "
5237 "client %u to address %#x\n",
5238 G_MEM_WRAP_CLIENT_NUM(v),
5239 G_MEM_WRAP_ADDRESS(v) << 4);
5240 }
5241 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
5242 t4_fatal_err(adapter);
5243 }
5244
5245 /*
5246 * SMB interrupt handler.
5247 */
smb_intr_handler(struct adapter * adap)5248 static void smb_intr_handler(struct adapter *adap)
5249 {
5250 static const struct intr_info smb_intr_info[] = {
5251 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5252 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5253 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5254 { 0 }
5255 };
5256
5257 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5258 t4_fatal_err(adap);
5259 }
5260
5261 /*
5262 * NC-SI interrupt handler.
5263 */
ncsi_intr_handler(struct adapter * adap)5264 static void ncsi_intr_handler(struct adapter *adap)
5265 {
5266 static const struct intr_info ncsi_intr_info[] = {
5267 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
5268 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
5269 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
5270 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
5271 { 0 }
5272 };
5273
5274 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
5275 t4_fatal_err(adap);
5276 }
5277
5278 /*
5279 * XGMAC interrupt handler.
5280 */
xgmac_intr_handler(struct adapter * adap,int port)5281 static void xgmac_intr_handler(struct adapter *adap, int port)
5282 {
5283 u32 v, int_cause_reg;
5284
5285 if (is_t4(adap->params.chip))
5286 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5287 else
5288 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5289
5290 v = t4_read_reg(adap, int_cause_reg);
5291
5292 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5293 if (!v)
5294 return;
5295
5296 if (v & F_TXFIFO_PRTY_ERR)
5297 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5298 port);
5299 if (v & F_RXFIFO_PRTY_ERR)
5300 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5301 port);
5302 t4_write_reg(adap, int_cause_reg, v);
5303 t4_fatal_err(adap);
5304 }
5305
5306 /*
5307 * PL Parity Error interrupt handler.
5308 */
pl_perr_intr_handler(struct adapter * adap)5309 static void pl_perr_intr_handler(struct adapter *adap)
5310 {
5311 static const struct intr_info pl_perr_info[] = {
5312 { F_UART, "UART Parity Error", -1, },
5313 { F_ULP_TX, "ULP TX Parity Error", -1 },
5314 { F_SGE, "SGE Parity Error", -1 },
5315 { F_HMA, "HMA Parity Error", -1 },
5316 { F_CPL_SWITCH, "CPL Switch Parity Error", -1 },
5317 { F_ULP_RX, "ULP RX Parity Error", -1 },
5318 { F_PM_RX, "PM RX Parity Error", -1 },
5319 { F_PM_TX, "PM TX Parity Error", -1 },
5320 { F_MA, "MA Parity Error", -1 },
5321 { F_TP, "TP Parity Error", -1 },
5322 { F_LE, "LE Parity Error", -1 },
5323 { F_EDC1, "EDC1 Parity Error", -1 },
5324 { F_EDC0, "EDC0 Parity Error", -1 },
5325 { F_MC, "MC Parity Error", -1 },
5326 { F_PCIE, "PCIE Parity Error", -1 },
5327 { F_PMU, "PMU Parity Error", -1 },
5328 { F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 },
5329 { F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 },
5330 { F_XGMAC1, "XGMAC1 Parity Error", -1 },
5331 { F_XGMAC0, "XGMAC0 Parity Error", -1 },
5332 { F_SMB, "SMB Parity Error", -1 },
5333 { F_SF, "SF Parity Error", -1 },
5334 { F_PL, "PL Parity Error", -1 },
5335 { F_NCSI, "NCSI Parity Error", -1 },
5336 { F_MPS, "MPS Parity Error", -1 },
5337 { F_MI, "MI Parity Error", -1 },
5338 { F_DBG, "DBG Parity Error", -1 },
5339 { F_I2CM, "I2CM Parity Error", -1 },
5340 { F_CIM, "CIM Parity Error", -1 },
5341 };
5342
5343 t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info);
5344 /* pl_intr_handler() will do the t4_fatal_err(adap) */
5345 }
5346
5347 /*
5348 * PL interrupt handler.
5349 */
pl_intr_handler(struct adapter * adap)5350 static void pl_intr_handler(struct adapter *adap)
5351 {
5352 static const struct intr_info pl_intr_info[] = {
5353 { F_FATALPERR, "Fatal parity error", -1, 1,
5354 pl_perr_intr_handler },
5355 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5356 { 0 }
5357 };
5358
5359 static struct intr_info t5_pl_intr_info[] = {
5360 { F_FATALPERR, "Fatal parity error", -1, 1,
5361 pl_perr_intr_handler },
5362 { 0 }
5363 };
5364
5365 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5366 is_t4(adap->params.chip) ?
5367 pl_intr_info : t5_pl_intr_info))
5368 t4_fatal_err(adap);
5369 }
5370
5371 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5372
5373 /**
5374 * t4_slow_intr_handler - control path interrupt handler
5375 * @adapter: the adapter
5376 *
5377 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5378 * The designation 'slow' is because it involves register reads, while
5379 * data interrupts typically don't involve any MMIOs.
5380 */
t4_slow_intr_handler(struct adapter * adapter)5381 int t4_slow_intr_handler(struct adapter *adapter)
5382 {
5383 /* There are rare cases where a PL_INT_CAUSE bit may end up getting
5384 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
5385 * easiest just to mask that case here.
5386 */
5387 u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
5388 u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE);
5389 u32 cause = raw_cause & enable;
5390
5391 if (!(cause & GLBL_INTR_MASK))
5392 return 0;
5393
5394 /* Disable all the interrupt(bits) in PL_INT_ENABLE */
5395 t4_write_reg(adapter, A_PL_INT_ENABLE, 0);
5396 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5397
5398 if (cause & F_CIM)
5399 cim_intr_handler(adapter);
5400 if (cause & F_MPS)
5401 mps_intr_handler(adapter);
5402 if (cause & F_NCSI)
5403 ncsi_intr_handler(adapter);
5404 if (cause & F_PL)
5405 pl_intr_handler(adapter);
5406 if (cause & F_SMB)
5407 smb_intr_handler(adapter);
5408 if (cause & F_MAC0)
5409 xgmac_intr_handler(adapter, 0);
5410 if (cause & F_MAC1)
5411 xgmac_intr_handler(adapter, 1);
5412 if (cause & F_MAC2)
5413 xgmac_intr_handler(adapter, 2);
5414 if (cause & F_MAC3)
5415 xgmac_intr_handler(adapter, 3);
5416 if (cause & F_PCIE)
5417 pcie_intr_handler(adapter);
5418 if (cause & F_MC0)
5419 mem_intr_handler(adapter, MEM_MC);
5420 if (is_t5(adapter->params.chip) && (cause & F_MC1))
5421 mem_intr_handler(adapter, MEM_MC1);
5422 if (cause & F_EDC0)
5423 mem_intr_handler(adapter, MEM_EDC0);
5424 if (cause & F_EDC1)
5425 mem_intr_handler(adapter, MEM_EDC1);
5426 if (cause & F_LE)
5427 le_intr_handler(adapter);
5428 if (cause & F_TP)
5429 tp_intr_handler(adapter);
5430 if (cause & F_MA)
5431 ma_intr_handler(adapter);
5432 if (cause & F_PM_TX)
5433 pmtx_intr_handler(adapter);
5434 if (cause & F_PM_RX)
5435 pmrx_intr_handler(adapter);
5436 if (cause & F_ULP_RX)
5437 ulprx_intr_handler(adapter);
5438 if (cause & F_CPL_SWITCH)
5439 cplsw_intr_handler(adapter);
5440 if (cause & F_SGE)
5441 sge_intr_handler(adapter);
5442 if (cause & F_ULP_TX)
5443 ulptx_intr_handler(adapter);
5444
5445 /* Clear the interrupts just processed for which we are the master. */
5446 t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK);
5447
5448 /* re-enable the interrupts (bits that were disabled
5449 * earlier in PL_INT_ENABLE)
5450 */
5451 t4_write_reg(adapter, A_PL_INT_ENABLE, enable);
5452 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
5453 return 1;
5454 }
5455
5456 /**
5457 * t4_intr_enable - enable interrupts
5458 * @adapter: the adapter whose interrupts should be enabled
5459 *
5460 * Enable PF-specific interrupts for the calling function and the top-level
5461 * interrupt concentrator for global interrupts. Interrupts are already
5462 * enabled at each module, here we just enable the roots of the interrupt
5463 * hierarchies.
5464 *
5465 * Note: this function should be called only when the driver manages
5466 * non PF-specific interrupts from the various HW modules. Only one PCI
5467 * function at a time should be doing this.
5468 */
t4_intr_enable(struct adapter * adapter)5469 void t4_intr_enable(struct adapter *adapter)
5470 {
5471 u32 val = 0;
5472 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5473 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5474 ? G_SOURCEPF(whoami)
5475 : G_T6_SOURCEPF(whoami));
5476
5477 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5478 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5479 else
5480 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5481 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
5482 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
5483 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
5484 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
5485 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5486 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
5487 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
5488 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5489 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
5490 }
5491
5492 /**
5493 * t4_intr_disable - disable interrupts
5494 * @adapter: the adapter whose interrupts should be disabled
5495 *
5496 * Disable interrupts. We only disable the top-level interrupt
5497 * concentrators. The caller must be a PCI function managing global
5498 * interrupts.
5499 */
t4_intr_disable(struct adapter * adapter)5500 void t4_intr_disable(struct adapter *adapter)
5501 {
5502 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5503 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5504 ? G_SOURCEPF(whoami)
5505 : G_T6_SOURCEPF(whoami));
5506
5507 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5508 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5509 }
5510
t4_chip_rss_size(struct adapter * adap)5511 unsigned int t4_chip_rss_size(struct adapter *adap)
5512 {
5513 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5514 return RSS_NENTRIES;
5515 else
5516 return T6_RSS_NENTRIES;
5517 }
5518
5519 /**
5520 * t4_config_rss_range - configure a portion of the RSS mapping table
5521 * @adapter: the adapter
5522 * @mbox: mbox to use for the FW command
5523 * @viid: virtual interface whose RSS subtable is to be written
5524 * @start: start entry in the table to write
5525 * @n: how many table entries to write
5526 * @rspq: values for the "response queue" (Ingress Queue) lookup table
5527 * @nrspq: number of values in @rspq
5528 *
5529 * Programs the selected part of the VI's RSS mapping table with the
5530 * provided values. If @nrspq < @n the supplied values are used repeatedly
5531 * until the full table range is populated.
5532 *
5533 * The caller must ensure the values in @rspq are in the range allowed for
5534 * @viid.
5535 */
t4_config_rss_range(struct adapter * adapter,int mbox,unsigned int viid,int start,int n,const u16 * rspq,unsigned int nrspq)5536 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5537 int start, int n, const u16 *rspq, unsigned int nrspq)
5538 {
5539 int ret;
5540 const u16 *rsp = rspq;
5541 const u16 *rsp_end = rspq + nrspq;
5542 struct fw_rss_ind_tbl_cmd cmd;
5543
5544 memset(&cmd, 0, sizeof(cmd));
5545 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5546 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5547 V_FW_RSS_IND_TBL_CMD_VIID(viid));
5548 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5549
5550 /* Each firmware RSS command can accommodate up to 32 RSS Ingress
5551 * Queue Identifiers. These Ingress Queue IDs are packed three to
5552 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5553 * reserved.
5554 */
5555 while (n > 0) {
5556 int nq = min(n, 32);
5557 int nq_packed = 0;
5558 __be32 *qp = &cmd.iq0_to_iq2;
5559
5560 /* Set up the firmware RSS command header to send the next
5561 * "nq" Ingress Queue IDs to the firmware.
5562 */
5563 cmd.niqid = cpu_to_be16(nq);
5564 cmd.startidx = cpu_to_be16(start);
5565
5566 /* "nq" more done for the start of the next loop.
5567 */
5568 start += nq;
5569 n -= nq;
5570
5571 /* While there are still Ingress Queue IDs to stuff into the
5572 * current firmware RSS command, retrieve them from the
5573 * Ingress Queue ID array and insert them into the command.
5574 */
5575 while (nq > 0) {
5576 /* Grab up to the next 3 Ingress Queue IDs (wrapping
5577 * around the Ingress Queue ID array if necessary) and
5578 * insert them into the firmware RSS command at the
5579 * current 3-tuple position within the commad.
5580 */
5581 u16 qbuf[3];
5582 u16 *qbp = qbuf;
5583 int nqbuf = min(3, nq);
5584
5585 nq -= nqbuf;
5586 qbuf[0] = qbuf[1] = qbuf[2] = 0;
5587 while (nqbuf && nq_packed < 32) {
5588 nqbuf--;
5589 nq_packed++;
5590 *qbp++ = *rsp++;
5591 if (rsp >= rsp_end)
5592 rsp = rspq;
5593 }
5594 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5595 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5596 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5597 }
5598
5599 /* Send this portion of the RRS table update to the firmware;
5600 * bail out on any errors.
5601 */
5602 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5603 if (ret)
5604 return ret;
5605 }
5606 return 0;
5607 }
5608
5609 /**
5610 * t4_config_glbl_rss - configure the global RSS mode
5611 * @adapter: the adapter
5612 * @mbox: mbox to use for the FW command
5613 * @mode: global RSS mode
5614 * @flags: mode-specific flags
5615 *
5616 * Sets the global RSS mode.
5617 */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)5618 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5619 unsigned int flags)
5620 {
5621 struct fw_rss_glb_config_cmd c;
5622
5623 memset(&c, 0, sizeof(c));
5624 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5625 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5626 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5627 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5628 c.u.manual.mode_pkd =
5629 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5630 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5631 c.u.basicvirtual.mode_keymode =
5632 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5633 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5634 } else
5635 return -EINVAL;
5636 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5637 }
5638
5639 /**
5640 * t4_config_vi_rss - configure per VI RSS settings
5641 * @adapter: the adapter
5642 * @mbox: mbox to use for the FW command
5643 * @viid: the VI id
5644 * @flags: RSS flags
5645 * @defq: id of the default RSS queue for the VI.
5646 * @skeyidx: RSS secret key table index for non-global mode
5647 * @skey: RSS vf_scramble key for VI.
5648 *
5649 * Configures VI-specific RSS properties.
5650 */
t4_config_vi_rss(struct adapter * adapter,int mbox,unsigned int viid,unsigned int flags,unsigned int defq,unsigned int skeyidx,unsigned int skey)5651 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5652 unsigned int flags, unsigned int defq, unsigned int skeyidx,
5653 unsigned int skey)
5654 {
5655 struct fw_rss_vi_config_cmd c;
5656
5657 memset(&c, 0, sizeof(c));
5658 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5659 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5660 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5661 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5662 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5663 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5664 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5665 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5666 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5667
5668 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5669 }
5670
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/*
	 * Write the row index (upper control bits set) to kick off the
	 * lookup, then poll F_LKPTBLROWVLD until the row data is valid;
	 * the register contents land in *val.
	 */
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5678
5679 /**
5680 * t4_read_rss - read the contents of the RSS mapping table
5681 * @adapter: the adapter
5682 * @map: holds the contents of the RSS mapping table
5683 *
5684 * Reads the contents of the RSS hash->queue mapping table.
5685 */
t4_read_rss(struct adapter * adapter,u16 * map)5686 int t4_read_rss(struct adapter *adapter, u16 *map)
5687 {
5688 u32 val;
5689 int i, ret, nentries;
5690
5691 nentries = t4_chip_rss_size(adapter);
5692 for (i = 0; i < nentries / 2; ++i) {
5693 ret = rd_rss_row(adapter, i, &val);
5694 if (ret)
5695 return ret;
5696 *map++ = G_LKPTBLQUEUE0(val);
5697 *map++ = G_LKPTBLQUEUE1(val);
5698 }
5699 return 0;
5700 }
5701
5702 /**
5703 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5704 * @adap: the adapter
5705 * @cmd: TP fw ldst address space type
5706 * @vals: where the indirect register values are stored/written
5707 * @nregs: how many indirect registers to read/write
5708 * @start_idx: index of first indirect register to read/write
5709 * @rw: Read (1) or Write (0)
5710 * @sleep_ok: if true we may sleep while awaiting command completion
5711 *
5712 * Access TP indirect registers through LDST
5713 **/
t4_tp_fw_ldst_rw(struct adapter * adap,int cmd,u32 * vals,unsigned int nregs,unsigned int start_index,unsigned int rw,bool sleep_ok)5714 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5715 unsigned int nregs, unsigned int start_index,
5716 unsigned int rw, bool sleep_ok)
5717 {
5718 int ret = 0;
5719 unsigned int i;
5720 struct fw_ldst_cmd c;
5721
5722 for (i = 0; i < nregs; i++) {
5723 memset(&c, 0, sizeof(c));
5724 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5725 F_FW_CMD_REQUEST |
5726 (rw ? F_FW_CMD_READ :
5727 F_FW_CMD_WRITE) |
5728 V_FW_LDST_CMD_ADDRSPACE(cmd));
5729 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5730
5731 c.u.addrval.addr = cpu_to_be32(start_index + i);
5732 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5733 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5734 sleep_ok);
5735 if (ret)
5736 return ret;
5737
5738 if (rw)
5739 vals[i] = be32_to_cpu(c.u.addrval.val);
5740 }
5741 return 0;
5742 }
5743
5744 /**
5745 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5746 * @adap: the adapter
5747 * @reg_addr: Address Register
5748 * @reg_data: Data register
5749 * @buff: where the indirect register values are stored/written
5750 * @nregs: how many indirect registers to read/write
5751 * @start_index: index of first indirect register to read/write
5752 * @rw: READ(1) or WRITE(0)
5753 * @sleep_ok: if true we may sleep while awaiting command completion
5754 *
5755 * Read/Write TP indirect registers through LDST if possible.
5756 * Else, use backdoor access
5757 **/
t4_tp_indirect_rw(struct adapter * adap,u32 reg_addr,u32 reg_data,u32 * buff,u32 nregs,u32 start_index,int rw,bool sleep_ok)5758 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5759 u32 *buff, u32 nregs, u32 start_index, int rw,
5760 bool sleep_ok)
5761 {
5762 int rc = -EINVAL;
5763 int cmd;
5764
5765 switch (reg_addr) {
5766 case A_TP_PIO_ADDR:
5767 cmd = FW_LDST_ADDRSPC_TP_PIO;
5768 break;
5769 case A_TP_TM_PIO_ADDR:
5770 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5771 break;
5772 case A_TP_MIB_INDEX:
5773 cmd = FW_LDST_ADDRSPC_TP_MIB;
5774 break;
5775 default:
5776 goto indirect_access;
5777 }
5778
5779 if (t4_use_ldst(adap))
5780 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5781 sleep_ok);
5782
5783 indirect_access:
5784
5785 if (rc) {
5786 if (rw)
5787 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5788 start_index);
5789 else
5790 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5791 start_index);
5792 }
5793 }
5794
5795 /**
5796 * t4_tp_pio_read - Read TP PIO registers
5797 * @adap: the adapter
5798 * @buff: where the indirect register values are written
5799 * @nregs: how many indirect registers to read
5800 * @start_index: index of first indirect register to read
5801 * @sleep_ok: if true we may sleep while awaiting command completion
5802 *
5803 * Read TP PIO Registers
5804 **/
t4_tp_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5805 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5806 u32 start_index, bool sleep_ok)
5807 {
5808 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5809 start_index, 1, sleep_ok);
5810 }
5811
5812 /**
5813 * t4_tp_pio_write - Write TP PIO registers
5814 * @adap: the adapter
5815 * @buff: where the indirect register values are stored
5816 * @nregs: how many indirect registers to write
5817 * @start_index: index of first indirect register to write
5818 * @sleep_ok: if true we may sleep while awaiting command completion
5819 *
5820 * Write TP PIO Registers
5821 **/
t4_tp_pio_write(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5822 void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5823 u32 start_index, bool sleep_ok)
5824 {
5825 t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5826 start_index, 0, sleep_ok);
5827 }
5828
5829 /**
5830 * t4_tp_tm_pio_read - Read TP TM PIO registers
5831 * @adap: the adapter
5832 * @buff: where the indirect register values are written
5833 * @nregs: how many indirect registers to read
5834 * @start_index: index of first indirect register to read
5835 * @sleep_ok: if true we may sleep while awaiting command completion
5836 *
5837 * Read TP TM PIO Registers
5838 **/
t4_tp_tm_pio_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5839 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5840 u32 start_index, bool sleep_ok)
5841 {
5842 t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5843 nregs, start_index, 1, sleep_ok);
5844 }
5845
5846 /**
5847 * t4_tp_mib_read - Read TP MIB registers
5848 * @adap: the adapter
5849 * @buff: where the indirect register values are written
5850 * @nregs: how many indirect registers to read
5851 * @start_index: index of first indirect register to read
5852 * @sleep_ok: if true we may sleep while awaiting command completion
5853 *
5854 * Read TP MIB Registers
5855 **/
t4_tp_mib_read(struct adapter * adap,u32 * buff,u32 nregs,u32 start_index,bool sleep_ok)5856 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5857 bool sleep_ok)
5858 {
5859 t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5860 start_index, 1, sleep_ok);
5861 }
5862
5863 /**
5864 * t4_read_rss_key - read the global RSS key
5865 * @adap: the adapter
5866 * @key: 10-entry array holding the 320-bit RSS key
5867 * @sleep_ok: if true we may sleep while awaiting command completion
5868 *
5869 * Reads the global 320-bit RSS key.
5870 */
t4_read_rss_key(struct adapter * adap,u32 * key,bool sleep_ok)5871 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5872 {
5873 t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5874 }
5875
5876 /**
5877 * t4_write_rss_key - program one of the RSS keys
5878 * @adap: the adapter
5879 * @key: 10-entry array holding the 320-bit RSS key
5880 * @idx: which RSS key to write
5881 * @sleep_ok: if true we may sleep while awaiting command completion
5882 *
5883 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5884 * 0..15 the corresponding entry in the RSS key table is written,
5885 * otherwise the global RSS key is written.
5886 */
t4_write_rss_key(struct adapter * adap,const u32 * key,int idx,bool sleep_ok)5887 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5888 bool sleep_ok)
5889 {
5890 u8 rss_key_addr_cnt = 16;
5891 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5892
5893 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5894 * allows access to key addresses 16-63 by using KeyWrAddrX
5895 * as index[5:4](upper 2) into key table
5896 */
5897 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5898 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5899 rss_key_addr_cnt = 32;
5900
5901 t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5902
5903 if (idx >= 0 && idx < rss_key_addr_cnt) {
5904 if (rss_key_addr_cnt > 16)
5905 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5906 vrt | V_KEYWRADDRX(idx >> 4) |
5907 V_T6_VFWRADDR(idx) | F_KEYWREN);
5908 else
5909 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5910 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5911 }
5912 }
5913
5914 /**
5915 * t4_read_rss_pf_config - read PF RSS Configuration Table
5916 * @adapter: the adapter
5917 * @index: the entry in the PF RSS table to read
5918 * @valp: where to store the returned value
5919 * @sleep_ok: if true we may sleep while awaiting command completion
5920 *
5921 * Reads the PF RSS Configuration Table at the specified index and returns
5922 * the value found there.
5923 */
t4_read_rss_pf_config(struct adapter * adapter,unsigned int index,u32 * valp,bool sleep_ok)5924 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5925 u32 *valp, bool sleep_ok)
5926 {
5927 t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5928 }
5929
5930 /**
5931 * t4_write_rss_pf_config - write PF RSS Configuration Table
5932 * @adapter: the adapter
5933 * @index: the entry in the VF RSS table to read
5934 * @val: the value to store
5935 * @sleep_ok: if true we may sleep while awaiting command completion
5936 *
5937 * Writes the PF RSS Configuration Table at the specified index with the
5938 * specified value.
5939 */
t4_write_rss_pf_config(struct adapter * adapter,unsigned int index,u32 val,bool sleep_ok)5940 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5941 u32 val, bool sleep_ok)
5942 {
5943 t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5944 sleep_ok);
5945 }
5946
5947 /**
5948 * t4_read_rss_vf_config - read VF RSS Configuration Table
5949 * @adapter: the adapter
5950 * @index: the entry in the VF RSS table to read
5951 * @vfl: where to store the returned VFL
5952 * @vfh: where to store the returned VFH
5953 * @sleep_ok: if true we may sleep while awaiting command completion
5954 *
5955 * Reads the VF RSS Configuration Table at the specified index and returns
5956 * the (VFL, VFH) values found there.
5957 */
t4_read_rss_vf_config(struct adapter * adapter,unsigned int index,u32 * vfl,u32 * vfh,bool sleep_ok)5958 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5959 u32 *vfl, u32 *vfh, bool sleep_ok)
5960 {
5961 u32 vrt, mask, data;
5962
5963 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5964 mask = V_VFWRADDR(M_VFWRADDR);
5965 data = V_VFWRADDR(index);
5966 } else {
5967 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5968 data = V_T6_VFWRADDR(index);
5969 }
5970 /*
5971 * Request that the index'th VF Table values be read into VFL/VFH.
5972 */
5973 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5974 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5975 vrt |= data | F_VFRDEN;
5976 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5977
5978 /*
5979 * Grab the VFL/VFH values ...
5980 */
5981 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5982 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5983 }
5984
5985 /**
5986 * t4_read_rss_pf_map - read PF RSS Map
5987 * @adapter: the adapter
5988 * @sleep_ok: if true we may sleep while awaiting command completion
5989 *
5990 * Reads the PF RSS Map register and returns its value.
5991 */
t4_read_rss_pf_map(struct adapter * adapter,bool sleep_ok)5992 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5993 {
5994 u32 pfmap;
5995
5996 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5997
5998 return pfmap;
5999 }
6000
6001 /**
6002 * t4_read_rss_pf_mask - read PF RSS Mask
6003 * @adapter: the adapter
6004 * @sleep_ok: if true we may sleep while awaiting command completion
6005 *
6006 * Reads the PF RSS Mask register and returns its value.
6007 */
t4_read_rss_pf_mask(struct adapter * adapter,bool sleep_ok)6008 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6009 {
6010 u32 pfmask;
6011
6012 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6013
6014 return pfmask;
6015 }
6016
6017 /**
6018 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
6019 * @adap: the adapter
6020 * @v4: holds the TCP/IP counter values
6021 * @v6: holds the TCP/IPv6 counter values
6022 * @sleep_ok: if true we may sleep while awaiting command completion
6023 *
6024 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6025 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6026 */
t4_tp_get_tcp_stats(struct adapter * adap,struct tp_tcp_stats * v4,struct tp_tcp_stats * v6,bool sleep_ok)6027 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
6028 struct tp_tcp_stats *v6, bool sleep_ok)
6029 {
6030 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
6031
6032 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
6033 #define STAT(x) val[STAT_IDX(x)]
6034 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
6035
6036 if (v4) {
6037 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6038 A_TP_MIB_TCP_OUT_RST, sleep_ok);
6039 v4->tcp_out_rsts = STAT(OUT_RST);
6040 v4->tcp_in_segs = STAT64(IN_SEG);
6041 v4->tcp_out_segs = STAT64(OUT_SEG);
6042 v4->tcp_retrans_segs = STAT64(RXT_SEG);
6043 }
6044 if (v6) {
6045 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6046 A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
6047 v6->tcp_out_rsts = STAT(OUT_RST);
6048 v6->tcp_in_segs = STAT64(IN_SEG);
6049 v6->tcp_out_segs = STAT64(OUT_SEG);
6050 v6->tcp_retrans_segs = STAT64(RXT_SEG);
6051 }
6052 #undef STAT64
6053 #undef STAT
6054 #undef STAT_IDX
6055 }
6056
6057 /**
6058 * t4_tp_get_err_stats - read TP's error MIB counters
6059 * @adap: the adapter
6060 * @st: holds the counter values
6061 * @sleep_ok: if true we may sleep while awaiting command completion
6062 *
6063 * Returns the values of TP's error counters.
6064 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;
	unsigned int i;

	/*
	 * Per-channel error counter groups, listed in the same order the
	 * original code read them; each entry names the destination array
	 * in @st and the first TP MIB register of its group.
	 */
	const struct {
		u32 *buf;
		u32 reg;
	} chan_stats[] = {
		{ st->mac_in_errs,	A_TP_MIB_MAC_IN_ERR_0 },
		{ st->hdr_in_errs,	A_TP_MIB_HDR_IN_ERR_0 },
		{ st->tcp_in_errs,	A_TP_MIB_TCP_IN_ERR_0 },
		{ st->tnl_cong_drops,	A_TP_MIB_TNL_CNG_DROP_0 },
		{ st->ofld_chan_drops,	A_TP_MIB_OFD_CHN_DROP_0 },
		{ st->tnl_tx_drops,	A_TP_MIB_TNL_DROP_0 },
		{ st->ofld_vlan_drops,	A_TP_MIB_OFD_VLN_DROP_0 },
		{ st->tcp6_in_errs,	A_TP_MIB_TCP_V6IN_ERR_0 },
	};

	for (i = 0; i < ARRAY_SIZE(chan_stats); i++)
		t4_tp_mib_read(adap, chan_stats[i].buf, nchan,
			       chan_stats[i].reg, sleep_ok);

	/*
	 * Two consecutive scalar counters starting at ofld_no_neigh are
	 * fetched with a single 2-word read.
	 */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
6097
6098 /**
6099 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
6100 * @adap: the adapter
6101 * @st: holds the counter values
6102 * @sleep_ok: if true we may sleep while awaiting command completion
6103 *
6104 * Returns the values of TP's CPL counters.
6105 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	const int nchannels = adap->params.arch.nchan;

	/* Inbound CPL requests, then outbound CPL responses, per channel. */
	t4_tp_mib_read(adap, st->req, nchannels, A_TP_MIB_CPL_IN_REQ_0,
		       sleep_ok);
	t4_tp_mib_read(adap, st->rsp, nchannels, A_TP_MIB_CPL_OUT_RSP_0,
		       sleep_ok);
}
6115
6116 /**
6117 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6118 * @adap: the adapter
6119 * @st: holds the counter values
6120 *
6121 * Returns the values of TP's RDMA counters.
6122 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/*
	 * Single 2-word read: rqe_dfr_pkt and the counter immediately
	 * following it in @st are filled from two consecutive TP MIB
	 * registers starting at A_TP_MIB_RQE_DFR_PKT.
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}
6129
6130 /**
6131 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6132 * @adap: the adapter
6133 * @idx: the port index
6134 * @st: holds the counter values
6135 * @sleep_ok: if true we may sleep while awaiting command completion
6136 *
6137 * Returns the values of TP's FCoE counters for the selected port.
6138 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 ddp_bytes[2];

	/* Per-port single-register counters: DDP'ed and dropped frames. */
	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);
	t4_tp_mib_read(adap, &st->frames_drop, 1, A_TP_MIB_FCOE_DROP_0 + idx,
		       sleep_ok);

	/*
	 * The DDP byte counter is a HI/LO register pair, two registers
	 * per port; HI is read first.
	 */
	t4_tp_mib_read(adap, ddp_bytes, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);
	st->octets_ddp = ((u64)ddp_bytes[0] << 32) | ddp_bytes[1];
}
6155
6156 /**
6157 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6158 * @adap: the adapter
6159 * @st: holds the counter values
6160 * @sleep_ok: if true we may sleep while awaiting command completion
6161 *
6162 * Returns the values of TP's counters for non-TCP directly-placed packets.
6163 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 stats[4];

	/*
	 * Four consecutive USM counters: frames, drops, then a HI/LO
	 * 64-bit byte count.
	 */
	t4_tp_mib_read(adap, stats, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = stats[0];
	st->drops = stats[1];
	st->octets = ((u64)stats[2] << 32) | stats[3];
}
6175
6176 /**
6177 * t4_read_mtu_tbl - returns the values in the HW path MTU table
6178 * @adap: the adapter
6179 * @mtus: where to store the MTU values
6180 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
6181 *
6182 * Reads the HW path MTU table.
6183 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	int idx;

	for (idx = 0; idx < NMTUS; idx++) {
		u32 tbl;

		/*
		 * Writing the out-of-range index 0xff with the desired
		 * entry number in the value field selects that entry for
		 * a subsequent read.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xffU) | V_MTUVALUE(idx));
		tbl = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[idx] = G_MTUVALUE(tbl);
		if (mtu_log != NULL)
			mtu_log[idx] = G_MTUWIDTH(tbl);
	}
}
6198
6199 /**
6200 * t4_read_cong_tbl - reads the congestion control table
6201 * @adap: the adapter
6202 * @incr: where to store the alpha values
6203 *
6204 * Reads the additive increments programmed into the HW congestion
6205 * control table.
6206 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int m, win;

	for (m = 0; m < NMTUS; m++) {
		for (win = 0; win < NCCTRL_WIN; win++) {
			/*
			 * Select entry (m, win); the out-of-range row
			 * index 0xffff puts the table in read mode.
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffffU) | (m << 5) | win);
			/* Additive increment is in the low 13 bits. */
			incr[m][win] = (u16)t4_read_reg(adap,
			    A_TP_CCTRL_TABLE) & 0x1fff;
		}
	}
}
6219
6220 /**
6221 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6222 * @adap: the adapter
6223 * @addr: the indirect TP register address
6224 * @mask: specifies the field within the register to modify
6225 * @val: new value for the field
6226 *
6227 * Sets a field of an indirect TP register to the given value.
6228 */
t4_tp_wr_bits_indirect(struct adapter * adap,unsigned int addr,unsigned int mask,unsigned int val)6229 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6230 unsigned int mask, unsigned int val)
6231 {
6232 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6233 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6234 t4_write_reg(adap, A_TP_PIO_DATA, val);
6235 }
6236
6237 /**
6238 * init_cong_ctrl - initialize congestion control parameters
6239 * @a: the alpha values for congestion control
6240 * @b: the beta values for congestion control
6241 *
6242 * Initialize the congestion control parameters.
6243 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/*
	 * Default alpha (additive increment) and beta (multiplicative
	 * decrement shift) values for the 32 congestion control windows,
	 * expressed as lookup tables instead of chained assignments.
	 */
	static const unsigned short dflt_alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short dflt_beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = dflt_alpha[i];
		b[i] = dflt_beta[i];
	}
}
6280
6281 /* The minimum additive increment value for the congestion control table */
6282 #define CC_MIN_INCR 2U
6283
6284 /**
6285 * t4_load_mtus - write the MTU and congestion control HW tables
6286 * @adap: the adapter
6287 * @mtus: the values for the MTU table
6288 * @alpha: the values for the congestion control alpha parameter
6289 * @beta: the values for the congestion control beta parameter
6290 *
6291 * Write the HW MTU table with the supplied MTUs and the high-speed
6292 * congestion control table with the supplied alpha, beta, and MTUs.
6293 * We write the two tables together because the additive increments
6294 * depend on the MTUs.
6295 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/*
	 * Expected average packet count for each congestion control
	 * window; used to scale the additive increment below.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/*
		 * Round log2 down to the nearest power of two: if the bit
		 * two positions below the MSB is clear, the MTU is closer
		 * to the lower power of two.
		 */
		if (!(mtu & ((1 << log2) >> 2))) /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment proportional to alpha and the
			 * payload size (mtu minus 40 — presumably the
			 * TCP/IP header overhead; confirm against the TP
			 * documentation), clamped at CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack MTU index, window, beta and increment. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
6327
6328 /*
6329 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6330 * clocks. The formula is
6331 *
6332 * bytes/s = bytes256 * 256 * ClkFreq / 4096
6333 *
6334 * which is equivalent to
6335 *
6336 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6337 */
chan_rate(struct adapter * adap,unsigned int bytes256)6338 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6339 {
6340 u64 v = bytes256 * adap->params.vpd.cclk;
6341
6342 return v * 62 + v / 2;
6343 }
6344
6345 /**
6346 * t4_get_chan_txrate - get the current per channel Tx rates
6347 * @adap: the adapter
6348 * @nic_rate: rates for NIC traffic
6349 * @ofld_rate: rates for offloaded traffic
6350 *
6351 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
6352 * for each channel.
6353 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	/* NIC (tunnel) traffic rates, one field per channel. */
	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2 and 3 exist only on full-NCHAN configurations. */
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	/* Offloaded traffic rates, same per-channel layout. */
	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}
6374
6375 /**
6376 * t4_set_trace_filter - configure one of the tracing filters
6377 * @adap: the adapter
6378 * @tp: the desired trace filter parameters
6379 * @idx: which filter to configure
6380 * @enable: whether to enable or disable the filter
6381 *
6382 * Configures one of the tracing filters available in HW. If @enable is
6383 * %0 @tp is not examined and may be %NULL. The user is responsible to
6384 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
6385 * by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
6386 * docs/readme.txt for a complete description of how to setup traceing on
6387 * T4.
6388 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	/* Disabling a filter only requires clearing its match control. */
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/*
	 * Range-check the remaining parameters against the field widths
	 * of the match control registers; T4 supports port values up to
	 * 11, later chips up to 19.
	 */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Match/don't-care register banks are spaced per filter. */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* Program match data and the inverted mask word by word. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/*
	 * Write CTL_A last: it carries the enable bit, so the filter goes
	 * live only after everything else is programmed.  T4 and T5+ use
	 * different field layouts for port/enable/invert.
	 */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
		     V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
6456
6457 /**
6458 * t4_get_trace_filter - query one of the tracing filters
6459 * @adap: the adapter
6460 * @tp: the current trace filter parameters
6461 * @idx: which trace filter to query
6462 * @enabled: non-zero if the filter is enabled
6463 *
6464 * Returns the current settings of one of the HW tracing filters.
6465 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* T4 and T5+ lay out enable/port/invert differently in CTL_A. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	/* Match/don't-care register banks are spaced per filter. */
	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/*
	 * Hardware stores a don't-care mask; invert it back to a care
	 * mask and clear the don't-care bits from the match data.
	 */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
6499
6500 /**
6501 * t4_read_tcb - read a hardware TCP Control Block structure
6502 * @adap: the adapter
6503 * @win: PCI-E Memory Window to use
6504 * @tid: the TCB ID
6505 * @tcb: the buffer to return the TCB in
6506 *
6507 * Reads the indicated hardware TCP Control Block and returns it in
6508 * the supplied buffer. Returns 0 on success.
6509 */
int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
{
	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
	__be32 raw_tcb[TCB_SIZE/4];
	int ret, word;

	/* Pull the raw (big-endian) TCB image through the memory window. */
	ret = t4_memory_rw_addr(adap, win,
				tcb_addr, sizeof raw_tcb, raw_tcb,
				T4_MEMORY_READ);
	if (ret)
		return ret;

	/*
	 * Convert each word to host byte order.  Bound the loop by the
	 * buffer's word count rather than a magic 32 so it stays in sync
	 * with TCB_SIZE.
	 */
	for (word = 0; word < TCB_SIZE/4; word++)
		tcb[word] = be32_to_cpu(raw_tcb[word]);
	return 0;
}
6527
6528 /**
6529 * t4_pmtx_get_stats - returns the HW stats from PMTX
6530 * @adap: the adapter
6531 * @cnt: where to store the count statistics
6532 * @cycles: where to store the cycle statistics
6533 *
6534 * Returns performance statistics from PMTX.
6535 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	u32 dbg[2];
	int stat;

	for (stat = 0; stat < adap->params.arch.pm_stats_cnt; stat++) {
		/* Select the statistic; hardware indexing is 1-based. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, stat + 1);
		cnt[stat] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (!is_t4(adap->params.chip)) {
			/* T5+: cycle count comes via the debug interface. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, dbg, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[stat] = ((u64)dbg[0] << 32) | dbg[1];
		} else {
			cycles[stat] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		}
	}
}
6554
6555 /**
6556 * t4_pmrx_get_stats - returns the HW stats from PMRX
6557 * @adap: the adapter
6558 * @cnt: where to store the count statistics
6559 * @cycles: where to store the cycle statistics
6560 *
6561 * Returns performance statistics from PMRX.
6562 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	u32 dbg[2];
	int stat;

	for (stat = 0; stat < adap->params.arch.pm_stats_cnt; stat++) {
		/* Select the statistic; hardware indexing is 1-based. */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, stat + 1);
		cnt[stat] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (!is_t4(adap->params.chip)) {
			/* T5+: cycle count comes via the debug interface. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, dbg, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[stat] = ((u64)dbg[0] << 32) | dbg[1];
		} else {
			cycles[stat] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		}
	}
}
6581
6582 /**
6583 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6584 * @adapter: the adapter
6585 * @pidx: the port index
6586 *
6587 * Compuytes and returns a bitmap indicating which MPS buffer groups are
6588 * associated with the given Port. Bit i is set if buffer group i is
6589 * used by the Port.
6590 */
compute_mps_bg_map(struct adapter * adapter,int pidx)6591 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6592 int pidx)
6593 {
6594 unsigned int chip_version, nports;
6595
6596 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6597 nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6598
6599 switch (chip_version) {
6600 case CHELSIO_T4:
6601 case CHELSIO_T5:
6602 switch (nports) {
6603 case 1: return 0xf;
6604 case 2: return 3 << (2 * pidx);
6605 case 4: return 1 << pidx;
6606 }
6607 break;
6608
6609 case CHELSIO_T6:
6610 switch (nports) {
6611 case 2: return 1 << (2 * pidx);
6612 }
6613 break;
6614 }
6615
6616 CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6617 chip_version, nports);
6618
6619 return 0;
6620 }
6621
6622 /**
6623 * t4_get_mps_bg_map - return the buffer groups associated with a port
6624 * @adapter: the adapter
6625 * @pidx: the port index
6626 *
6627 * Returns a bitmap indicating which MPS buffer groups are associated
6628 * with the given Port. Bit i is set if buffer group i is used by the
6629 * Port.
6630 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	/* Reject out-of-range port indices up front. */
	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * (A zero entry doubles as "not cached yet"; a genuine map is
	 * always non-zero for a valid port.)
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us. If the new
	 * API isn't supported, revert back to old hardcoded way. The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		u32 param, val;
		int ret;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map. Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
6686
6687 /**
6688 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6689 * @adapter: the adapter
6690 * @pidx: the port index
6691 */
t4_get_tp_e2c_map(struct adapter * adapter,int pidx)6692 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6693 {
6694 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6695 u32 param, val = 0;
6696 int ret;
6697
6698 if (pidx >= nports) {
6699 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6700 return 0;
6701 }
6702
6703 /* FW version >= 1.16.44.0 can determine E2C channel map using
6704 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6705 */
6706 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6707 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6708 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6709 0, 1, ¶m, &val);
6710 if (!ret)
6711 return (val >> (8*pidx)) & 0xff;
6712
6713 return 0;
6714 }
6715
6716 /**
6717 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6718 * @adapter: the adapter
6719 * @pidx: the port index
6720 *
6721 * Returns a bitmap indicating which TP Ingress Channels are associated with
6722 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
6723 */
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));

	/* Reject out-of-range port indices up front. */
	if (pidx >= nports) {
		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
		return 0;
	}

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		/*
		 * Note that this happens to be the same values as the MPS
		 * Buffer Group Map for these Chips. But we replicate the code
		 * here because they're really separate concepts.
		 */
		switch (nports) {
		case 1: return 0xf;		/* all 4 ingress channels */
		case 2: return 3 << (2 * pidx);	/* 2 channels per port */
		case 4: return 1 << pidx;	/* 1 channel per port */
		}
		break;

	case CHELSIO_T6:
		/* T6: one ingress channel per port in both configs. */
		switch (nports) {
		case 1: return 1 << pidx;
		case 2: return 1 << pidx;
		}
		break;
	}

	/* Unrecognized chip/port combination: complain, claim no channels. */
	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
	       chip_version, nports);
	return 0;
}
6761
6762 /**
6763 * t4_get_port_type_description - return Port Type string description
6764 * @port_type: firmware Port Type enumeration
6765 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by the firmware's fw_port_type enumeration. */
	static const char *const descriptions[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
		"KR_XLAUI",
	};

	/* Anything beyond the table (newer firmware enums) is unknown. */
	if (port_type >= ARRAY_SIZE(descriptions))
		return "UNKNOWN";
	return descriptions[port_type];
}
6798
6799 /**
6800 * t4_get_port_stats_offset - collect port stats relative to a previous
6801 * snapshot
6802 * @adap: The adapter
6803 * @idx: The port
6804 * @stats: Current stats to fill
6805 * @offset: Previous stats snapshot
6806 */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)6807 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6808 struct port_stats *stats,
6809 struct port_stats *offset)
6810 {
6811 u64 *s, *o;
6812 int i;
6813
6814 t4_get_port_stats(adap, idx, stats);
6815 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6816 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6817 i++, s++, o++)
6818 *s -= *o;
6819 }
6820
6821 /**
6822 * t4_get_port_stats - collect port statistics
6823 * @adap: the adapter
6824 * @idx: the port index
6825 * @p: the stats structure to fill
6826 *
6827 * Collect statistics related to the given port from HW.
6828 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* Buffer groups owned by this port; gates the per-BG stats below. */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

	/*
	 * 64-bit read of a per-port MPS counter; T4 and T5+ use different
	 * register block layouts for the port statistics.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
	/* 64-bit read of a common (non-per-port) MPS counter. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+ the hardware may count pause frames into the 64B and
	 * multicast buckets depending on STAT_CTL; back them out so the
	 * buckets reflect only data frames.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as above, for the receive side. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Per-buffer-group drop/truncate counters, only for BGs this
	 * port actually owns (per bgmap); others report 0.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6918
6919 /**
6920 * t4_get_lb_stats - collect loopback port statistics
6921 * @adap: the adapter
6922 * @idx: the loopback port index
6923 * @p: the stats structure to fill
6924 *
6925 * Return HW statistics for the given loopback port.
6926 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	/* Buffer groups owned by this port; gates the per-BG stats below. */
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

	/*
	 * 64-bit read of a per-loopback-port MPS counter; T4 and T5+ use
	 * different register block layouts.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
	/* 64-bit read of a common (non-per-port) MPS counter. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Per-buffer-group loopback drop/truncate counters, only for BGs
	 * this port actually owns (per bgmap); others report 0.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6966
6967 /* t4_mk_filtdelwr - create a delete filter WR
6968 * @ftid: the filter ID
6969 * @wr: the filter work request to populate
6970 * @rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
6971 * @qid: ingress queue to receive the delete notification
6972 *
6973 * Creates a filter work request to delete the supplied filter. If @qid
6974 * is negative the delete notification is suppressed.
6975 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	/* Start from a zeroed work request; only set what's needed. */
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	/* Length is expressed in 16-byte units. */
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY suppresses the delete notification when qid < 0. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
6990
/*
 * INIT_CMD - initialize the common header of a firmware command structure.
 * @var: the command structure (accessed by member, so pass the struct itself)
 * @cmd: command name fragment; pasted to form the FW_<cmd>_CMD opcode
 * @rd_wr: READ or WRITE; pasted to form the F_FW_CMD_<rd_wr> flag
 *
 * Fills in op_to_write (opcode | REQUEST | READ/WRITE) and retval_len16
 * (command length in 16-byte units via FW_LEN16).  Remaining fields are
 * left for the caller, which is expected to have zeroed @var first.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6997
/**
 * t4_fwaddrspace_write - write a value into the firmware's address space
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @addr: the address to write
 * @val: the value to write
 *
 * Issues a FW_LDST command through the given mailbox asking the firmware
 * to store @val at @addr within the firmware's own address space.
 * Returns the mailbox command result.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_addrspace =
	    cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			F_FW_CMD_REQUEST |
			F_FW_CMD_WRITE |
			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.addrval.addr = cpu_to_be32(addr);
	cmd.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
7016
7017 /**
7018 * t4_mdio_rd - read a PHY register through MDIO
7019 * @adap: the adapter
7020 * @mbox: mailbox to use for the FW command
7021 * @phy_addr: the PHY address
7022 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7023 * @reg: the register to read
7024 * @valp: where to store the value
7025 *
7026 * Issues a FW command through the given mailbox to read a PHY register.
7027 */
t4_mdio_rd(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int * valp)7028 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7029 unsigned int mmd, unsigned int reg, unsigned int *valp)
7030 {
7031 int ret;
7032 u32 ldst_addrspace;
7033 struct fw_ldst_cmd c;
7034
7035 memset(&c, 0, sizeof(c));
7036 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7037 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7038 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7039 ldst_addrspace);
7040 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7041 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7042 V_FW_LDST_CMD_MMD(mmd));
7043 c.u.mdio.raddr = cpu_to_be16(reg);
7044
7045 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7046 if (ret == 0)
7047 *valp = be16_to_cpu(c.u.mdio.rval);
7048 return ret;
7049 }
7050
7051 /**
7052 * t4_mdio_wr - write a PHY register through MDIO
7053 * @adap: the adapter
7054 * @mbox: mailbox to use for the FW command
7055 * @phy_addr: the PHY address
7056 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
7057 * @reg: the register to write
7058 * @valp: value to write
7059 *
7060 * Issues a FW command through the given mailbox to write a PHY register.
7061 */
t4_mdio_wr(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,unsigned int val)7062 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7063 unsigned int mmd, unsigned int reg, unsigned int val)
7064 {
7065 u32 ldst_addrspace;
7066 struct fw_ldst_cmd c;
7067
7068 memset(&c, 0, sizeof(c));
7069 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7070 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7071 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7072 ldst_addrspace);
7073 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7074 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7075 V_FW_LDST_CMD_MMD(mmd));
7076 c.u.mdio.raddr = cpu_to_be16(reg);
7077 c.u.mdio.rval = cpu_to_be16(val);
7078
7079 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7080 }
7081
7082 /**
7083 *
7084 * t4_sge_decode_idma_state - decode the idma state
7085 * @adap: the adapter
7086 * @state: the state idma is stuck in
7087 */
t4_sge_decode_idma_state(struct adapter * adapter,int state)7088 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7089 {
7090 static const char * const t4_decode[] = {
7091 "IDMA_IDLE",
7092 "IDMA_PUSH_MORE_CPL_FIFO",
7093 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7094 "Not used",
7095 "IDMA_PHYSADDR_SEND_PCIEHDR",
7096 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7097 "IDMA_PHYSADDR_SEND_PAYLOAD",
7098 "IDMA_SEND_FIFO_TO_IMSG",
7099 "IDMA_FL_REQ_DATA_FL_PREP",
7100 "IDMA_FL_REQ_DATA_FL",
7101 "IDMA_FL_DROP",
7102 "IDMA_FL_H_REQ_HEADER_FL",
7103 "IDMA_FL_H_SEND_PCIEHDR",
7104 "IDMA_FL_H_PUSH_CPL_FIFO",
7105 "IDMA_FL_H_SEND_CPL",
7106 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7107 "IDMA_FL_H_SEND_IP_HDR",
7108 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7109 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7110 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7111 "IDMA_FL_D_SEND_PCIEHDR",
7112 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7113 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7114 "IDMA_FL_SEND_PCIEHDR",
7115 "IDMA_FL_PUSH_CPL_FIFO",
7116 "IDMA_FL_SEND_CPL",
7117 "IDMA_FL_SEND_PAYLOAD_FIRST",
7118 "IDMA_FL_SEND_PAYLOAD",
7119 "IDMA_FL_REQ_NEXT_DATA_FL",
7120 "IDMA_FL_SEND_NEXT_PCIEHDR",
7121 "IDMA_FL_SEND_PADDING",
7122 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7123 "IDMA_FL_SEND_FIFO_TO_IMSG",
7124 "IDMA_FL_REQ_DATAFL_DONE",
7125 "IDMA_FL_REQ_HEADERFL_DONE",
7126 };
7127 static const char * const t5_decode[] = {
7128 "IDMA_IDLE",
7129 "IDMA_ALMOST_IDLE",
7130 "IDMA_PUSH_MORE_CPL_FIFO",
7131 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7132 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7133 "IDMA_PHYSADDR_SEND_PCIEHDR",
7134 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7135 "IDMA_PHYSADDR_SEND_PAYLOAD",
7136 "IDMA_SEND_FIFO_TO_IMSG",
7137 "IDMA_FL_REQ_DATA_FL",
7138 "IDMA_FL_DROP",
7139 "IDMA_FL_DROP_SEND_INC",
7140 "IDMA_FL_H_REQ_HEADER_FL",
7141 "IDMA_FL_H_SEND_PCIEHDR",
7142 "IDMA_FL_H_PUSH_CPL_FIFO",
7143 "IDMA_FL_H_SEND_CPL",
7144 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7145 "IDMA_FL_H_SEND_IP_HDR",
7146 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7147 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7148 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7149 "IDMA_FL_D_SEND_PCIEHDR",
7150 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7151 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7152 "IDMA_FL_SEND_PCIEHDR",
7153 "IDMA_FL_PUSH_CPL_FIFO",
7154 "IDMA_FL_SEND_CPL",
7155 "IDMA_FL_SEND_PAYLOAD_FIRST",
7156 "IDMA_FL_SEND_PAYLOAD",
7157 "IDMA_FL_REQ_NEXT_DATA_FL",
7158 "IDMA_FL_SEND_NEXT_PCIEHDR",
7159 "IDMA_FL_SEND_PADDING",
7160 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7161 };
7162 static const char * const t6_decode[] = {
7163 "IDMA_IDLE",
7164 "IDMA_PUSH_MORE_CPL_FIFO",
7165 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7166 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7167 "IDMA_PHYSADDR_SEND_PCIEHDR",
7168 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7169 "IDMA_PHYSADDR_SEND_PAYLOAD",
7170 "IDMA_FL_REQ_DATA_FL",
7171 "IDMA_FL_DROP",
7172 "IDMA_FL_DROP_SEND_INC",
7173 "IDMA_FL_H_REQ_HEADER_FL",
7174 "IDMA_FL_H_SEND_PCIEHDR",
7175 "IDMA_FL_H_PUSH_CPL_FIFO",
7176 "IDMA_FL_H_SEND_CPL",
7177 "IDMA_FL_H_SEND_IP_HDR_FIRST",
7178 "IDMA_FL_H_SEND_IP_HDR",
7179 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
7180 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
7181 "IDMA_FL_H_SEND_IP_HDR_PADDING",
7182 "IDMA_FL_D_SEND_PCIEHDR",
7183 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7184 "IDMA_FL_D_REQ_NEXT_DATA_FL",
7185 "IDMA_FL_SEND_PCIEHDR",
7186 "IDMA_FL_PUSH_CPL_FIFO",
7187 "IDMA_FL_SEND_CPL",
7188 "IDMA_FL_SEND_PAYLOAD_FIRST",
7189 "IDMA_FL_SEND_PAYLOAD",
7190 "IDMA_FL_REQ_NEXT_DATA_FL",
7191 "IDMA_FL_SEND_NEXT_PCIEHDR",
7192 "IDMA_FL_SEND_PADDING",
7193 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
7194 };
7195 static const u32 sge_regs[] = {
7196 A_SGE_DEBUG_DATA_LOW_INDEX_2,
7197 A_SGE_DEBUG_DATA_LOW_INDEX_3,
7198 A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7199 };
7200 const char **sge_idma_decode;
7201 int sge_idma_decode_nstates;
7202 int i;
7203 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
7204
7205 /* Select the right set of decode strings to dump depending on the
7206 * adapter chip type.
7207 */
7208 switch (chip_version) {
7209 case CHELSIO_T4:
7210 sge_idma_decode = (const char **)t4_decode;
7211 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7212 break;
7213
7214 case CHELSIO_T5:
7215 sge_idma_decode = (const char **)t5_decode;
7216 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7217 break;
7218
7219 case CHELSIO_T6:
7220 sge_idma_decode = (const char **)t6_decode;
7221 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7222 break;
7223
7224 default:
7225 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
7226 return;
7227 }
7228
7229 if (state < sge_idma_decode_nstates)
7230 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7231 else
7232 CH_WARN(adapter, "idma state %d unknown\n", state);
7233
7234 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7235 CH_WARN(adapter, "SGE register %#x value %#x\n",
7236 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7237 }
7238
7239 /**
7240 * t4_sge_ctxt_flush - flush the SGE context cache
7241 * @adap: the adapter
7242 * @mbox: mailbox to use for the FW command
7243 *
7244 * Issues a FW command through the given mailbox to flush the
7245 * SGE context cache.
7246 */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)7247 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
7248 {
7249 int ret;
7250 u32 ldst_addrspace;
7251 struct fw_ldst_cmd c;
7252
7253 memset(&c, 0, sizeof(c));
7254 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
7255 FW_LDST_ADDRSPC_SGE_EGRC :
7256 FW_LDST_ADDRSPC_SGE_INGC);
7257 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7258 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7259 ldst_addrspace);
7260 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7261 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7262
7263 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7264 return ret;
7265 }
7266
7267 /**
7268 * t4_read_sge_dbqtimers - reag SGE Doorbell Queue Timer values
7269 * @adap - the adapter
7270 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
7271 * @dbqtimers: SGE Doorbell Queue Timer table
7272 *
7273 * Reads the SGE Doorbell Queue Timer values into the provided table.
7274 * Returns 0 on success (Firmware and Hardware support this feature),
7275 * an error on failure.
7276 */
t4_read_sge_dbqtimers(struct adapter * adap,unsigned int ndbqtimers,u16 * dbqtimers)7277 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
7278 u16 *dbqtimers)
7279 {
7280 int ret, dbqtimerix;
7281
7282 ret = 0;
7283 dbqtimerix = 0;
7284 while (dbqtimerix < ndbqtimers) {
7285 int nparams, param;
7286 u32 params[7], vals[7];
7287
7288 nparams = ndbqtimers - dbqtimerix;
7289 if (nparams > ARRAY_SIZE(params))
7290 nparams = ARRAY_SIZE(params);
7291
7292 for (param = 0; param < nparams; param++)
7293 params[param] =
7294 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7295 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
7296 V_FW_PARAMS_PARAM_Y(dbqtimerix + param));
7297 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
7298 nparams, params, vals);
7299 if (ret)
7300 break;
7301
7302 for (param = 0; param < nparams; param++)
7303 dbqtimers[dbqtimerix++] = vals[param];
7304 }
7305 return ret;
7306 }
7307
/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.  Busy and
 * timeout conditions are retried up to FW_CMD_HELLO_RETRIES times.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * MASTERDIS/MASTERFORCE encode the caller's mastership request;
	 * when forcing mastership we also nominate our own mailbox.
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the device state and the firmware-chosen Master PF. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll in 50 ms steps against the overall budget. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
7437
7438 /**
7439 * t4_fw_bye - end communication with FW
7440 * @adap: the adapter
7441 * @mbox: mailbox to use for the FW command
7442 *
7443 * Issues a command to terminate communication with FW.
7444 */
t4_fw_bye(struct adapter * adap,unsigned int mbox)7445 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7446 {
7447 struct fw_bye_cmd c;
7448
7449 memset(&c, 0, sizeof(c));
7450 INIT_CMD(c, BYE, WRITE);
7451 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7452 }
7453
7454 /**
7455 * t4_fw_reset - issue a reset to FW
7456 * @adap: the adapter
7457 * @mbox: mailbox to use for the FW command
7458 * @reset: specifies the type of reset to perform
7459 *
7460 * Issues a reset command of the specified type to FW.
7461 */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)7462 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7463 {
7464 struct fw_reset_cmd c;
7465
7466 memset(&c, 0, sizeof(c));
7467 INIT_CMD(c, RESET, WRITE);
7468 c.val = cpu_to_be32(reset);
7469 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7470 }
7471
7472 /**
7473 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7474 * @adap: the adapter
7475 * @mbox: mailbox to use for the FW RESET command (if desired)
7476 * @force: force uP into RESET even if FW RESET command fails
7477 *
7478 * Issues a RESET command to firmware (if desired) with a HALT indication
7479 * and then puts the microprocessor into RESET state. The RESET command
7480 * will only be issued if a legitimate mailbox is provided (mbox <=
7481 * M_PCIE_FW_MASTER).
7482 *
7483 * This is generally used in order for the host to safely manipulate the
7484 * adapter without fear of conflicting with whatever the firmware might
7485 * be doing. The only way out of this state is to RESTART the firmware
7486 * ...
7487 */
t4_fw_halt(struct adapter * adap,unsigned int mbox,int force)7488 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7489 {
7490 int ret = 0;
7491
7492 /*
7493 * If a legitimate mailbox is provided, issue a RESET command
7494 * with a HALT indication.
7495 */
7496 if (mbox <= M_PCIE_FW_MASTER) {
7497 struct fw_reset_cmd c;
7498
7499 memset(&c, 0, sizeof(c));
7500 INIT_CMD(c, RESET, WRITE);
7501 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7502 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7503 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7504 }
7505
7506 /*
7507 * Normally we won't complete the operation if the firmware RESET
7508 * command fails but if our caller insists we'll go ahead and put the
7509 * uP into RESET. This can be useful if the firmware is hung or even
7510 * missing ... We'll have to take the risk of putting the uP into
7511 * RESET without the cooperation of firmware in that case.
7512 *
7513 * We also force the firmware's HALT flag to be on in case we bypassed
7514 * the firmware RESET command above or we're dealing with old firmware
7515 * which doesn't have the HALT capability. This will serve as a flag
7516 * for the incoming firmware to know that it's coming out of a HALT
7517 * rather than a RESET ... if it's new enough to understand that ...
7518 */
7519 if (ret == 0 || force) {
7520 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7521 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7522 F_PCIE_FW_HALT);
7523 }
7524
7525 /*
7526 * And we always return the result of the firmware RESET command
7527 * even when we force the uP into RESET ...
7528 */
7529 return ret;
7530 }
7531
7532 /**
7533 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7534 * @adap: the adapter
7535 * @reset: if we want to do a RESET to restart things
7536 *
7537 * Restart firmware previously halted by t4_fw_halt(). On successful
7538 * return the previous PF Master remains as the new PF Master and there
7539 * is no need to issue a new HELLO command, etc.
7540 *
7541 * We do this in two ways:
7542 *
7543 * 1. If we're dealing with newer firmware we'll simply want to take
7544 * the chip's microprocessor out of RESET. This will cause the
7545 * firmware to start up from its start vector. And then we'll loop
7546 * until the firmware indicates it's started again (PCIE_FW.HALT
7547 * reset to 0) or we timeout.
7548 *
7549 * 2. If we're dealing with older firmware then we'll need to RESET
7550 * the chip since older firmware won't recognize the PCIE_FW.HALT
7551 * flag and automatically RESET itself on startup.
7552 */
t4_fw_restart(struct adapter * adap,unsigned int mbox,int reset)7553 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7554 {
7555 if (reset) {
7556 /*
7557 * Since we're directing the RESET instead of the firmware
7558 * doing it automatically, we need to clear the PCIE_FW.HALT
7559 * bit.
7560 */
7561 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
7562
7563 /*
7564 * If we've been given a valid mailbox, first try to get the
7565 * firmware to do the RESET. If that works, great and we can
7566 * return success. Otherwise, if we haven't been given a
7567 * valid mailbox or the RESET command failed, fall back to
7568 * hitting the chip with a hammer.
7569 */
7570 if (mbox <= M_PCIE_FW_MASTER) {
7571 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7572 msleep(100);
7573 if (t4_fw_reset(adap, mbox,
7574 F_PIORST | F_PIORSTMODE) == 0)
7575 return 0;
7576 }
7577
7578 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7579 msleep(2000);
7580 } else {
7581 int ms;
7582
7583 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7584 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7585 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7586 return FW_SUCCESS;
7587 msleep(100);
7588 ms += 100;
7589 }
7590 return -ETIMEDOUT;
7591 }
7592 return 0;
7593 }
7594
/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are loaded without halting/restarting the uP. */
	unsigned int bootstrap =
		be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable the FW_OK flag so that mbox commands with the FW_OK flag
	 * check won't be sent while we are flashing the firmware.
	 */
	adap->flags &= ~FW_OK;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			goto out;
	}

	ret = t4_load_fw(adap, fw_data, size, bootstrap);
	if (ret < 0 || bootstrap)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH .  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap, 1);

out:
	/* Re-enable mailbox commands gated on firmware availability. */
	adap->flags |= FW_OK;
	return ret;
}
7675
7676 /**
7677 * t4_fl_pkt_align - return the fl packet alignment
7678 * @adap: the adapter
7679 * is_packed: True when the driver uses packed FLM mode
7680 *
7681 * T4 has a single field to specify the packing and padding boundary.
7682 * T5 onwards has separate fields for this and hence the alignment for
7683 * next packet offset is maximum of these two.
7684 *
7685 */
t4_fl_pkt_align(struct adapter * adap,bool is_packed)7686 int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
7687 {
7688 u32 sge_control, sge_control2;
7689 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7690
7691 sge_control = t4_read_reg(adap, A_SGE_CONTROL);
7692
7693 /* T4 uses a single control field to specify both the PCIe Padding and
7694 * Packing Boundary. T5 introduced the ability to specify these
7695 * separately. The actual Ingress Packet Data alignment boundary
7696 * within Packed Buffer Mode is the maximum of these two
7697 * specifications. (Note that it makes no real practical sense to
7698 * have the Pading Boudary be larger than the Packing Boundary but you
7699 * could set the chip up that way and, in fact, legacy T4 code would
7700 * end doing this because it would initialize the Padding Boundary and
7701 * leave the Packing Boundary initialized to 0 (16 bytes).)
7702 * Padding Boundary values in T6 starts from 8B,
7703 * where as it is 32B for T4 and T5.
7704 */
7705 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7706 ingpad_shift = X_INGPADBOUNDARY_SHIFT;
7707 else
7708 ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
7709
7710 ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
7711
7712 fl_align = ingpadboundary;
7713 if (!is_t4(adap->params.chip) && is_packed) {
7714 /* T5 has a weird interpretation of one of the PCIe Packing
7715 * Boundary values. No idea why ...
7716 */
7717 sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
7718 ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
7719 if (ingpackboundary == X_INGPACKBOUNDARY_16B)
7720 ingpackboundary = 16;
7721 else
7722 ingpackboundary = 1 << (ingpackboundary +
7723 X_INGPACKBOUNDARY_SHIFT);
7724
7725 fl_align = max(ingpadboundary, ingpackboundary);
7726 }
7727 return fl_align;
7728 }
7729
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	/* log2 of the host page size, e.g. 12 for 4 KB pages */
	unsigned int page_shift = fls(page_size) - 1;
	/* SGE encodes host page size as log2(page) - 10 */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* free-list alignment: at least 32 bytes, else the cache line size */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		/* T4 (or T4-compat mode): single combined pad/pack field. */
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/*
			 * The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the MTU buffer sizes up to the free-list alignment. */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	/* ULP RX TDDP page size: encoded as log2(page) - 12 */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
7889
7890 /**
7891 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7892 * @adap: the adapter
7893 * @page_size: the host's Base Page Size
7894 * @cache_line_size: the host's Cache Line Size
7895 *
7896 * Various registers in T4 contain values which are dependent on the
7897 * host's Base Page and Cache Line Sizes. This function will fix all of
7898 * those registers with the appropriate values as passed in ...
7899 *
7900 * This routine makes changes which are compatible with T4 chips.
7901 */
t4_fixup_host_params(struct adapter * adap,unsigned int page_size,unsigned int cache_line_size)7902 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7903 unsigned int cache_line_size)
7904 {
7905 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7906 T4_LAST_REV);
7907 }
7908
7909 /**
7910 * t4_fw_initialize - ask FW to initialize the device
7911 * @adap: the adapter
7912 * @mbox: mailbox to use for the FW command
7913 *
7914 * Issues a command to FW to partially initialize the device. This
7915 * performs initialization that generally doesn't depend on user input.
7916 */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)7917 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7918 {
7919 struct fw_initialize_cmd c;
7920
7921 memset(&c, 0, sizeof(c));
7922 INIT_CMD(c, INITIALIZE, WRITE);
7923 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7924 }
7925
7926 /**
7927 * t4_query_params_rw - query FW or device parameters
7928 * @adap: the adapter
7929 * @mbox: mailbox to use for the FW command
7930 * @pf: the PF
7931 * @vf: the VF
7932 * @nparams: the number of parameters
7933 * @params: the parameter names
7934 * @val: the parameter values
7935 * @rw: Write and read flag
7936 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7937 *
7938 * Reads the value of FW or device parameters. Up to 7 parameters can be
7939 * queried at once.
7940 */
t4_query_params_rw(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val,int rw,bool sleep_ok)7941 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7942 unsigned int vf, unsigned int nparams, const u32 *params,
7943 u32 *val, int rw, bool sleep_ok)
7944 {
7945 int i, ret;
7946 struct fw_params_cmd c;
7947 __be32 *p = &c.param[0].mnem;
7948
7949 if (nparams > 7)
7950 return -EINVAL;
7951
7952 memset(&c, 0, sizeof(c));
7953 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7954 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7955 V_FW_PARAMS_CMD_PFN(pf) |
7956 V_FW_PARAMS_CMD_VFN(vf));
7957 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7958
7959 for (i = 0; i < nparams; i++) {
7960 *p++ = cpu_to_be32(*params++);
7961 if (rw)
7962 *p = cpu_to_be32(*(val + i));
7963 p++;
7964 }
7965
7966 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7967
7968 /*
7969 * We always copy back the reults, even if there's an error. We'll
7970 * get an error if any of the parameters was unknown to the Firmware,
7971 * but there will be results for the others ... (Older Firmware
7972 * stopped at the first unknown parameter; newer Firmware processes
7973 * them all and flags the unknown parameters with a return value of
7974 * ~0UL.)
7975 */
7976 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7977 *val++ = be32_to_cpu(*p);
7978
7979 return ret;
7980 }
7981
/*
 * Query FW or device parameters, sleeping while awaiting mailbox
 * completion.  Read-only variant of t4_query_params_rw() (rw = 0).
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val,
				  0, true);
}
7989
/*
 * Non-sleeping variant of t4_query_params(): polls for mailbox
 * completion instead of sleeping (sleep_ok = false).
 */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val,
				  0, false);
}
7997
7998 /**
7999 * t4_set_params_timeout - sets FW or device parameters
8000 * @adap: the adapter
8001 * @mbox: mailbox to use for the FW command
8002 * @pf: the PF
8003 * @vf: the VF
8004 * @nparams: the number of parameters
8005 * @params: the parameter names
8006 * @val: the parameter values
8007 * @timeout: the timeout time
8008 *
8009 * Sets the value of FW or device parameters. Up to 7 parameters can be
8010 * specified at once.
8011 */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)8012 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
8013 unsigned int pf, unsigned int vf,
8014 unsigned int nparams, const u32 *params,
8015 const u32 *val, int timeout)
8016 {
8017 struct fw_params_cmd c;
8018 __be32 *p = &c.param[0].mnem;
8019
8020 if (nparams > 7)
8021 return -EINVAL;
8022
8023 memset(&c, 0, sizeof(c));
8024 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
8025 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8026 V_FW_PARAMS_CMD_PFN(pf) |
8027 V_FW_PARAMS_CMD_VFN(vf));
8028 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8029
8030 while (nparams--) {
8031 *p++ = cpu_to_be32(*params++);
8032 *p++ = cpu_to_be32(*val++);
8033 }
8034
8035 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
8036 }
8037
8038 /**
8039 * t4_set_params - sets FW or device parameters
8040 * @adap: the adapter
8041 * @mbox: mailbox to use for the FW command
8042 * @pf: the PF
8043 * @vf: the VF
8044 * @nparams: the number of parameters
8045 * @params: the parameter names
8046 * @val: the parameter values
8047 *
8048 * Sets the value of FW or device parameters. Up to 7 parameters can be
8049 * specified at once.
8050 */
t4_set_params(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val)8051 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
8052 unsigned int vf, unsigned int nparams, const u32 *params,
8053 const u32 *val)
8054 {
8055 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
8056 FW_CMD_MAX_TIMEOUT);
8057 }
8058
/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.  Builds a FW_PFVF_CMD and posts it through the given
 * mailbox; returns the mailbox status (0 on success).
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	/* WRITE request addressed to the given PF/VF. */
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	/* Ingress queue limits: interrupt-capable (NIQFLINT) and plain (NIQ). */
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	/* Channel/port access masks and total egress queue limit. */
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	/* Traffic class, VI count and exact-match MPS filter limit. */
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	/* Capabilities plus the Ethernet/control egress queue limit. */
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8106
/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @vivld: if non-NULL, set to the VI-valid bit from the firmware reply
 * @vin: if non-NULL, set to the VI number from the firmware reply
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     u8 *vivld, u8 *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	/* EXEC request: ALLOC a VI owned by the given PF/VF. */
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	/* portid_pkd/nmac are not byte-swapped — presumably single-byte
	 * fields; confirm against t4fw_interface.h. */
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* Firmware encodes the MAC address count as N-1. */
	c.nmac = nmac - 1;
	/* No rss_size out-pointer means the caller wants no RSS slice. */
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* First address, then any extras at 6-byte strides; the
		 * fallthrough copies exactly the addresses requested. */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHRU */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHRU */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHRU */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	/* Extract the optional outputs from the firmware reply. */
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));

	if (vivld)
		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));

	if (vin)
		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));

	/* Success: return the newly allocated VI id. */
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
8179
8180 /**
8181 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
8182 * @adap: the adapter
8183 * @mbox: mailbox to use for the FW command
8184 * @port: physical port associated with the VI
8185 * @pf: the PF owning the VI
8186 * @vf: the VF owning the VI
8187 * @nmac: number of MAC addresses needed (1 to 5)
8188 * @mac: the MAC addresses of the VI
8189 * @rss_size: size of RSS table slice associated with this VI
8190 *
8191 * backwards compatible and convieniance routine to allocate a Virtual
8192 * Interface with a Ethernet Port Application Function and Intrustion
8193 * Detection System disabled.
8194 */
t4_alloc_vi(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,unsigned int * rss_size,u8 * vivld,u8 * vin)8195 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
8196 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
8197 unsigned int *rss_size, u8 *vivld, u8 *vin)
8198 {
8199 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
8200 vivld, vin, FW_VI_FUNC_ETH, 0);
8201 }
8202
8203
8204 /**
8205 * t4_free_vi - free a virtual interface
8206 * @adap: the adapter
8207 * @mbox: mailbox to use for the FW command
8208 * @pf: the PF owning the VI
8209 * @vf: the VF owning the VI
8210 * @viid: virtual interface identifiler
8211 *
8212 * Free a previously allocated virtual interface.
8213 */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)8214 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8215 unsigned int vf, unsigned int viid)
8216 {
8217 struct fw_vi_cmd c;
8218
8219 memset(&c, 0, sizeof(c));
8220 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8221 F_FW_CMD_REQUEST |
8222 F_FW_CMD_EXEC |
8223 V_FW_VI_CMD_PFN(pf) |
8224 V_FW_VI_CMD_VFN(vf));
8225 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8226 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8227
8228 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8229 }
8230
8231 /**
8232 * t4_set_rxmode - set Rx properties of a virtual interface
8233 * @adap: the adapter
8234 * @mbox: mailbox to use for the FW command
8235 * @viid: the VI id
8236 * @mtu: the new MTU or -1
8237 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8238 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8239 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8240 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8241 * @sleep_ok: if true we may sleep while awaiting command completion
8242 *
8243 * Sets Rx properties of a virtual interface.
8244 */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)8245 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8246 int mtu, int promisc, int all_multi, int bcast, int vlanex,
8247 bool sleep_ok)
8248 {
8249 struct fw_vi_rxmode_cmd c;
8250
8251 /* convert to FW values */
8252 if (mtu < 0)
8253 mtu = M_FW_VI_RXMODE_CMD_MTU;
8254 if (promisc < 0)
8255 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8256 if (all_multi < 0)
8257 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8258 if (bcast < 0)
8259 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8260 if (vlanex < 0)
8261 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8262
8263 memset(&c, 0, sizeof(c));
8264 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8265 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8266 V_FW_VI_RXMODE_CMD_VIID(viid));
8267 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8268 c.mtu_to_vlanexen =
8269 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8270 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8271 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8272 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8273 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8274 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8275 }
8276
/**
 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @vni: the VNI id for the tunnel protocol
 * @vni_mask: mask for the VNI id
 * @dip_hit: to enable DIP match for the MPS entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an MPS entry with specified MAC address and VNI value.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	/* EXACTMAC_VNI entry type selects the VNI-capable TCAM region. */
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	/* ADD_MAC asks firmware to pick a free index for this entry. */
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	/* On success the reply carries the index firmware assigned. */
	if (ret == 0)
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}
8325
/**
 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @idx: index at which to add this entry
 * @port_id: the port index
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @sleep_ok: call is allowed to sleep
 *
 * Adds the mac entry at the specified index using raw mac interface.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Raw TCAM index at which the caller wants this entry programmed */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask; the MAC occupies 6 bytes of the
	 * 8-byte data1/data1m fields, hence the +2 byte offset. */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* Firmware echoes the index; anything else means the
		 * requested slot could not be used. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}
8381
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * entries, since one FW_VI_MAC_CMD can only carry that many. */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* ADD_MAC lets firmware choose a free index per address. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* An index >= max_naddr in the reply means the address was
		 * not stored; fall back to the hash filter if requested. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first chunk should free pre-existing filters. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Partial success (-FW_ENOMEM) still reports the filters we got. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
8474
8475 /**
8476 * t4_free_encap_mac_filt - frees MPS entry at given index
8477 * @adap: the adapter
8478 * @viid: the VI id
8479 * @idx: index of MPS entry to be freed
8480 * @sleep_ok: call is allowed to sleep
8481 *
8482 * Frees the MPS entry at supplied index
8483 *
8484 * Returns a negative error number or zero on success
8485 */
t4_free_encap_mac_filt(struct adapter * adap,unsigned int viid,int idx,bool sleep_ok)8486 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
8487 int idx, bool sleep_ok)
8488 {
8489 struct fw_vi_mac_exact *p;
8490 struct fw_vi_mac_cmd c;
8491 u8 addr[] = {0,0,0,0,0,0};
8492 int ret = 0;
8493 u32 exact;
8494
8495 memset(&c, 0, sizeof(c));
8496 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8497 F_FW_CMD_REQUEST |
8498 F_FW_CMD_WRITE |
8499 V_FW_CMD_EXEC(0) |
8500 V_FW_VI_MAC_CMD_VIID(viid));
8501 exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
8502 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8503 exact |
8504 V_FW_CMD_LEN16(1));
8505 p = c.u.exact;
8506 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8507 V_FW_VI_MAC_CMD_IDX(idx));
8508 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8509
8510 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8511 return ret;
8512 }
8513
8514 /**
8515 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
8516 * @adap: the adapter
8517 * @viid: the VI id
8518 * @addr: the MAC address
8519 * @mask: the mask
8520 * @idx: index of the entry in mps tcam
8521 * @lookup_type: MAC address for inner (1) or outer (0) header
8522 * @port_id: the port index
8523 * @sleep_ok: call is allowed to sleep
8524 *
8525 * Removes the mac entry at the specified index using raw mac interface.
8526 *
8527 * Returns a negative error number on failure.
8528 */
t4_free_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)8529 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
8530 const u8 *addr, const u8 *mask, unsigned int idx,
8531 u8 lookup_type, u8 port_id, bool sleep_ok)
8532 {
8533 struct fw_vi_mac_cmd c;
8534 struct fw_vi_mac_raw *p = &c.u.raw;
8535 u32 raw;
8536
8537 memset(&c, 0, sizeof(c));
8538 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8539 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8540 V_FW_CMD_EXEC(0) |
8541 V_FW_VI_MAC_CMD_VIID(viid));
8542 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8543 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8544 raw |
8545 V_FW_CMD_LEN16(1));
8546
8547 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
8548 FW_VI_MAC_ID_BASED_FREE);
8549
8550 /* Lookup Type. Outer header: 0, Inner header: 1 */
8551 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
8552 V_DATAPORTNUM(port_id));
8553 /* Lookup mask and port mask */
8554 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
8555 V_DATAPORTNUM(M_DATAPORTNUM));
8556
8557 /* Copy the address and the mask */
8558 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8559 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8560
8561 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8562 }
8563
/**
 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the exact-match filter for each of the supplied addresses
 *
 * Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	/* T4 has a smaller MPS CLS SRAM than T5/T6. */
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * entries per FW_VI_MAC_CMD. */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_CMD_EXEC(0) |
				     V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 =
				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
					    V_FW_CMD_LEN16(len16));

		/* MAC_BASED_FREE: firmware looks each address up and frees
		 * its filter, rather than freeing by index. */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* The reply echoes a valid index for each filter that was
		 * actually found and freed. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}
8638
8639 /**
8640 * t4_change_mac - modifies the exact-match filter for a MAC address
8641 * @adap: the adapter
8642 * @mbox: mailbox to use for the FW command
8643 * @viid: the VI id
8644 * @idx: index of existing filter for old value of MAC address, or -1
8645 * @addr: the new MAC address value
8646 * @persist: whether a new MAC allocation should be persistent
8647 * @add_smt: if true also add the address to the HW SMT
8648 *
8649 * Modifies an exact-match filter and sets it to the new MAC address if
8650 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
8651 * latter case the address is added persistently if @persist is %true.
8652 *
8653 * Note that in general it is not possible to modify the value of a given
8654 * filter so the generic way to modify an address filter is to free the one
8655 * being used by the old address value and allocate a new filter for the
8656 * new address value.
8657 *
8658 * Returns a negative error number or the index of the filter with the new
8659 * MAC value. Note that this index may differ from @idx.
8660 */
t4_change_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,u8 * smt_idx)8661 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8662 int idx, const u8 *addr, bool persist, u8 *smt_idx)
8663 {
8664 /* This will add this mac address to the destination TCAM region */
8665 return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0);
8666 }
8667
/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): the VIID is packed with the VI_ENABLE_CMD macro even
	 * though this is a VI_MAC command — presumably the field layout is
	 * identical; confirm against t4fw_interface.h.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	/* HASHVEC entry type selects the 64-bit hash-vector union member. */
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
8695
/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface. Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 *
 * Returns 0 on success or a negative firmware/mailbox error.
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	/* "_ns" = non-sleeping mailbox write variant. */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
8723
8724 /**
8725 * t4_enable_vi - enable/disable a virtual interface
8726 * @adap: the adapter
8727 * @mbox: mailbox to use for the FW command
8728 * @viid: the VI id
8729 * @rx_en: 1=enable Rx, 0=disable Rx
8730 * @tx_en: 1=enable Tx, 0=disable Tx
8731 *
8732 * Enables/disables a virtual interface. Note that setting DCB Enable
8733 * only makes sense when enabling a Virtual Interface ...
8734 */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)8735 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8736 bool rx_en, bool tx_en)
8737 {
8738 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8739 }
8740
/**
 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pi: the Port Information structure
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a Port's Virtual Interface. Note that setting DCB
 * Enable only makes sense when enabling a Virtual Interface ...
 * If the Virtual Interface enable/disable operation is successful,
 * we notify the OS-specific code of a potential Link Status change
 * via the OS Contract API t4_os_link_changed().
 */
int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
			struct port_info *pi,
			bool rx_en, bool tx_en, bool dcb_en)
{
	int ret = t4_enable_vi_params(adap, mbox, pi->viid,
				      rx_en, tx_en, dcb_en);
	if (ret)
		return ret;
	/*
	 * Report "link up" only when both directions are enabled AND the
	 * link itself is currently up.
	 */
	t4_os_link_changed(adap, pi->port_id,
			   rx_en && tx_en && pi->link_cfg.link_ok);
	return 0;
}
8768
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	/* LED flag selects blink mode; blinkdur carries the repeat count. */
	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = cpu_to_be16(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8791
/**
 * t4_iq_stop - stop an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Stops an ingress queue and its associated FLs, if any. This causes
 * any current or future data/messages destined for these queues to be
 * tossed.
 */
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* IQSTOP (not FREE): the queue is quiesced but remains allocated. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8824
/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* Same layout as t4_iq_stop() but with the FREE flag instead. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8855
/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8880
/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_CTRL_CMD_PFN(pf) |
				  V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8905
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8930
8931 /**
8932 * Return the highest speed set in the port capabilities, in Mb/s.
8933 */
t4_link_fwcap_to_speed(fw_port_cap32_t caps)8934 unsigned int t4_link_fwcap_to_speed(fw_port_cap32_t caps)
8935 {
8936 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8937 do { \
8938 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8939 return __speed; \
8940 } while (0)
8941
8942 TEST_SPEED_RETURN(400G, 400000);
8943 TEST_SPEED_RETURN(200G, 200000);
8944 TEST_SPEED_RETURN(100G, 100000);
8945 TEST_SPEED_RETURN(50G, 50000);
8946 TEST_SPEED_RETURN(40G, 40000);
8947 TEST_SPEED_RETURN(25G, 25000);
8948 TEST_SPEED_RETURN(10G, 10000);
8949 TEST_SPEED_RETURN(1G, 1000);
8950 TEST_SPEED_RETURN(100M, 100);
8951
8952 #undef TEST_SPEED_RETURN
8953
8954 return 0;
8955 }
8956
8957 /**
8958 * t4_link_fwcap_to_fwspeed - return highest speed in Port Capabilities
8959 * @acaps: advertised Port Capabilities
8960 *
8961 * Get the highest speed for the port from the advertised Port
8962 * Capabilities. It will be either the highest speed from the list of
8963 * speeds or whatever user has set using ethtool.
8964 */
t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps)8965 fw_port_cap32_t t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps)
8966 {
8967 #define TEST_SPEED_RETURN(__caps_speed) \
8968 do { \
8969 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8970 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8971 } while (0)
8972
8973 TEST_SPEED_RETURN(400G);
8974 TEST_SPEED_RETURN(200G);
8975 TEST_SPEED_RETURN(100G);
8976 TEST_SPEED_RETURN(50G);
8977 TEST_SPEED_RETURN(40G);
8978 TEST_SPEED_RETURN(25G);
8979 TEST_SPEED_RETURN(10G);
8980 TEST_SPEED_RETURN(1G);
8981 TEST_SPEED_RETURN(100M);
8982
8983 #undef TEST_SPEED_RETURN
8984
8985 return 0;
8986 }
8987
/**
 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 * @caps16: a 16-bit Port Capabilities value
 *
 * Returns the equivalent 32-bit Port Capabilities value.  Each 16-bit
 * capability bit maps one-to-one onto its 32-bit counterpart, so the
 * order of the conversions below does not matter.
 */
static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(FORCE_PAUSE);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}
9025
/**
 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 * @caps32: a 32-bit Port Capabilities value
 *
 * Returns the equivalent 16-bit Port Capabilities value. Note that
 * not all 32-bit Port Capabilities can be represented in the 16-bit
 * Port Capabilities and some fields/values may not make it.
 * (Inverse of fwcaps16_to_caps32(); bits with no 16-bit counterpart,
 * e.g. speeds above 100G, are silently dropped.)
 */
static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
{
	fw_port_cap16_t caps16 = 0;

	#define CAP32_TO_CAP16(__cap) \
		do { \
			if (caps32 & FW_PORT_CAP32_##__cap) \
				caps16 |= FW_PORT_CAP_##__cap; \
		} while (0)

	CAP32_TO_CAP16(SPEED_100M);
	CAP32_TO_CAP16(SPEED_1G);
	CAP32_TO_CAP16(SPEED_10G);
	CAP32_TO_CAP16(SPEED_25G);
	CAP32_TO_CAP16(SPEED_40G);
	CAP32_TO_CAP16(SPEED_100G);
	CAP32_TO_CAP16(FC_RX);
	CAP32_TO_CAP16(FC_TX);
	CAP32_TO_CAP16(802_3_PAUSE);
	CAP32_TO_CAP16(802_3_ASM_DIR);
	CAP32_TO_CAP16(ANEG);
	CAP32_TO_CAP16(FORCE_PAUSE);
	CAP32_TO_CAP16(MDIAUTO);
	CAP32_TO_CAP16(MDISTRAIGHT);
	CAP32_TO_CAP16(FEC_RS);
	CAP32_TO_CAP16(FEC_BASER_RS);

	#undef CAP32_TO_CAP16

	return caps16;
}
9065
/**
 * t4_link_set_autoneg - set/clear auto-negotiation in requested caps
 * @pi: the port info
 * @autoneg: non-zero to request auto-negotiation
 * @new_caps: in/out requested Port Capabilities to modify
 *
 * Returns 0 on success, or -ENOTSUP if @autoneg was requested but the
 * port's physical capabilities do not include auto-negotiation.
 */
int t4_link_set_autoneg(struct port_info *pi, u8 autoneg,
			fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t caps = *new_caps;

	if (autoneg) {
		/* Can only honor the request if the hardware supports AN. */
		if (!(lc->pcaps & FW_PORT_CAP32_ANEG))
			return -ENOTSUP;

		caps |= FW_PORT_CAP32_ANEG;
	} else {
		caps &= ~FW_PORT_CAP32_ANEG;
	}

	/* Reset MDI selection; re-enable auto-MDI when supported. */
	caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO)
		caps |= FW_PORT_CAP32_MDIAUTO;

	*new_caps = caps;
	return 0;
}
9088
/**
 * t4_link_set_pause - encode pause/flow-control settings into caps
 * @pi: the port info
 * @pause: requested pause flags (PAUSE_TX/PAUSE_RX/PAUSE_AUTONEG)
 * @new_caps: in/out requested Port Capabilities to modify
 *
 * Clears all flow-control and 802.3 pause bits, then sets the
 * combination matching @pause, advertising the corresponding 802.3
 * PAUSE/ASM_DIR bits only when the port supports them.
 */
void t4_link_set_pause(struct port_info *pi, cc_pause_t pause,
		       fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t caps = *new_caps;

	caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);

	if ((pause & PAUSE_TX) && (pause & PAUSE_RX)) {
		/* Symmetric pause: PAUSE bit alone covers both directions. */
		caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
	} else if (pause & PAUSE_TX) {
		/* TX-only: asymmetric direction bit only. */
		caps |= FW_PORT_CAP32_FC_TX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	} else if (pause & PAUSE_RX) {
		/* RX-only: PAUSE plus ASM_DIR per 802.3 Annex 28B. */
		caps |= FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	}

	/* Without PAUSE_AUTONEG, force the chosen pause settings on. */
	if (!(pause & PAUSE_AUTONEG))
		caps |= FW_PORT_CAP32_FORCE_PAUSE;

	*new_caps = caps;
}
9119
9120 #define T4_LINK_FEC_MASK V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)
9121
t4_link_supported_speed_to_fec(u32 speed)9122 static fw_port_cap32_t t4_link_supported_speed_to_fec(u32 speed)
9123 {
9124 fw_port_cap32_t caps = 0;
9125
9126 switch (speed) {
9127 case 100000:
9128 caps |= FW_PORT_CAP32_FEC_RS;
9129 break;
9130 case 50000:
9131 caps |= FW_PORT_CAP32_FEC_BASER_RS;
9132 break;
9133 case 25000:
9134 caps |= FW_PORT_CAP32_FEC_RS |
9135 FW_PORT_CAP32_FEC_BASER_RS;
9136 break;
9137 default:
9138 break;
9139 }
9140
9141 caps |= FW_PORT_CAP32_FEC_NO_FEC;
9142 return caps;
9143 }
9144
/**
 * t4_link_update_fec - fold requested FEC settings into Port Capabilities
 * @pi: the port info
 * @max_speed: the link's (maximum) speed in Mb/s, used to validate FECs
 * @fec: requested FEC flags (FEC_RS/FEC_BASER_RS/FEC_NONE/FEC_FORCE)
 * @new_caps: in/out requested Port Capabilities to modify
 *
 * Clears all FEC bits and re-applies only those requested FECs that are
 * valid for @max_speed; unsupported requests are logged and ignored.
 * If nothing valid remains, falls back to the speed's default FEC set
 * (auto) with FORCE_FEC cleared.
 */
static void t4_link_update_fec(struct port_info *pi, u32 max_speed,
			       cc_fec_t fec, fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t caps = *new_caps;

	caps &= ~T4_LINK_FEC_MASK;
	if (fec & FEC_RS) {
		switch (max_speed) {
		case 100000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_RS;
			break;
		default:
			CH_ERR(pi->adapter,
			       "Ignoring unsupported RS FEC for speed %u\n",
			       max_speed);
			break;
		}
	}

	if (fec & FEC_BASER_RS) {
		switch (max_speed) {
		case 50000:
		case 25000:
			caps |= FW_PORT_CAP32_FEC_BASER_RS;
			break;
		default:
			CH_ERR(pi->adapter,
			       "Ignoring unsupported BASER FEC for speed %u\n",
			       max_speed);
			break;
		}
	}

	if (fec & FEC_NONE)
		caps |= FW_PORT_CAP32_FEC_NO_FEC;

	if (!(caps & T4_LINK_FEC_MASK)) {
		/* No explicit encoding is requested.
		 * So, default back to AUTO.
		 */
		caps |= t4_link_supported_speed_to_fec(max_speed);
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
	}

	if (fec & FEC_FORCE)
		caps |= FW_PORT_CAP32_FORCE_FEC;

	*new_caps = caps;
}
9195
/**
 * t4_link_set_fec - apply a user FEC request to the port's caps
 * @pi: the port info
 * @fec: requested FEC flags
 * @new_caps: in/out requested Port Capabilities to modify
 *
 * Returns 0 on success or -ENOTSUP when the port supports no FEC at all.
 */
int t4_link_set_fec(struct port_info *pi, cc_fec_t fec,
		    fw_port_cap32_t *new_caps)
{
	struct link_config *lc = &pi->link_cfg;
	u32 max_speed;

	if (!(lc->pcaps & T4_LINK_FEC_MASK))
		return -ENOTSUP;

	max_speed = t4_link_fwcap_to_speed(lc->link_caps);
	/* Link might be down. In that case consider the max
	 * speed advertised
	 */
	if (!max_speed)
		max_speed = t4_link_fwcap_to_speed(lc->acaps);

	t4_link_update_fec(pi, max_speed, fec, new_caps);
	return 0;
}
9215
9216 #define T4_LINK_SPEED_MASK V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED)
9217
/**
 * t4_link_set_speed - enable/disable one advertised speed
 * @pi: the port info
 * @speed: the speed capability bit to enable or disable
 * @en: non-zero to enable @speed, zero to disable it
 * @new_caps: in/out requested Port Capabilities to modify
 *
 * Returns -ENOTSUP if @speed is not within the port's physical
 * capabilities, -EINVAL if disabling @speed would leave no lower speed
 * to fall back to, else 0.
 */
int t4_link_set_speed(struct port_info *pi, fw_port_cap32_t speed, u8 en,
		      fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;
	struct link_config *lc = &pi->link_cfg;

	if (((lc->pcaps & T4_LINK_SPEED_MASK) & speed) != speed)
		return -ENOTSUP;

	if (en)
		caps |= speed;
	else
		caps &= ~speed;

	/* If no speeds are left, then pick the next highest speed. */
	if (!(caps & T4_LINK_SPEED_MASK)) {
		tcaps = CAP32_SPEED(lc->pcaps);
		tcaps &= ~speed;
		/* Keep only capability bits below @speed, i.e. slower
		 * speeds, then take the fastest of those.
		 */
		tcaps &= (speed - 1);
		if (tcaps == 0)
			return -EINVAL;

		caps |= t4_link_fwcap_to_fwspeed(tcaps);
	}

	*new_caps = caps;
	return 0;
}
9246
/*
 * Sanitize the requested speed caps: when auto-negotiation is not
 * requested, collapse the advertised speed set down to the single
 * highest speed (a forced link can only run at one speed).
 * Note: @lc is currently unused; kept for signature symmetry with
 * t4_link_sanitize_fec_caps().
 */
static void t4_link_sanitize_speed_caps(struct link_config *lc,
					fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;

	/* Sanitize Speeds when AN is disabled */
	if (!(caps & FW_PORT_CAP32_ANEG)) {
		tcaps = CAP32_SPEED(caps);
		caps &= ~T4_LINK_SPEED_MASK;
		caps |= t4_link_fwcap_to_fwspeed(tcaps);
	}

	*new_caps = caps;
}
9261
/*
 * Sanitize the requested FEC caps against what the chosen speed and the
 * port/firmware actually support, rewriting the FEC bits in *new_caps.
 */
static void t4_link_sanitize_fec_caps(struct link_config *lc,
				      fw_port_cap32_t *new_caps)
{
	fw_port_cap32_t tcaps, caps = *new_caps;
	u32 max_speed;

	/* Sanitize FECs when supported */
	if (CAP32_FEC(lc->pcaps)) {
		/* Start from the FECs valid for the requested speed. */
		max_speed = t4_link_fwcap_to_speed(caps);
		tcaps = t4_link_supported_speed_to_fec(max_speed);
		if (caps & FW_PORT_CAP32_FORCE_FEC) {
			/* If the current chosen FEC params are
			 * completely invalid, then disable FEC.
			 * Else, pick only the FECs requested
			 * by user or the defaults supported by
			 * the speed.
			 */
			if (!(tcaps & CAP32_FEC(caps)))
				tcaps = FW_PORT_CAP32_FEC_NO_FEC;
			else
				tcaps &= CAP32_FEC(caps);
		}
	} else {
		/* Always force NO_FEC when FECs are not supported */
		tcaps = FW_PORT_CAP32_FEC_NO_FEC;
	}

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) {
		tcaps |= FW_PORT_CAP32_FORCE_FEC;
	} else {
		/* Older firmware doesn't allow driver to send request
		 * to try multiple FECs for FEC_AUTO case. So, clear
		 * the FEC caps for FEC_AUTO case because the older
		 * firmware will try all supported FECs on its own.
		 */
		caps &= ~FW_PORT_CAP32_FORCE_FEC;
		/* (tcaps & (tcaps - 1)) != 0 <=> more than one FEC bit set. */
		if (tcaps & (tcaps - 1))
			tcaps = 0;
	}

	caps &= ~T4_LINK_FEC_MASK;
	caps |= tcaps;

	*new_caps = caps;
}
9307
/*
 * Sanitize a full set of requested Port Capabilities: fix up the speed
 * and FEC bits, then drop anything the port cannot physically do.
 */
static void t4_link_sanitize_caps(struct link_config *lc,
				  fw_port_cap32_t *new_caps)
{
	t4_link_sanitize_speed_caps(lc, new_caps);
	t4_link_sanitize_fec_caps(lc, new_caps);

	/*
	 * Masking with pcaps is a no-op when every requested bit is
	 * already supported, so it can be applied unconditionally.
	 */
	*new_caps &= lc->pcaps;
}
9322
9323 /**
9324 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
9325 * @adapter: the adapter
9326 * @mbox: the Firmware Mailbox to use
9327 * @port: the Port ID
9328 * @lc: the Port's Link Configuration
9329 * @rcap: new link configuration
9330 * @sleep_ok: if true we may sleep while awaiting command completion
9331 * @timeout: time to wait for command to finish before timing out
9332 * (negative implies @sleep_ok=false)
9333 *
9334 * Set up a port's MAC and PHY according to a desired link configuration.
9335 * - If the PHY can auto-negotiate first decide what to advertise, then
9336 * enable/disable auto-negotiation as desired, and reset.
9337 * - If the PHY does not auto-negotiate just reset it.
9338 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
9339 * otherwise do it later based on the outcome of auto-negotiation.
9340 */
t4_link_l1cfg_core(struct adapter * adapter,unsigned int mbox,unsigned int port,struct link_config * lc,fw_port_cap32_t rcap,bool sleep_ok,int timeout)9341 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
9342 unsigned int port, struct link_config *lc,
9343 fw_port_cap32_t rcap, bool sleep_ok, int timeout)
9344 {
9345 unsigned int fw_caps = adapter->params.fw_caps_support;
9346 struct fw_port_cmd cmd;
9347 int ret;
9348
9349 t4_link_sanitize_caps(lc, &rcap);
9350
9351 memset(&cmd, 0, sizeof(cmd));
9352 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9353 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9354 V_FW_PORT_CMD_PORTID(port));
9355 cmd.action_to_len16 =
9356 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9357 ? FW_PORT_ACTION_L1_CFG
9358 : FW_PORT_ACTION_L1_CFG32) |
9359 FW_LEN16(cmd));
9360 if (fw_caps == FW_CAPS16)
9361 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
9362 else
9363 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
9364 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
9365 sleep_ok, timeout);
9366
9367 /* Unfortunately, even if the Requested Port Capabilities "fit" within
9368 * the Physical Port Capabilities, some combinations of features may
9369 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
9370 * Error Correction. So if the Firmware rejects the L1 Configure
9371 * request, flag that here.
9372 */
9373 if (ret) {
9374 CH_ERR(adapter,
9375 "Requested Port Capabilities 0x%x rejected, error %d\n",
9376 rcap, -ret);
9377 return ret;
9378 }
9379
9380 return 0;
9381 }
9382
/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port by issuing an L1
 * Configure request whose only capability is ANEG.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	unsigned int fw_caps = adap->params.fw_caps_support;
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	/* Select the 16- or 32-bit L1 Configure form per firmware support. */
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(c));
	if (fw_caps == FW_CAPS16)
		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	else
		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
9411
/**
 * t4_init_link_config - initialize a link's SW state
 * @pi: the port info
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void t4_init_link_config(struct port_info *pi, fw_port_cap32_t pcaps,
				fw_port_cap32_t acaps)
{
	u32 max_speed = t4_link_fwcap_to_speed(acaps);
	struct link_config *lc = &pi->link_cfg;
	fw_port_cap32_t new_caps = acaps;

	/* If initializing for the first time or if port module changed,
	 * then overwrite the saved link params with the new port module
	 * caps.
	 */
	if (lc->admin_caps == 0 || lc->pcaps != pcaps) {
		/* Default the admin caps to the advertised caps with
		 * auto-selected FEC for the advertised max speed.
		 */
		t4_link_update_fec(pi, max_speed, FEC_AUTO, &new_caps);
		lc->admin_caps = new_caps;
	}

	lc->pcaps = pcaps;
	lc->acaps = acaps;
	/* No link-partner or negotiated caps yet; link state unknown. */
	lc->lpacaps = 0;
	lc->link_caps = 0;
}
9442
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};
	const unsigned int nreasons = sizeof(reason) / sizeof(reason[0]);

	return (link_down_rc < nreasons) ? reason[link_down_rc] :
	    "Bad Reason Code";
}
9467
/**
 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 * 32-bit Port Capabilities value.
 */
static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
	fw_port_cap32_t linkattr = 0;

	/*
	 * Unfortunately the format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else ...
	 */
	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
		linkattr |= FW_PORT_CAP32_FC_TX;
	/* The LSPEED sub-field reuses the 16-bit speed capability bits. */
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}
9503
/**
 * t4_handle_get_port_info - process a FW reply message
 * @pi: the port info
 * @rpl: start of the FW message
 *
 * Processes a GET_PORT_INFO FW reply message (either the legacy 16-bit
 * or the 32-bit form), updating the port's link_config state and
 * notifying the OS layer of module and link-status changes.
 */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
	const struct fw_port_cmd *cmd = (const void *)rpl;
	int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int link_ok, linkdnrc;
	enum fw_port_type port_type;
	enum fw_port_module_type mod_type;
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;

	/*
	 * Extract the various fields from the Port Information message.
	 */
	switch (action) {
	case FW_PORT_ACTION_GET_PORT_INFO: {
		/* Legacy 16-bit form: caps must be widened to 32 bits. */
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
		linkattr = lstatus_to_fwcap(lstatus);
		break;
	}

	case FW_PORT_ACTION_GET_PORT_INFO32: {
		/* Native 32-bit form: fields used as-is. */
		u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);

		link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
		linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
		break;
	}

	default:
		CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n",
		       be32_to_cpu(cmd->action_to_len16));
		return;
	}

	/*
	 * Reset state for communicating new Transceiver Module status and
	 * whether the OS-dependent layer wants us to redo the current
	 * "sticky" L1 Configure Link Parameters.
	 */
	lc->new_module = false;
	lc->redo_l1cfg = false;

	if (mod_type != pi->mod_type) {
		/*
		 * Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlaying Port Type reported to the Host Drivers. As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier. Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		/*
		 * Record new Module Type information.
		 */
		pi->mod_type = mod_type;

		/*
		 * Let the OS-dependent layer know if we have a new
		 * Transceiver Module inserted.
		 */
		lc->new_module = t4_is_inserted_mod_type(mod_type);

		if (lc->new_module)
			t4_init_link_config(pi, pcaps, acaps);
		t4_os_portmod_changed(adapter, pi->port_id);
	}

	if (link_ok != lc->link_ok || acaps != lc->acaps ||
	    lpacaps != lc->lpacaps || linkattr != lc->link_caps) {
		/* something changed */
		if (!link_ok && lc->link_ok) {
			/* Link just went down: record and report why. */
			lc->link_down_rc = linkdnrc;
			CH_WARN_RATELIMIT(adapter,
				"Port %d link down, reason: %s\n",
				pi->tx_chan, t4_link_down_rc_str(linkdnrc));
		}

		lc->link_ok = link_ok;
		lc->acaps = acaps;
		lc->lpacaps = lpacaps;
		lc->link_caps = linkattr;

		t4_os_link_changed(adapter, pi->port_id, link_ok);
	}

	/*
	 * If we have a new Transceiver Module and the OS-dependent code has
	 * told us that it wants us to redo whatever "sticky" L1 Configuration
	 * Link Parameters are set, do that now.
	 */
	if (lc->new_module && lc->redo_l1cfg) {
		int ret;

		/*
		 * Save the current L1 Configuration and restore it if an
		 * error occurs. We probably should fix the l1_cfg*()
		 * routines not to change the link_config when an error
		 * occurs ...
		 */
		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc,
				       lc->admin_caps);
		if (ret) {
			CH_WARN(adapter,
				"Attempt to update new Transceiver Module settings failed\n");
		}
	}
	lc->new_module = false;
	lc->redo_l1cfg = false;
}
9640
9641 /**
9642 * t4_update_port_info - retrieve and update port information if changed
9643 * @pi: the port_info
9644 *
9645 * We issue a Get Port Information Command to the Firmware and, if
9646 * successful, we check to see if anything is different from what we
9647 * last recorded and update things accordingly.
9648 */
t4_update_port_info(struct port_info * pi)9649 int t4_update_port_info(struct port_info *pi)
9650 {
9651 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9652 struct fw_port_cmd port_cmd;
9653 int ret;
9654
9655 memset(&port_cmd, 0, sizeof port_cmd);
9656 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9657 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9658 V_FW_PORT_CMD_PORTID(pi->lport));
9659 port_cmd.action_to_len16 = cpu_to_be32(
9660 V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9661 ? FW_PORT_ACTION_GET_PORT_INFO
9662 : FW_PORT_ACTION_GET_PORT_INFO32) |
9663 FW_LEN16(port_cmd));
9664 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9665 &port_cmd, sizeof(port_cmd), &port_cmd);
9666 if (ret)
9667 return ret;
9668
9669 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
9670 return 0;
9671 }
9672
9673 /**
9674 * t4_get_link_params - retrieve basic link parameters for given port
9675 * @pi: the port
9676 * @link_okp: value return pointer for link up/down
9677 * @speedp: value return pointer for speed (Mb/s)
9678 * @mtup: value return pointer for mtu
9679 *
9680 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
9681 * and MTU for a specified port. A negative error is returned on
9682 * failure; 0 on success.
9683 */
t4_get_link_params(struct port_info * pi,unsigned int * link_okp,unsigned int * speedp,unsigned int * mtup)9684 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
9685 unsigned int *speedp, unsigned int *mtup)
9686 {
9687 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9688 struct fw_port_cmd port_cmd;
9689 unsigned int action, link_ok, mtu;
9690 fw_port_cap32_t linkattr;
9691 int ret;
9692
9693 memset(&port_cmd, 0, sizeof port_cmd);
9694 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9695 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9696 V_FW_PORT_CMD_PORTID(pi->tx_chan));
9697 action = (fw_caps == FW_CAPS16
9698 ? FW_PORT_ACTION_GET_PORT_INFO
9699 : FW_PORT_ACTION_GET_PORT_INFO32);
9700 port_cmd.action_to_len16 = cpu_to_be32(
9701 V_FW_PORT_CMD_ACTION(action) |
9702 FW_LEN16(port_cmd));
9703 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9704 &port_cmd, sizeof(port_cmd), &port_cmd);
9705 if (ret)
9706 return ret;
9707
9708 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
9709 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
9710
9711 link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS);
9712 linkattr = lstatus_to_fwcap(lstatus);
9713 mtu = be16_to_cpu(port_cmd.u.info.mtu);;
9714 } else {
9715 u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
9716
9717 link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32);
9718 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
9719 mtu = G_FW_PORT_CMD_MTU32(
9720 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
9721 }
9722
9723 *link_okp = link_ok;
9724 *speedp = t4_link_fwcap_to_speed(linkattr);
9725 *mtup = mtu;
9726
9727 return 0;
9728 }
9729
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Returns 0 if the message was consumed and -EINVAL for an
 *	unrecognized opcode/action combination.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	/* The opcode is always the first byte of any firmware message. */
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ... We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		int i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/*
		 * Map the firmware's channel ID back to our port_info.
		 * NOTE(review): if no port's lport matches chan, pi is left
		 * pointing at the last port examined (or stays NULL when the
		 * adapter has no ports) -- confirm the firmware can never
		 * report an unknown channel here.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->lport == chan)
				break;
		}

		t4_handle_get_port_info(pi, rpl);
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
9771
9772 /**
9773 * get_pci_mode - determine a card's PCI mode
9774 * @adapter: the adapter
9775 * @p: where to store the PCI settings
9776 *
9777 * Determines a card's PCI mode and associated parameters, such as speed
9778 * and width.
9779 */
get_pci_mode(struct adapter * adapter,struct pci_params * p)9780 static void get_pci_mode(struct adapter *adapter,
9781 struct pci_params *p)
9782 {
9783 u16 val;
9784 u32 pcie_cap;
9785
9786 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9787 if (pcie_cap) {
9788 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9789 p->speed = val & PCI_EXP_LNKSTA_CLS;
9790 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9791 }
9792 }
9793
9794 /**
9795 * t4_wait_dev_ready - wait till to reads of registers work
9796 *
9797 * Right after the device is RESET is can take a small amount of time
9798 * for it to respond to register reads. Until then, all reads will
9799 * return either 0xff...ff or 0xee...ee. Return an error if reads
9800 * don't work within a reasonable time frame.
9801 */
t4_wait_dev_ready(struct adapter * adapter)9802 int t4_wait_dev_ready(struct adapter *adapter)
9803 {
9804 u32 whoami;
9805
9806 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9807 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9808 return 0;
9809
9810 msleep(500);
9811 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9812 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9813 return 0;
9814
9815 CH_ERR(adapter, "Device didn't become ready for access, "
9816 "whoami = %#x\n", whoami);
9817 return -EIO;
9818 }
9819
/* Describes one explicitly-supported (non-standard) serial Flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* JEDEC Read-ID result to match */
	u32 size_mb;	/* total size in bytes -- e.g. 4 << 20 for a 4MB
			 * part -- the "_mb" in the name notwithstanding */
};
9824
/**
 *	t4_get_flash_params - probe the serial Flash part and record its size
 *	@adapter: the adapter
 *
 *	Issues a Read ID command on the SF interface, decodes the returned
 *	manufacturer/density bytes into a Flash size, and stores the size
 *	and 64KB-sector count in adapter->params.sf_size/sf_nsec.  Returns
 *	0 on success or a negative error from the SF primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /* 1MB */
		case 0x15: size = 1 << 21; break; /* 2MB */
		case 0x16: size = 1 << 22; break; /* 4MB */
		case 0x17: size = 1 << 23; break; /* 8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		case 0x19: size = 1 << 25; break; /* 32MB */
		case 0x20: size = 1 << 26; break; /* 64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /* 32MB */
		case 0x17: size = 1 << 26; break; /* 64MB */
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /* 8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;
	}

	case 0xef: { /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /* 8MB */
		case 0x18: size = 1 << 24; break; /* 16MB */
		}
		break;
	}
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
9967
/*
 * Program the PCIe Completion Timeout range for the adapter.  @range is
 * written into the low four bits of the Device Control 2 register; all
 * other bits are preserved.  Does nothing if no PCIe capability exists.
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u32 cap;
	u16 devctl2;

	cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (cap == 0)
		return;

	t4_os_pci_read_cfg2(adapter, cap + PCI_EXP_DEVCTL2, &devctl2);
	devctl2 = (devctl2 & 0xfff0) | range;
	t4_os_pci_write_cfg2(adapter, cap + PCI_EXP_DEVCTL2, devctl2);
}
9982
/**
 *	t4_get_chip_type - Determine chip type from device ID
 *	@adap: the adapter
 *	@ver: adapter version (CHELSIO_T4/T5/T6 or the *_FPGA variants)
 *
 *	Combines @ver with the hardware revision from PL_REV into a
 *	CHELSIO_CHIP_CODE()-encoded chip type, OR-ing in CHELSIO_CHIP_FPGA
 *	for FPGA versions.  NOTE(review): on failure a negative errno is
 *	returned through the enum chip_type return type -- callers must
 *	treat negative values as errors.
 */
enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
{
	enum chip_type chip = 0;
	/* PL_REV holds the silicon revision of the chip. */
	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));

	/* Retrieve adapter's device ID */
	switch (ver) {
	case CHELSIO_T4_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T4:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T5:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	case CHELSIO_T6_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T6:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		break;
	default:
		CH_ERR(adap, "Device %d is not supported\n",
			adap->params.pci.device_id);
		return -EINVAL;
	}

	/* T4A1 chip is no longer supported */
	if (chip == T4_A1) {
		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
		return -EINVAL;
	}
	return chip;
}
10026
/**
 *	t4_prep_pf - prepare SW and HW for PF operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables on each PF.  Returns 0 on success,
 *	or a negative error if the device never became ready for register
 *	access or the chip type is unsupported.
 */
int t4_prep_pf(struct adapter *adapter)
{
	int ret, ver;

	/* Make sure register reads work before we touch anything else. */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);


	/* Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);

	/* Derive the chip generation from the device ID and size the
	 * per-architecture resource parameters accordingly.
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = t4_get_chip_type(adapter, ver);
	if (is_t4(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t5(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t6(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
	} else {
		CH_ERR(adapter, "Device %d is not supported\n",
			adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	if (is_fpga(adapter->params.chip)) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
10115
/**
 *	t4_prep_master_pf - prepare SW for master PF operations
 *	@adapter: the adapter
 *
 *	Runs the common per-PF preparation and additionally probes the
 *	serial Flash, which only the master PF needs to manage.  Returns
 *	0 on success or the first (negative) error encountered.
 */
int t4_prep_master_pf(struct adapter *adapter)
{
	int ret;

	ret = t4_prep_pf(adapter);
	if (ret < 0)
		return ret;

	ret = t4_get_flash_params(adapter);
	if (ret >= 0)
		return 0;

	CH_ERR(adapter,
	       "Unable to retrieve Flash parameters ret = %d\n", -ret);
	return ret;
}
10138
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@reset: if true perform a HW reset
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables.
 *	NOTE(review): @reset is currently ignored; all preparation is
 *	delegated to t4_prep_master_pf().
 */
int t4_prep_adapter(struct adapter *adapter, bool reset)
{
	return t4_prep_master_pf(adapter);
}
10151
10152 /**
10153 * t4_shutdown_adapter - shut down adapter, host & wire
10154 * @adapter: the adapter
10155 *
10156 * Perform an emergency shutdown of the adapter and stop it from
10157 * continuing any further communication on the ports or DMA to the
10158 * host. This is typically used when the adapter and/or firmware
10159 * have crashed and we want to prevent any further accidental
10160 * communication with the rest of the world. This will also force
10161 * the port Link Status to go down -- if register writes work --
10162 * which should help our peers figure out that we're down.
10163 */
t4_shutdown_adapter(struct adapter * adapter)10164 int t4_shutdown_adapter(struct adapter *adapter)
10165 {
10166 int port;
10167
10168 t4_intr_disable(adapter);
10169 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
10170 for_each_port(adapter, port) {
10171 u32 a_port_cfg = is_t4(adapter->params.chip) ?
10172 PORT_REG(port, A_XGMAC_PORT_CFG) :
10173 T5_PORT_REG(port, A_MAC_PORT_CFG);
10174
10175 t4_write_reg(adapter, a_port_cfg,
10176 t4_read_reg(adapter, a_port_cfg)
10177 & ~V_SIGNAL_DET(1));
10178 }
10179 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
10180
10181 return 0;
10182 }
10183
10184 /**
10185 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
10186 * @adapter: the adapter
10187 * @qid: the Queue ID
10188 * @qtype: the Ingress or Egress type for @qid
10189 * @user: true if this request is for a user mode queue
10190 * @pbar2_qoffset: BAR2 Queue Offset
10191 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
10192 *
10193 * Returns the BAR2 SGE Queue Registers information associated with the
10194 * indicated Absolute Queue ID. These are passed back in return value
10195 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
10196 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
10197 *
10198 * This may return an error which indicates that BAR2 SGE Queue
10199 * registers aren't available. If an error is not returned, then the
10200 * following values are returned:
10201 *
10202 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
10203 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
10204 *
10205 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
10206 * require the "Inferred Queue ID" ability may be used. E.g. the
10207 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
10208 * then these "Inferred Queue ID" register may not be used.
10209 */
t4_bar2_sge_qregs(struct adapter * adapter,unsigned int qid,enum t4_bar2_qtype qtype,int user,u64 * pbar2_qoffset,unsigned int * pbar2_qid)10210 int t4_bar2_sge_qregs(struct adapter *adapter,
10211 unsigned int qid,
10212 enum t4_bar2_qtype qtype,
10213 int user,
10214 u64 *pbar2_qoffset,
10215 unsigned int *pbar2_qid)
10216 {
10217 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
10218 u64 bar2_page_offset, bar2_qoffset;
10219 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
10220
10221 /* T4 doesn't support BAR2 SGE Queue registers for kernel
10222 * mode queues.
10223 */
10224 if (!user && is_t4(adapter->params.chip))
10225 return -EINVAL;
10226
10227 /* Get our SGE Page Size parameters.
10228 */
10229 page_shift = adapter->params.sge.hps + 10;
10230 page_size = 1 << page_shift;
10231
10232 /* Get the right Queues per Page parameters for our Queue.
10233 */
10234 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
10235 ? adapter->params.sge.eq_qpp
10236 : adapter->params.sge.iq_qpp);
10237 qpp_mask = (1 << qpp_shift) - 1;
10238
10239 /* Calculate the basics of the BAR2 SGE Queue register area:
10240 * o The BAR2 page the Queue registers will be in.
10241 * o The BAR2 Queue ID.
10242 * o The BAR2 Queue ID Offset into the BAR2 page.
10243 */
10244 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
10245 bar2_qid = qid & qpp_mask;
10246 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
10247
10248 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
10249 * hardware will infer the Absolute Queue ID simply from the writes to
10250 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
10251 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
10252 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
10253 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
10254 * from the BAR2 Page and BAR2 Queue ID.
10255 *
10256 * One important censequence of this is that some BAR2 SGE registers
10257 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
10258 * there. But other registers synthesize the SGE Queue ID purely
10259 * from the writes to the registers -- the Write Combined Doorbell
10260 * Buffer is a good example. These BAR2 SGE Registers are only
10261 * available for those BAR2 SGE Register areas where the SGE Absolute
10262 * Queue ID can be inferred from simple writes.
10263 */
10264 bar2_qoffset = bar2_page_offset;
10265 bar2_qinferred = (bar2_qid_offset < page_size);
10266 if (bar2_qinferred) {
10267 bar2_qoffset += bar2_qid_offset;
10268 bar2_qid = 0;
10269 }
10270
10271 *pbar2_qoffset = bar2_qoffset;
10272 *pbar2_qid = bar2_qid;
10273 return 0;
10274 }
10275
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.  Returns 0 on success, -ENXIO when the
 *	parameters aren't in the dedicated register and we can't reach the
 *	firmware, or a mailbox error.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The register encodes the entry count in units of 128. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
10342
10343 /**
10344 * t4_init_sge_params - initialize adap->params.sge
10345 * @adapter: the adapter
10346 *
10347 * Initialize various fields of the adapter's SGE Parameters structure.
10348 */
t4_init_sge_params(struct adapter * adapter)10349 int t4_init_sge_params(struct adapter *adapter)
10350 {
10351 struct sge_params *sge_params = &adapter->params.sge;
10352 u32 hps, qpp;
10353 unsigned int s_hps, s_qpp;
10354
10355 /* Extract the SGE Page Size for our PF.
10356 */
10357 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
10358 s_hps = (S_HOSTPAGESIZEPF0 +
10359 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
10360 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
10361
10362 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
10363 */
10364 s_qpp = (S_QUEUESPERPAGEPF0 +
10365 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
10366 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
10367 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10368 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
10369 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10370
10371 return 0;
10372 }
10373
10374 /**
10375 * t4_init_tp_params - initialize adap->params.tp
10376 * @adap: the adapter
10377 * @sleep_ok: if true we may sleep while awaiting command completion
10378 *
10379 * Initialize various fields of the adapter's TP Parameters structure.
10380 */
t4_init_tp_params(struct adapter * adap,bool sleep_ok)10381 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
10382 {
10383 u32 param, val, v;
10384 int chan, ret;
10385
10386 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
10387 adap->params.tp.tre = G_TIMERRESOLUTION(v);
10388 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
10389
10390 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
10391 for (chan = 0; chan < NCHAN; chan++)
10392 adap->params.tp.tx_modq[chan] = chan;
10393
10394 /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
10395 * Configuration.
10396 */
10397
10398 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10399 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
10400 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
10401
10402 /* Read current value */
10403 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
10404 ¶m, &val);
10405 if (ret == 0) {
10406 CH_INFO(adap,
10407 "Current filter mode/mask 0x%x:0x%x\n",
10408 G_FW_PARAMS_PARAM_FILTER_MODE(val),
10409 G_FW_PARAMS_PARAM_FILTER_MASK(val));
10410 adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
10411 adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
10412 } else {
10413 CH_WARN(adap,
10414 "Reading filter mode/mask not supported via fw api, "
10415 "falling back to older indirect-reg-read \n");
10416
10417 /* Incase of older-fw (which doesn't expose the api
10418 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
10419 * the fw api) combination, fall-back to older method of reading
10420 * the filter mode from indirect-register
10421 */
10422 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
10423 A_TP_VLAN_PRI_MAP, sleep_ok);
10424
10425 /* With the older-fw and newer-driver combination we might run
10426 * into an issue when user wants to use hash filter region but
10427 * the filter_mask is zero, in this case filter_mask validation
10428 * is tough. To avoid that we set the filter_mask same as filter
10429 * mode, which will behave exactly as the older way of ignoring
10430 * the filter mask validation.
10431 */
10432 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
10433 }
10434
10435 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
10436 A_TP_INGRESS_CONFIG, sleep_ok);
10437
10438 /* For T6, cache the adapter's compressed error vector
10439 * and passing outer header info for encapsulated packets.
10440 */
10441 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
10442 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
10443 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
10444 }
10445
10446 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
10447 * shift positions of several elements of the Compressed Filter Tuple
10448 * for this adapter which we need frequently ...
10449 */
10450 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
10451 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
10452 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
10453 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
10454 adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
10455 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
10456 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
10457 F_ETHERTYPE);
10458 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
10459 F_MACMATCH);
10460 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
10461 F_MPSHITTYPE);
10462 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
10463 F_FRAGMENTATION);
10464 return 0;
10465 }
10466
10467 /**
10468 * t4_filter_field_shift - calculate filter field shift
10469 * @adap: the adapter
10470 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
10471 *
10472 * Return the shift position of a filter field within the Compressed
10473 * Filter Tuple. The filter field is specified via its selection bit
10474 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
10475 */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)10476 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
10477 {
10478 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
10479 unsigned int sel;
10480 int field_shift;
10481
10482 if ((filter_mode & filter_sel) == 0)
10483 return -1;
10484
10485 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10486 switch (filter_mode & sel) {
10487 case F_FCOE:
10488 field_shift += W_FT_FCOE;
10489 break;
10490 case F_PORT:
10491 field_shift += W_FT_PORT;
10492 break;
10493 case F_VNIC_ID:
10494 field_shift += W_FT_VNIC_ID;
10495 break;
10496 case F_VLAN:
10497 field_shift += W_FT_VLAN;
10498 break;
10499 case F_TOS:
10500 field_shift += W_FT_TOS;
10501 break;
10502 case F_PROTOCOL:
10503 field_shift += W_FT_PROTOCOL;
10504 break;
10505 case F_ETHERTYPE:
10506 field_shift += W_FT_ETHERTYPE;
10507 break;
10508 case F_MACMATCH:
10509 field_shift += W_FT_MACMATCH;
10510 break;
10511 case F_MPSHITTYPE:
10512 field_shift += W_FT_MPSHITTYPE;
10513 break;
10514 case F_FRAGMENTATION:
10515 field_shift += W_FT_FRAGMENTATION;
10516 break;
10517 }
10518 }
10519 return field_shift;
10520 }
10521
10522 /**
10523 * t4_create_filter_info - return Compressed Filter Value/Mask tuple
10524 * @adapter: the adapter
10525 * @filter_value: Filter Value return value pointer
10526 * @filter_mask: Filter Mask return value pointer
10527 * @fcoe: FCoE filter selection
10528 * @port: physical port filter selection
10529 * @vnic: Virtual NIC ID filter selection
10530 * @vlan: VLAN ID filter selection
10531 * @vlan_pcp: VLAN Priority Code Point
10532 * @vlan_dei: VLAN Drop Eligibility Indicator
 * @tos: Type Of Service filter selection
10534 * @protocol: IP Protocol filter selection
10535 * @ethertype: Ethernet Type filter selection
10536 * @macmatch: MPS MAC Index filter selection
10537 * @matchtype: MPS Hit Type filter selection
10538 * @frag: IP Fragmentation filter selection
10539 *
10540 * Construct a Compressed Filter Value/Mask tuple based on a set of
10541 * "filter selection" values. For each passed filter selection value
10542 * which is greater than or equal to 0, we put that value into the
10543 * constructed Filter Value and the appropriate mask into the Filter
 * Mask. If a filter selection is specified which is not currently
 * configured into the hardware, an error will be returned. Otherwise
 * the constructed Filter Value/Mask tuple will be returned via the
10547 * specified return value pointers and success will be returned.
10548 *
10549 * All filter selection values and the returned Filter Value/Mask values
10550 * are in Host-Endian format.
10551 */
int t4_create_filter_info(const struct adapter *adapter,
			  u64 *filter_value, u64 *filter_mask,
			  int fcoe, int port, int vnic,
			  int vlan, int vlan_pcp, int vlan_dei,
			  int tos, int protocol, int ethertype,
			  int macmatch, int matchtype, int frag)
{
	const struct tp_params *tp = &adapter->params.tp;
	u64 v, m;

	/*
	 * If any selected filter field isn't enabled, return an error.
	 * (A negative field shift means the field isn't in the current
	 * filter mode; a negative selection value means "not selected".)
	 */
#define BAD_FILTER(__field) \
	((__field) >= 0 && tp->__field##_shift < 0)
	if (BAD_FILTER(fcoe) ||
	    BAD_FILTER(port) ||
	    BAD_FILTER(vnic) ||
	    BAD_FILTER(vlan) ||
	    BAD_FILTER(tos) ||
	    BAD_FILTER(protocol) ||
	    BAD_FILTER(ethertype) ||
	    BAD_FILTER(macmatch) ||
	    BAD_FILTER(matchtype) ||
	    BAD_FILTER(frag))
		return -EINVAL;
#undef BAD_FILTER

	/*
	 * We have to have VLAN ID selected if we want to also select on
	 * either the Priority Code Point or Drop Eligibility Indicator
	 * fields.
	 */
	if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
		return -EINVAL;

	/*
	 * Construct Filter Value and Mask.  Field values must be widened
	 * to u64 _before_ shifting: the Compressed Filter Tuple is wider
	 * than 32 bits, so shifting a plain int by a field position >= 32
	 * would be undefined behavior and would lose the high bits.
	 */
	v = m = 0;
#define SET_FILTER_FIELD(__field, __width) \
	do { \
		if ((__field) >= 0) { \
			const int shift = tp->__field##_shift; \
			\
			v |= (u64)(__field) << shift; \
			m |= ((1ULL << (__width)) - 1) << shift; \
		} \
	} while (0)
	SET_FILTER_FIELD(fcoe, W_FT_FCOE);
	SET_FILTER_FIELD(port, W_FT_PORT);
	SET_FILTER_FIELD(tos, W_FT_TOS);
	SET_FILTER_FIELD(protocol, W_FT_PROTOCOL);
	SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
	SET_FILTER_FIELD(macmatch, W_FT_MACMATCH);
	SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
	SET_FILTER_FIELD(frag, W_FT_FRAGMENTATION);
#undef SET_FILTER_FIELD

	/*
	 * We handle VNIC ID and VLANs separately because they're slightly
	 * different than the rest of the fields.  Both require that a
	 * corresponding "valid" bit be set in the Filter Value and Mask.
	 * These bits are in the top bit of the field.  Additionally, we can
	 * select the Priority Code Point and Drop Eligibility Indicator
	 * fields for VLANs as an option.  Remember that the format of a VLAN
	 * Tag is:
	 *
	 * bits:  3  1      12
	 *     +---+-+------------+
	 *     |PCP|D|   VLAN ID  |
	 *     +---+-+------------+
	 */
	if (vnic >= 0) {
		v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
		m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
	}
	if (vlan >= 0) {
		v |= ((1ULL << (W_FT_VLAN-1)) | vlan) << tp->vlan_shift;
		m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;

		if (vlan_dei >= 0) {
			/* u64 casts: vlan_shift + 12/13 can exceed 31 */
			v |= (u64)vlan_dei << (tp->vlan_shift + 12);
			m |= (u64)0x7 << (tp->vlan_shift + 12);
		}
		if (vlan_pcp >= 0) {
			v |= (u64)vlan_pcp << (tp->vlan_shift + 13);
			m |= (u64)0x7 << (tp->vlan_shift + 13);
		}
	}

	/*
	 * Pass back computed Filter Value and Mask; return success.
	 */
	*filter_value = v;
	*filter_mask = m;
	return 0;
}
10650
t4_init_rss_mode(struct adapter * adap,int mbox)10651 int t4_init_rss_mode(struct adapter *adap, int mbox)
10652 {
10653 int i, ret;
10654 struct fw_rss_vi_config_cmd rvc;
10655
10656 memset(&rvc, 0, sizeof(rvc));
10657
10658 for_each_port(adap, i) {
10659 struct port_info *p = adap2pinfo(adap, i);
10660 rvc.op_to_viid =
10661 cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
10662 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10663 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
10664 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
10665 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
10666 if (ret)
10667 return ret;
10668 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
10669 }
10670 return 0;
10671 }
10672
t4_init_portmirror(struct port_info * pi,int mbox,int port,int pf,int vf)10673 static int t4_init_portmirror(struct port_info *pi, int mbox,
10674 int port, int pf, int vf)
10675 {
10676 struct adapter *adapter = pi->adapter;
10677 int ret;
10678 u8 vivld = 0, vin = 0;
10679
10680 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
10681 &vivld, &vin);
10682 if (ret < 0)
10683 return ret;
10684
10685 pi->viid_mirror = ret;
10686
10687 /* If fw supports returning the VIN as part of FW_VI_CMD,
10688 * save the returned values.
10689 */
10690 if (adapter->params.viid_smt_extn_support) {
10691 pi->vivld_mirror = vivld;
10692 pi->vin_mirror = vin;
10693 } else {
10694 /* Retrieve the values from VIID */
10695 pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror);
10696 pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror);
10697 }
10698
10699 CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
10700 port, pf, pi->vin_mirror);
10701 return 0;
10702 }
10703
/*
 * Set up traffic mirroring on every port present in the adapter's portvec
 * (only Port0 when @enable_ringbb is set).  Returns 0 or the first error
 * from t4_init_portmirror().
 */
int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf,
		   bool enable_ringbb)
{
	int i, port = 0, ret;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		/*
		 * A ringbackbone configuration mirrors only Port0, so stop
		 * after the first iteration.
		 */
		if (enable_ringbb && i != 0)
			break;

		/* Skip physical port numbers absent from portvec. */
		while ((adap->params.portvec & (1 << port)) == 0)
			port++;

		ret = t4_init_portmirror(pi, mbox, port, pf, vf);
		if (ret != 0)
			return ret;
		port++;
	}
	return 0;
}
10727
10728 /**
10729 * t4_init_portinfo_viid - allocate a virtual interface and initialize
10730 * port_info
10731 * @pi: the port_info
10732 * @mbox: mailbox to use for the FW command
10733 * @port: physical port associated with the VI
10734 * @pf: the PF owning the VI
10735 * @vf: the VF owning the VI
10736 * @mac: the MAC address of the VI
10737 * @alloc_vi: Indicator to alloc VI
10738 *
10739 * Allocates a virtual interface for the given physical port. If @mac is
10740 * not %NULL it contains the MAC address of the VI as assigned by FW.
10741 * @mac should be large enough to hold an Ethernet address.
10742 * Returns < 0 on error.
10743 */
int t4_init_portinfo_viid(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[], bool alloc_vi)
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/*
	 * If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		/* Success means 32-bit caps are supported; cache the answer. */
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Ask the firmware for the port's current information. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 = cpu_to_be32(
		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/*
	 * Extract the various fields from the Port Information message.
	 * The 16-bit and 32-bit capability formats use different reply
	 * layouts; 16-bit capability masks are converted to 32-bit form.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
			     ? G_FW_PORT_CMD_MDIOADDR(lstatus)
			     : -1);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
			     ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	if (alloc_vi) {
		u8 vivld = 0, vin = 0;

		/* Allocate the VI; on success the return value is the VIID. */
		ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
				  &rss_size, &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->rss_size = rss_size;

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adapter->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin = G_FW_VIID_VIN(pi->viid);
		}
	}

	/* The physical port number doubles as TX/RX channel identifiers. */
	pi->tx_chan = port;
	pi->lport = port;
	pi->rx_chan = port;
	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	t4_init_link_config(pi, pcaps, acaps);
	return 0;
}
10846
10847 /**
10848 * t4_init_portinfo - allocate a virtual interface and initialize port_info
10849 * @pi: the port_info
10850 * @mbox: mailbox to use for the FW command
10851 * @port: physical port associated with the VI
10852 * @pf: the PF owning the VI
10853 * @vf: the VF owning the VI
10854 * @mac: the MAC address of the VI
10855 *
10856 * Allocates a virtual interface for the given physical port. If @mac is
10857 * not %NULL it contains the MAC address of the VI as assigned by FW.
10858 * @mac should be large enough to hold an Ethernet address.
10859 * Returns < 0 on error.
10860 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	/* Same as t4_init_portinfo_viid() with VI allocation requested. */
	return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true);
}
10866
t4_port_init(struct adapter * adap,int mbox,int pf,int vf)10867 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
10868 {
10869 u8 addr[6];
10870 int ret, i, j = 0;
10871
10872 for_each_port(adap, i) {
10873 struct port_info *pi = adap2pinfo(adap, i);
10874
10875 while ((adap->params.portvec & (1 << j)) == 0)
10876 j++;
10877
10878 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
10879 if (ret)
10880 return ret;
10881
10882 t4_os_set_hw_addr(adap, i, addr);
10883 j++;
10884 }
10885 return 0;
10886 }
10887
10888 /**
10889 * t4_read_cimq_cfg - read CIM queue configuration
10890 * @adap: the adapter
10891 * @base: holds the queue base addresses in bytes
10892 * @size: holds the queue sizes in bytes
10893 * @thres: holds the queue full thresholds in bytes
10894 *
10895 * Returns the current configuration of the CIM queues, starting with
10896 * the IBQs, then the OBQs.
10897 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
	unsigned int q, cfg;

	/* IBQs: select each queue, then read its configuration word. */
	for (q = 0; q < CIM_NUM_IBQ; q++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(q));
		cfg = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* base/size are in 256-byte units, threshold in 8-byte units */
		*base++ = G_CIMQBASE(cfg) * 256;
		*size++ = G_CIMQSIZE(cfg) * 256;
		*thres++ = G_QUEFULLTHRSH(cfg) * 8;
	}
	/* OBQs follow; they have no full threshold. */
	for (q = 0; q < cim_num_obq; q++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(q));
		cfg = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(cfg) * 256;
		*size++ = G_CIMQSIZE(cfg) * 256;
	}
}
10922
10923 /**
10924 * t4_read_cim_ibq - read the contents of a CIM inbound queue
10925 * @adap: the adapter
10926 * @qid: the queue index
10927 * @data: where to store the queue contents
10928 * @n: capacity of @data in 32-bit words
10929 *
10930 * Reads the contents of the selected CIM queue starting at address 0 up
10931 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
10932 * error and the number of 32-bit words actually read on success.
10933 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	const unsigned int nwords = CIM_IBQ_SIZE * 4;
	unsigned int addr;
	int i, attempts, err;

	/* Only IBQs 0-5 exist, and reads must be whole 4-word units. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	/* IBQs are laid out back-to-back in the debug address space. */
	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable debug access before returning. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
10964
10965 /**
10966 * t4_read_cim_obq - read the contents of a CIM outbound queue
10967 * @adap: the adapter
10968 * @qid: the queue index
10969 * @data: where to store the queue contents
10970 * @n: capacity of @data in 32-bit words
10971 *
10972 * Reads the contents of the selected CIM queue starting at address 0 up
10973 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
10974 * error and the number of 32-bit words actually read on success.
10975 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
	unsigned int addr, v, nwords;
	int i, err;

	/* The queue must exist and reads must be whole 4-word units. */
	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up this OBQ's base and size from the CIM queue config. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable debug access before returning. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}
11007
11008 /**
11009 * t4_cim_read - read a block from CIM internal address space
11010 * @adap: the adapter
11011 * @addr: the start address within the CIM address space
11012 * @n: number of words to read
11013 * @valp: where to store the result
11014 *
 *	Reads a block of 4-byte words from the CIM internal address space.
11016 */
t4_cim_read(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)11017 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
11018 unsigned int *valp)
11019 {
11020 int ret = 0;
11021
11022 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
11023 return -EBUSY;
11024
11025 for ( ; !ret && n--; addr += 4) {
11026 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
11027 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
11028 0, 5, 2);
11029 if (!ret)
11030 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
11031 }
11032 return ret;
11033 }
11034
11035 /**
11036 * t4_cim_write - write a block into CIM internal address space
11037 * @adap: the adapter
11038 * @addr: the start address within the CIM address space
11039 * @n: number of words to write
11040 * @valp: set of values to write
11041 *
 *	Writes a block of 4-byte words into the CIM internal address space.
11043 */
t4_cim_write(struct adapter * adap,unsigned int addr,unsigned int n,const unsigned int * valp)11044 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
11045 const unsigned int *valp)
11046 {
11047 int ret = 0;
11048
11049 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
11050 return -EBUSY;
11051
11052 for ( ; !ret && n--; addr += 4) {
11053 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
11054 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
11055 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
11056 0, 5, 2);
11057 }
11058 return ret;
11059 }
11060
/* Convenience wrapper: write a single 4-byte word into the CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
11066
11067 /**
11068 * t4_cim_read_la - read CIM LA capture buffer
11069 * @adap: the adapter
11070 * @la_buf: where to store the LA data
11071 * @wrptr: the HW write pointer within the capture buffer
11072 *
11073 * Reads the contents of the CIM LA buffer with the most recent entry at
11074 * the end of the returned data and with the entry at @wrptr first.
11075 * We try to leave the LA in the running state we find it in.
11076 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA configuration so it can be restored on exit. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the hardware write pointer (oldest entry). */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Request a read of entry idx ... */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* ... the read-enable bit clears once the data is ready. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= M_UPDBGLARDPTR;
	}
restart:
	/* Restart the LA if it was running when we were called, preserving
	 * the first error (if any) encountered above.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
11135
11136 /**
11137 * t4_tp_read_la - read TP LA capture buffer
11138 * @adap: the adapter
11139 * @la_buf: where to store the LA data
11140 * @wrptr: the HW write pointer within the capture buffer
11141 *
11142 * Reads the contents of the TP LA buffer with the most recent entry at
11143 * the end of the returned data and with the entry at @wrptr first.
11144 * We leave the LA in the running state we find it in.
11145 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the low 16 configuration bits for restoration on exit. */
	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* NOTE(review): in capture modes >= 2 a clear WHLF bit appears to
	 * mean the entry at the write pointer is only half captured —
	 * confirm against the TP debug LA register documentation.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep only the config bits, clear the read pointer field, and
	 * reapply the configured LA mask before issuing reads.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	/* Read the whole buffer, oldest entry first. */
	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)			/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
11182
11183 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
11184 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
11185 * state for more than the Warning Threshold then we'll issue a warning about
11186 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
11187 * appears to be hung every Warning Repeat second till the situation clears.
11188 * If the situation clears, we'll note that as well.
11189 */
11190 #define SGE_IDMA_WARN_THRESH 1
11191 #define SGE_IDMA_WARN_REPEAT 300
11192
11193 /**
11194 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
11195 * @adapter: the adapter
11196 * @idma: the adapter IDMA Monitor state
11197 *
11198 * Initialize the state of an SGE Ingress DMA Monitor.
11199 */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)11200 void t4_idma_monitor_init(struct adapter *adapter,
11201 struct sge_idma_monitor_state *idma)
11202 {
11203 /* Initialize the state variables for detecting an SGE Ingress DMA
11204 * hang. The SGE has internal counters which count up on each clock
11205 * tick whenever the SGE finds its Ingress DMA State Engines in the
11206 * same state they were on the previous clock tick. The clock used is
11207 * the Core Clock so we have a limit on the maximum "time" they can
11208 * record; typically a very small number of seconds. For instance,
11209 * with a 600MHz Core Clock, we can only count up to a bit more than
11210 * 7s. So we'll synthesize a larger counter in order to not run the
11211 * risk of having the "timers" overflow and give us the flexibility to
11212 * maintain a Hung SGE State Machine of our own which operates across
11213 * a longer time frame.
11214 */
11215 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
11216 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
11217 }
11218
11219 /**
11220 * t4_idma_monitor - monitor SGE Ingress DMA state
11221 * @adapter: the adapter
11222 * @idma: the adapter IDMA Monitor state
11223 * @hz: number of ticks/second
11224 * @ticks: number of ticks since the last IDMA Monitor call
11225 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* Evaluate both IDMA channels independently. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
11308
11309 /**
 *	t4_set_vf_mac_acl - Set MAC address for the specified VF
11311 * @adapter: The adapter
11312 * @vf: one of the VFs instantiated by the specified PF
11313 * @naddr: the number of MAC addresses
11314 * @addr: the MAC address(es) to be set to the specified VF
11315 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_WRITE |
				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
				    V_FW_ACL_MAC_CMD_VFN(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* Each PF (0-3) owns its own MAC address slot in the command. */
	switch (adapter->pf) {
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
11349
11350 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
11351 * functions
11352 */
11353
11354 /**
11355 * t4_read_pace_tbl - read the pace table
11356 * @adap: the adapter
11357 * @pace_vals: holds the returned values
11358 *
11359 * Returns the values of TP's pace table in microseconds.
11360 */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])11361 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
11362 {
11363 unsigned int i, v;
11364
11365 for (i = 0; i < NTX_SCHED; i++) {
11366 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
11367 v = t4_read_reg(adap, A_TP_PACE_TABLE);
11368 pace_vals[i] = dack_ticks_to_usec(adap, v);
11369 }
11370 }
11371
11372 /**
11373 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
11374 * @adap: the adapter
11375 * @sched: the scheduler index
11376 * @kbps: the byte rate in Kbps
11377 * @ipg: the interpacket delay in tenths of nanoseconds
11378 * @sleep_ok: if true we may sleep while awaiting command completion
11379 *
11380 * Return the current configuration of a HW Tx scheduler.
11381 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each config word; the odd-numbered
		 * one lives in the upper 16 bits.
		 */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;	/* bytes per tick */
		cpt = v & 0xff;		/* clocks per tick */
		if (!cpt) {
			*kbps = 0;	/* scheduler disabled */
		} else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}
11410
11411 /**
11412 * t4_load_cfg - download config file
11413 * @adap: the adapter
11414 * @cfg_data: the cfg text file to write
11415 * @size: text file size
11416 *
11417 * Write the supplied config text file to the card's serial flash.
11418 */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11419 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
11420 {
11421 int ret, i, n, cfg_addr;
11422 unsigned int addr;
11423 unsigned int flash_cfg_start_sec;
11424 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11425
11426 cfg_addr = t4_flash_cfg_addr(adap);
11427 if (cfg_addr < 0)
11428 return cfg_addr;
11429
11430 addr = cfg_addr;
11431 flash_cfg_start_sec = addr / SF_SEC_SIZE;
11432
11433 if (size > FLASH_CFG_MAX_SIZE) {
11434 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
11435 FLASH_CFG_MAX_SIZE);
11436 return -EFBIG;
11437 }
11438
11439 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
11440 sf_sec_size);
11441 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11442 flash_cfg_start_sec + i - 1);
11443 /*
11444 * If size == 0 then we're simply erasing the FLASH sectors associated
11445 * with the on-adapter Firmware Configuration File.
11446 */
11447 if (ret || size == 0)
11448 goto out;
11449
11450 /* this will write to the flash up to SF_PAGE_SIZE at a time */
11451 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11452 if ( (size - i) < SF_PAGE_SIZE)
11453 n = size - i;
11454 else
11455 n = SF_PAGE_SIZE;
11456 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
11457 if (ret)
11458 goto out;
11459
11460 addr += SF_PAGE_SIZE;
11461 cfg_data += SF_PAGE_SIZE;
11462 }
11463
11464 out:
11465 if (ret)
11466 CH_ERR(adap, "config file %s failed %d\n",
11467 (size == 0 ? "clear" : "download"), ret);
11468 return ret;
11469 }
11470
11471 /**
11472 * t5_fw_init_extern_mem - initialize the external memory
11473 * @adap: the adapter
11474 *
11475 * Initializes the external memory on T5.
11476 */
t5_fw_init_extern_mem(struct adapter * adap)11477 int t5_fw_init_extern_mem(struct adapter *adap)
11478 {
11479 u32 params[1], val[1];
11480 int ret;
11481
11482 if (!is_t5(adap->params.chip))
11483 return 0;
11484
11485 val[0] = 0xff; /* Initialize all MCs */
11486 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11487 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
11488 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
11489 FW_CMD_MAX_TIMEOUT);
11490
11491 return ret;
11492 }
11493
11494 /* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure; byte array,
				 * read via le16_to_cpu() elsewhere in this
				 * file, i.e. stored little-endian.
				 */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
11500
11501 /* Legacy PCI Expansion ROM Header */
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];	/* Initialization entry point */
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure (little-endian) */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
11510
11511 /* EFI PCI Expansion ROM Header */
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
	/*
	 * Compression type definition
	 * 0x0: uncompressed
	 * 0x1: Compressed
	 * 0x2-0xFFFF: Reserved
	 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
11529
11530 /* PCI Data Structure Format */
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification (little-endian) */
	u8	device_id[2];	/* Device Identification (little-endian) */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
	/*
	 * PCI Expansion ROM Code Types
	 * 0x00: Intel IA-32, PC-AT compatible. Legacy
	 * 0x01: Open Firmware standard for PCI. FCODE
	 * 0x02: Hewlett-Packard PA RISC. HP reserved
	 * 0x03: EFI Image. EFI
	 * 0x04-0xFF: Reserved.
	 */
	u8	indicator;	/* Indicator. Bit 7 set identifies the last
				 * image in the ROM (see modify_device_id).
				 */
	u8	reserved[2];	/* Reserved */
} pcir_data_t; /* PCI__DATA_STRUCTURE */
11553
11554 /* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 x 512B chunks = 512KB max */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" read as a little-endian u32 */
};
11564
/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Walk every chained image in the ROM and patch its device ID
	 * where the code type permits it.
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			       le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Legacy image. Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: EFI image. Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match the current adapter.
			 * NOTE(review): the ID is stored without a
			 * cpu_to_le16() conversion, unlike the
			 * le16_to_cpu() reads above -- confirm this is
			 * only run on little-endian hosts.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Zero the stored checksum so it doesn't
			 * contribute to the sum recomputed below.
			 */
			header->cksum = 0x0;

			/*
			 * Sum every byte of the image (size512 is in
			 * 512-byte units).
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * The legacy checksum makes the image sum to zero;
			 * write the negated sum directly at the cksum
			 * field's byte offset (7) in the image.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * EFI image: patch the Device ID only; EFI images
			 * carry no legacy byte checksum to recompute.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Bit 7 of the indicator marks the last image in the ROM;
		 * stop once it is seen.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Advance to the next image in the chain.
		 * NOTE(review): a corrupt image with size512 == 0 would
		 * loop forever here -- callers validate the ROM signature
		 * first, but not the size; confirm acceptable.
		 */
		cur_header += header->size512 * 512;
	}
}
11643
11644 #ifdef CHELSIO_T4_DIAGS
/*
 * t4_erase_sf - Erase entire serial Flash region
 * @adapter: the adapter
 *
 * Clears the entire serial flash region.
 */
t4_erase_sf(struct adapter * adap)11651 int t4_erase_sf(struct adapter *adap)
11652 {
11653 unsigned int nsectors;
11654 int ret;
11655
11656 nsectors = FLASH_END_SEC;
11657 if (nsectors > adap->params.sf_nsec)
11658 nsectors = adap->params.sf_nsec;
11659
11660 // Erase all sectors of flash before and including the FW.
11661 // Flash layout is in t4_hw.h.
11662 ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
11663 if (ret)
11664 CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
11665 return ret;
11666 }
11667 #endif
11668
11669 /*
11670 * t4_load_boot - download boot flash
11671 * @adapter: the adapter
11672 * @boot_data: the boot image to write
11673 * @boot_addr: offset in flash to write boot_data
11674 * @size: image size
11675 *
11676 * Write the supplied boot image to the card's serial flash.
11677 * The boot image has the following sections: a 28-byte header and the
11678 * boot image.
11679 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset ;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	/* boot_addr is given in 1KB units; convert to a byte offset. */
	unsigned int boot_sector = (boot_addr * 1024 );
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 *
	 * NOTE(review): these checks run after the sectors above were
	 * already erased, so a rejected image still leaves the option
	 * ROM blank -- confirm that is intentional.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 *
	 * NOTE(review): this loop assumes size is a non-zero multiple of
	 * SF_PAGE_SIZE; since size is unsigned, any other value would wrap
	 * and run away -- confirm callers guarantee page-aligned images.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Finally commit the header page, marking the image complete. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
11800
11801 /*
11802 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
11803 * @adapter: the adapter
11804 *
11805 * Return the address within the flash where the OptionROM Configuration
11806 * is stored, or an error if the device FLASH is too small to contain
11807 * a OptionROM Configuration.
11808 */
t4_flash_bootcfg_addr(struct adapter * adapter)11809 static int t4_flash_bootcfg_addr(struct adapter *adapter)
11810 {
11811 /*
11812 * If the device FLASH isn't large enough to hold a Firmware
11813 * Configuration File, return an error.
11814 */
11815 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
11816 return -ENOSPC;
11817
11818 return FLASH_BOOTCFG_START;
11819 }
11820
t4_load_bootcfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)11821 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
11822 {
11823 int ret, i, n, cfg_addr;
11824 unsigned int addr;
11825 unsigned int flash_cfg_start_sec;
11826 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11827
11828 cfg_addr = t4_flash_bootcfg_addr(adap);
11829 if (cfg_addr < 0)
11830 return cfg_addr;
11831
11832 addr = cfg_addr;
11833 flash_cfg_start_sec = addr / SF_SEC_SIZE;
11834
11835 if (size > FLASH_BOOTCFG_MAX_SIZE) {
11836 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
11837 FLASH_BOOTCFG_MAX_SIZE);
11838 return -EFBIG;
11839 }
11840
11841 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
11842 sf_sec_size);
11843 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11844 flash_cfg_start_sec + i - 1);
11845
11846 /*
11847 * If size == 0 then we're simply erasing the FLASH sectors associated
11848 * with the on-adapter OptionROM Configuration File.
11849 */
11850 if (ret || size == 0)
11851 goto out;
11852
11853 /* this will write to the flash up to SF_PAGE_SIZE at a time */
11854 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11855 if ( (size - i) < SF_PAGE_SIZE)
11856 n = size - i;
11857 else
11858 n = SF_PAGE_SIZE;
11859 ret = t4_write_flash(adap, addr, n, cfg_data, 0);
11860 if (ret)
11861 goto out;
11862
11863 addr += SF_PAGE_SIZE;
11864 cfg_data += SF_PAGE_SIZE;
11865 }
11866
11867 out:
11868 if (ret)
11869 CH_ERR(adap, "boot config data %s failed %d\n",
11870 (size == 0 ? "clear" : "download"), ret);
11871 return ret;
11872 }
11873
11874 /**
11875 * t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH
11876 * @adap: the adapter
11877 * @cfg_data: where to store the read OptionROM configuration data
11878 *
11879 * Read the current OptionROM configuration from FLASH and write to the
11880 * buffer @cfg_data supplied.
11881 */
int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size)
{
	u32 *ptr = (u32 *)cfg_data;
	int i, n, cfg_addr;
	int ret = 0;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too big, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EINVAL;
	}

	/* Locate the OptionROM Configuration region in FLASH. */
	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	/*
	 * t4_read_flash works in 32-bit words, so convert the byte count.
	 * NOTE(review): the loop below then steps by SF_PAGE_SIZE through
	 * a *word* count while advancing cfg_addr by n*4 bytes -- the
	 * units look mixed (chunks of SF_PAGE_SIZE words, not bytes);
	 * confirm against t4_read_flash's contract before relying on it
	 * for sizes above SF_PAGE_SIZE words.
	 */
	size = size / sizeof (u32);
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ( (size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;

		ret = t4_read_flash(adap, cfg_addr, n, ptr, 0);
		if (ret)
			goto out;

		/* Advance by n words: n*4 bytes of flash, n u32 entries. */
		cfg_addr += (n*4);
		ptr += n;
	}

out:
	return ret;
}
11916
/**
 *	t4_set_filter_mode - configure the optional components of filter tuples
 *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets the filter mode by selecting the optional components to enable
 *	in filter tuples.  Returns 0 on success and a negative error if the
 *	requested mode needs more bits than are available for optional
 *	components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
		       bool sleep_ok)
{
	/*
	 * Bit widths of the optional tuple fields, indexed by the
	 * S_FCOE..S_FRAGMENTATION field numbers.  The table is never
	 * written, so declare it const to keep it in read-only storage.
	 */
	static const u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	/* Add up the bits consumed by the requested components ... */
	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];

	/*
	 * ... and refuse any mode that needs more optional-component
	 * bits than the hardware provides.
	 */
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;

	/* Commit the new filter mode to TP. */
	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);

	return 0;
}
11945
11946 /**
11947 * t4_clr_port_stats - clear port statistics
11948 * @adap: the adapter
11949 * @idx: the port index
11950 *
11951 * Clear HW statistics for the given port.
11952 */
t4_clr_port_stats(struct adapter * adap,int idx)11953 void t4_clr_port_stats(struct adapter *adap, int idx)
11954 {
11955 unsigned int i;
11956 u32 bgmap = t4_get_mps_bg_map(adap, idx);
11957 u32 port_base_addr;
11958
11959 if (is_t4(adap->params.chip))
11960 port_base_addr = PORT_BASE(idx);
11961 else
11962 port_base_addr = T5_PORT_BASE(idx);
11963
11964 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
11965 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
11966 t4_write_reg(adap, port_base_addr + i, 0);
11967 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
11968 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
11969 t4_write_reg(adap, port_base_addr + i, 0);
11970 for (i = 0; i < 4; i++)
11971 if (bgmap & (1 << i)) {
11972 t4_write_reg(adap,
11973 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
11974 t4_write_reg(adap,
11975 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
11976 }
11977 }
11978
11979 /**
11980 * t4_i2c_io - read/write I2C data from adapter
11981 * @adap: the adapter
11982 * @port: Port number if per-port device; <0 if not
11983 * @devid: per-port device ID or absolute device ID
11984 * @offset: byte offset into device I2C space
11985 * @len: byte length of I2C space data
11986 * @buf: buffer in which to return I2C data for read
11987 * buffer which holds the I2C data for write
11988 * @write: if true, do a write; else do a read
11989 * Reads/Writes the I2C data from/to the indicated device and location.
11990 */
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf, bool write)
{
	struct fw_ldst_cmd cmd, rpl;
	unsigned int chunk_max = sizeof(cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Reject transfers that would span an I2C page boundary. */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Build the common part of the LDST command once. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
	cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	cmd.u.i2c.did = devid;

	/* Move the data in chunks no larger than the command payload. */
	while (len > 0) {
		unsigned int chunk = (len < chunk_max) ? len : chunk_max;

		cmd.u.i2c.boffset = offset;
		cmd.u.i2c.blen = chunk;

		if (write)
			memcpy(cmd.u.i2c.data, buf, chunk);

		ret = t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd),
				 write ? NULL : &rpl);
		if (ret)
			break;

		if (!write)
			memcpy(buf, rpl.u.i2c.data, chunk);
		offset += chunk;
		buf += chunk;
		len -= chunk;
	}

	return ret;
}
12040
/* Convenience wrapper: read @len bytes from an I2C device via t4_i2c_io. */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
}
12048
/* Convenience wrapper: write @len bytes to an I2C device via t4_i2c_io. */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
}
12056
12057 /**
12058 * t4_sge_ctxt_rd - read an SGE context through FW
12059 * @adap: the adapter
12060 * @mbox: mailbox to use for the FW command
12061 * @cid: the context id
12062 * @ctype: the context type
12063 * @data: where to store the context data
12064 *
12065 * Issues a FW command through the given mailbox to read an SGE context.
12066 */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
		   enum ctxt_type ctype, u32 *data)
{
	int ret;
	unsigned int addrspace;
	struct fw_ldst_cmd c;

	/*
	 * Map the context type to its LDST address space.  (The original
	 * code reused @ret for this, which obscured that the value is an
	 * address-space selector, not an error code.)
	 */
	switch (ctype) {
	case CTXT_EGRESS:
		addrspace = FW_LDST_ADDRSPC_SGE_EGRC;
		break;
	case CTXT_INGRESS:
		addrspace = FW_LDST_ADDRSPC_SGE_INGC;
		break;
	case CTXT_FLM:
		addrspace = FW_LDST_ADDRSPC_SGE_FLMC;
		break;
	default:
		addrspace = FW_LDST_ADDRSPC_SGE_CONMC;
		break;
	}

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					V_FW_LDST_CMD_ADDRSPACE(addrspace));
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.physid = cpu_to_be32(cid);

	/* On success copy the six context data words out to the caller. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
	}
	return ret;
}
12100
12101 /**
12102 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
12103 * @adap: the adapter
12104 * @cid: the context id
12105 * @ctype: the context type
12106 * @data: where to store the context data
12107 *
12108 * Reads an SGE context directly, bypassing FW. This is only for
12109 * debugging when FW is unavailable.
12110 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int reg, ret;

	/* Kick off the indirect context read and wait for BUSY to clear. */
	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (ret != 0)
		return ret;

	/* Copy out the six context data words. */
	for (reg = A_SGE_CTXT_DATA0; reg <= A_SGE_CTXT_DATA5; reg += 4)
		*data++ = t4_read_reg(adap, reg);

	return ret;
}
12123
t4_sched_config(struct adapter * adapter,int type,int minmaxen)12124 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
12125 {
12126 struct fw_sched_cmd cmd;
12127
12128 memset(&cmd, 0, sizeof(cmd));
12129 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12130 F_FW_CMD_REQUEST |
12131 F_FW_CMD_WRITE);
12132 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12133
12134 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
12135 cmd.u.config.type = type;
12136 cmd.u.config.minmaxen = minmaxen;
12137
12138 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12139 NULL, 1);
12140 }
12141
t4_sched_params(struct adapter * adapter,int channel,int cls,int level,int mode,int type,int rateunit,int ratemode,int minrate,int maxrate,int weight,int pktsize,int burstsize)12142 int t4_sched_params(struct adapter *adapter,
12143 int channel, int cls,
12144 int level, int mode, int type,
12145 int rateunit, int ratemode,
12146 int minrate, int maxrate, int weight,
12147 int pktsize, int burstsize)
12148 {
12149 struct fw_sched_cmd cmd;
12150
12151 memset(&cmd, 0, sizeof(cmd));
12152 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12153 F_FW_CMD_REQUEST |
12154 F_FW_CMD_WRITE);
12155 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12156
12157 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12158 cmd.u.params.type = type;
12159 cmd.u.params.level = level;
12160 cmd.u.params.mode = mode;
12161 cmd.u.params.ch = channel;
12162 cmd.u.params.cl = cls;
12163 cmd.u.params.unit = rateunit;
12164 cmd.u.params.rate = ratemode;
12165 cmd.u.params.min = cpu_to_be32(minrate);
12166 cmd.u.params.max = cpu_to_be32(maxrate);
12167 cmd.u.params.weight = cpu_to_be16(weight);
12168 cmd.u.params.pktsize = cpu_to_be16(pktsize);
12169 cmd.u.params.burstsize = cpu_to_be16(burstsize);
12170
12171 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12172 NULL, 1);
12173 }
12174
t4_read_sched_params(struct adapter * adapter,int channel,int cls,int * level,int * mode,int * type,int * rateunit,int * ratemode,int * minrate,int * maxrate,int * weight,int * pktsize,int * burstsize)12175 int t4_read_sched_params(struct adapter *adapter,
12176 int channel, int cls,
12177 int *level, int *mode, int *type,
12178 int *rateunit, int *ratemode,
12179 int *minrate, int *maxrate, int *weight,
12180 int *pktsize, int *burstsize)
12181 {
12182 struct fw_sched_cmd cmd;
12183 int ret = 0;
12184
12185 memset(&cmd, 0, sizeof(cmd));
12186 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12187 F_FW_CMD_REQUEST |
12188 F_FW_CMD_READ);
12189 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12190 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12191 cmd.u.params.ch = channel;
12192 cmd.u.params.cl = cls;
12193
12194 ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
12195 &cmd, 1);
12196 if (ret)
12197 goto out;
12198
12199 *level = cmd.u.params.level;
12200 *mode = cmd.u.params.mode;
12201 *type = cmd.u.params.type;
12202 *rateunit = cmd.u.params.unit;
12203 *ratemode = cmd.u.params.rate;
12204 *minrate = be32_to_cpu(cmd.u.params.min);
12205 *maxrate = be32_to_cpu(cmd.u.params.max);
12206 *weight = be16_to_cpu(cmd.u.params.weight);
12207 *pktsize = be16_to_cpu(cmd.u.params.pktsize);
12208 *burstsize = be16_to_cpu(cmd.u.params.burstsize);
12209
12210 out:
12211 return ret;
12212 }
12213
12214 /*
12215 * t4_config_watchdog - configure (enable/disable) a watchdog timer
12216 * @adapter: the adapter
12217 * @mbox: mailbox to use for the FW command
12218 * @pf: the PF owning the queue
12219 * @vf: the VF owning the queue
12220 * @timeout: watchdog timeout in ms
12221 * @action: watchdog timer / action
12222 *
12223 * There are separate watchdog timers for each possible watchdog
12224 * action. Configure one of the watchdog timers by setting a non-zero
12225 * timeout. Disable a watchdog timer by using a timeout of zero.
12226 */
t4_config_watchdog(struct adapter * adapter,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int timeout,unsigned int action)12227 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
12228 unsigned int pf, unsigned int vf,
12229 unsigned int timeout, unsigned int action)
12230 {
12231 struct fw_watchdog_cmd wdog;
12232 unsigned int ticks;
12233
12234 /*
12235 * The watchdog command expects a timeout in units of 10ms so we need
12236 * to convert it here (via rounding) and force a minimum of one 10ms
12237 * "tick" if the timeout is non-zero but the convertion results in 0
12238 * ticks.
12239 */
12240 ticks = (timeout + 5)/10;
12241 if (timeout && !ticks)
12242 ticks = 1;
12243
12244 memset(&wdog, 0, sizeof wdog);
12245 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
12246 F_FW_CMD_REQUEST |
12247 F_FW_CMD_WRITE |
12248 V_FW_PARAMS_CMD_PFN(pf) |
12249 V_FW_PARAMS_CMD_VFN(vf));
12250 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
12251 wdog.timeout = cpu_to_be32(ticks);
12252 wdog.action = cpu_to_be32(action);
12253
12254 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
12255 }
12256
t4_get_devlog_level(struct adapter * adapter,unsigned int * level)12257 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
12258 {
12259 struct fw_devlog_cmd devlog_cmd;
12260 int ret;
12261
12262 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12263 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12264 F_FW_CMD_REQUEST | F_FW_CMD_READ);
12265 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12266 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12267 sizeof(devlog_cmd), &devlog_cmd);
12268 if (ret)
12269 return ret;
12270
12271 *level = devlog_cmd.level;
12272 return 0;
12273 }
12274
t4_set_devlog_level(struct adapter * adapter,unsigned int level)12275 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
12276 {
12277 struct fw_devlog_cmd devlog_cmd;
12278
12279 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12280 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12281 F_FW_CMD_REQUEST |
12282 F_FW_CMD_WRITE);
12283 devlog_cmd.level = level;
12284 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12285 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12286 sizeof(devlog_cmd), &devlog_cmd);
12287 }
12288
t4_configure_add_smac(struct adapter * adap)12289 int t4_configure_add_smac(struct adapter *adap)
12290 {
12291 unsigned int param, val;
12292 int ret = 0;
12293
12294 adap->params.smac_add_support = 0;
12295 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12296 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
12297 /* Query FW to check if FW supports adding source mac address
12298 * to TCAM feature or not.
12299 * If FW returns 1, driver can use this feature and driver need to send
12300 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
12301 * enable adding smac to TCAM.
12302 */
12303 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12304 if (ret)
12305 return ret;
12306
12307 if (val == 1) {
12308 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
12309 ¶m, &val);
12310 if (!ret)
12311 /* Firmware allows adding explicit TCAM entries.
12312 * Save this internally.
12313 */
12314 adap->params.smac_add_support = 1;
12315 }
12316
12317 return ret;
12318 }
12319
t4_configure_ringbb(struct adapter * adap)12320 int t4_configure_ringbb(struct adapter *adap)
12321 {
12322 unsigned int param, val;
12323 int ret = 0;
12324
12325 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12326 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
12327 /* Query FW to check if FW supports ring switch feature or not.
12328 * If FW returns 1, driver can use this feature and driver need to send
12329 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
12330 * enable the ring backbone configuration.
12331 */
12332 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12333 if (ret < 0) {
12334 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
12335 ret);
12336 goto out;
12337 }
12338
12339 if (val != 1) {
12340 CH_ERR(adap, "FW doesnot support ringbackbone features\n");
12341 goto out;
12342 }
12343
12344 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
12345 if (ret < 0) {
12346 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
12347 ret);
12348 goto out;
12349 }
12350
12351 out:
12352 return ret;
12353 }
12354
12355 /*
12356 * t4_set_vlan_acl - Set a VLAN id for the specified VF
12357 * @adapter: the adapter
12358 * @mbox: mailbox to use for the FW command
12359 * @vf: one of the VFs instantiated by the specified PF
12360 * @vlan: The vlanid to be set
12361 *
12362 */
t4_set_vlan_acl(struct adapter * adap,unsigned int mbox,unsigned int vf,u16 vlan)12363 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
12364 u16 vlan)
12365 {
12366 struct fw_acl_vlan_cmd vlan_cmd;
12367 unsigned int enable;
12368
12369 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
12370 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
12371 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
12372 F_FW_CMD_REQUEST |
12373 F_FW_CMD_WRITE |
12374 F_FW_CMD_EXEC |
12375 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
12376 V_FW_ACL_VLAN_CMD_VFN(vf));
12377 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
12378 /* Drop all packets that donot match vlan id */
12379 vlan_cmd.dropnovlan_fm = (enable
12380 ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
12381 F_FW_ACL_VLAN_CMD_FM)
12382 : 0);
12383 if (enable != 0) {
12384 vlan_cmd.nvlan = 1;
12385 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
12386 }
12387
12388 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
12389 }
12390
12391 /**
12392 * t4_del_mac - Removes the exact-match filter for a MAC address
12393 * @adap: the adapter
12394 * @mbox: mailbox to use for the FW command
12395 * @viid: the VI id
12396 * @addr: the MAC address value
12397 * @smac: if true, delete from only the smac region of MPS
12398 *
12399 * Modifies an exact-match filter and sets it to the new MAC address if
12400 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12401 * latter case the address is added persistently if @persist is %true.
12402 *
12403 * Returns a negative error number or the index of the filter with the new
12404 * MAC value. Note that this index may differ from @idx.
12405 */
t4_del_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,const u8 * addr,bool smac)12406 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12407 const u8 *addr, bool smac)
12408 {
12409 int ret;
12410 struct fw_vi_mac_cmd c;
12411 struct fw_vi_mac_exact *p = c.u.exact;
12412 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12413
12414 memset(&c, 0, sizeof(c));
12415 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12416 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12417 V_FW_VI_MAC_CMD_VIID(viid));
12418 c.freemacs_to_len16 = cpu_to_be32(
12419 V_FW_CMD_LEN16(1) |
12420 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12421
12422 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12423 p->valid_to_idx = cpu_to_be16(
12424 F_FW_VI_MAC_CMD_VALID |
12425 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
12426
12427 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12428 if (ret == 0) {
12429 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12430 if (ret < max_mac_addr)
12431 return -ENOMEM;
12432 }
12433
12434 return ret;
12435 }
12436
12437 /**
12438 * t4_add_mac - Adds an exact-match filter for a MAC address
12439 * @adap: the adapter
12440 * @mbox: mailbox to use for the FW command
12441 * @viid: the VI id
12442 * @idx: index of existing filter for old value of MAC address, or -1
12443 * @addr: the new MAC address value
12444 * @persist: whether a new MAC allocation should be persistent
12445 * @add_smt: if true also add the address to the HW SMT
12446 * @smac: if true, update only the smac region of MPS
12447 *
12448 * Modifies an exact-match filter and sets it to the new MAC address if
12449 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
12450 * latter case the address is added persistently if @persist is %true.
12451 *
12452 * Returns a negative error number or the index of the filter with the new
12453 * MAC value. Note that this index may differ from @idx.
12454 */
t4_add_mac(struct adapter * adap,unsigned int mbox,unsigned int viid,int idx,const u8 * addr,bool persist,u8 * smt_idx,bool smac)12455 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12456 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
12457 {
12458 int ret, mode;
12459 struct fw_vi_mac_cmd c;
12460 struct fw_vi_mac_exact *p = c.u.exact;
12461 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12462
12463 if (idx < 0) /* new allocation */
12464 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
12465 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
12466
12467 memset(&c, 0, sizeof(c));
12468 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12469 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12470 V_FW_VI_MAC_CMD_VIID(viid));
12471 c.freemacs_to_len16 = cpu_to_be32(
12472 V_FW_CMD_LEN16(1) |
12473 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12474 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
12475 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
12476 V_FW_VI_MAC_CMD_IDX(idx));
12477 memcpy(p->macaddr, addr, sizeof(p->macaddr));
12478
12479 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12480 if (ret == 0) {
12481 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12482 if (ret >= max_mac_addr)
12483 return -ENOMEM;
12484 if (smt_idx) {
12485 /* Does fw supports returning smt_idx? */
12486 if (adap->params.viid_smt_extn_support)
12487 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
12488 else {
12489 /* In T4/T5, SMT contains 256 SMAC entries
12490 * organized in 128 rows of 2 entries each.
12491 * In T6, SMT contains 256 SMAC entries in
12492 * 256 rows.
12493 */
12494 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
12495 *smt_idx = ((viid & M_FW_VIID_VIN) << 1);
12496 else
12497 *smt_idx = (viid & M_FW_VIID_VIN);
12498 }
12499 }
12500 }
12501
12502 return ret;
12503 }
12504
12505