/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 Ethernet driver.
 *
 * Copyright (C) 2003-2013 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/queue.h>
#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"
#include "t4_fw.h"

/*
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @d: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
    int polarity, int attempts, int d, u32 *valp)
{
        int rc = 0;

        /* LINTED: E_CONSTANT_CONDITION */
        while (1) {
                u32 val = t4_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp != NULL)
                                *valp = val;
                        goto done;
                }
                if (--attempts == 0) {
                        rc = -EAGAIN;
                        goto done;
                }
                if (d != 0)
                        udelay(d);
        }

done:
        return (rc);
}
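
/*
 * Usage sketch (illustrative only; A_EXAMPLE_STATUS is a made-up
 * register name): poll a status register until bit 0 reads as 1,
 * trying up to 1000 times with 10us between polls:
 *
 *    u32 val;
 *    int rc = t4_wait_op_done_val(adapter, A_EXAMPLE_STATUS, 1, 1,
 *        1000, 10, &val);
 *    if (rc == -EAGAIN)
 *        ... the operation timed out ...
 */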

/*
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void
t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, u32 val)
{
        u32 v = t4_read_reg(adapter, addr) & ~mask;

        t4_write_reg(adapter, addr, v | val);
        (void) t4_read_reg(adapter, addr); /* flush */
}
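
/*
 * Usage sketch (illustrative only; A_EXAMPLE_CFG is a made-up register
 * name): set a two-bit field at bits 5:4 to the value 2 while leaving
 * the rest of the register untouched. @val must lie within @mask:
 *
 *    t4_set_reg_field(adapter, A_EXAMPLE_CFG, 0x3 << 4, 2 << 4);
 */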

/*
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void
t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
    unsigned int data_reg, u32 *vals, unsigned int nregs,
    unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx);
                *vals++ = t4_read_reg(adap, data_reg);
                start_idx++;
        }
}

/*
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void
t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
    unsigned int data_reg, const u32 *vals, unsigned int nregs,
    unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx++);
                t4_write_reg(adap, data_reg, *vals++);
        }
}
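
/*
 * Usage sketch (illustrative only, assuming the usual TP_PIO
 * address/data register pair from t4_regs.h): read four consecutive
 * indirect TP registers starting at index 0x10:
 *
 *    u32 vals[4];
 *    t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, 0x10);
 */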

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void
get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, u32 mbox_addr)
{
        for (/* */; nflit; nflit--, mbox_addr += 8)
                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void
fw_asrt(struct adapter *adap, u32 mbox_addr)
{
        struct fw_debug_cmd asrt;

        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof (asrt) / 8, mbox_addr);
        CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %x, val1 %x",
            asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
            ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/*
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff, otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int
t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
    void *rpl, bool sleep_ok)
{
        /*
         * We delay in small increments at first in an effort to maintain
         * responsiveness for simple, fast executing commands but then back
         * off to larger delays up to a maximum retry delay.
         */
        static const int d[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };

        u32 v;
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;

        u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
        u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

        if ((size & 15) || size > MBOX_LEN)
                return (-EINVAL);

        v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
                v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

        if (v != X_MBOWNER_PL)
                return (v ? -EBUSY : -ETIMEDOUT);

        for (i = 0; i < size; i += 8, p++)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

        t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
        (void) t4_read_reg(adap, ctl_reg); /* flush write */

        delay_idx = 0;
        ms = d[0];

        for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok != 0) {
                        ms = d[delay_idx]; /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(d) - 1)
                                delay_idx++;
                        msleep(ms);
                } else
                        mdelay(ms);

                v = t4_read_reg(adap, ctl_reg);
                if (v == X_CIM_PF_NOACCESS)
                        continue;
                if (G_MBOWNER(v) == X_MBOWNER_PL) {
                        if (!(v & F_MBMSGVALID)) {
                                t4_write_reg(adap, ctl_reg,
                                    V_MBOWNER(X_MBOWNER_NONE));
                                continue;
                        }

                        res = t4_read_reg64(adap, data_reg);
                        if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
                                fw_asrt(adap, data_reg);
                                res = V_FW_CMD_RETVAL(EIO);
                        } else if (rpl != NULL)
                                get_mbox_rpl(adap, rpl, size / 8, data_reg);
                        t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
                        return (-G_FW_CMD_RETVAL((int)res));
                }
        }

        CH_ERR(adap, "command %x in mailbox %d timed out",
            *(const u8 *)cmd, mbox);
        return (-ETIMEDOUT);
}
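
/*
 * Usage sketch (illustrative only): issue a firmware RESET command
 * through the driver's usual t4_wr_mbox() wrapper (which calls
 * t4_wr_mbox_meat() with sleep_ok set); see t4_link_start() below for
 * a real caller of the same pattern:
 *
 *    struct fw_reset_cmd c;
 *
 *    (void) memset(&c, 0, sizeof (c));
 *    c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) |
 *        F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *    c.retval_len16 = htonl(FW_LEN16(c));
 *    ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL);
 */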

/*
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int
t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
                return (-EBUSY);
        t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
        t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
        t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
        t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
            V_BIST_CMD_GAP(1));
        i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
        if (i != 0)
                return (i);

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
        if (ecc != NULL)
                *ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
        return (0);
}

/*
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int
t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        idx *= EDC_STRIDE;
        if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
                return (-EBUSY);
        t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
        t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
        t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
        t4_write_reg(adap, A_EDC_BIST_CMD + idx,
            V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
        i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
        if (i != 0)
                return (i);

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
        if (ecc != NULL)
                *ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
        return (0);
}

/*
 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to read
 * @buf: host memory buffer
 *
 * Reads an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries. The memory is returned as a raw byte sequence from
 * the firmware's memory. If this memory contains data structures which
 * contain multi-byte integers, it's the caller's responsibility to
 * perform appropriate byte order conversions.
 */
int
t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf)
{
        u32 pos, start, end, offset;
        int ret;

        /*
         * Argument sanity checks ...
         */
        if ((addr & 0x3) || (len & 0x3))
                return (-EINVAL);

        /*
         * The underlying EDC/MC read routines read 64 bytes at a time so we
         * need to round down the start and round up the end. We'll start
         * copying out of the first line at (addr - start) a word at a time.
         */
        start = addr & ~(64 - 1);
        end = (addr + len + 64 - 1) & ~(64 - 1);
        offset = (addr - start) / sizeof (__be32);

        for (pos = start; pos < end; pos += 64, offset = 0) {
                __be32 data[16];

                /*
                 * Read the chip's memory block and bail if there's an error.
                 */
                if (mtype == MEM_MC)
                        ret = t4_mc_read(adap, pos, data, NULL);
                else
                        ret = t4_edc_read(adap, mtype, pos, data, NULL);
                if (ret != 0)
                        return (ret);

                /*
                 * Copy the data into the caller's memory buffer.
                 */
                while (offset < 16 && len > 0) {
                        *buf++ = data[offset++];
                        len -= sizeof (__be32);
                }
        }

        return (0);
}
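
/*
 * Usage sketch (illustrative only): pull 128 bytes from offset 0x1000
 * of EDC 0 into a word-aligned host buffer. The bytes come back as raw
 * big-endian words; any multi-byte fields inside them are the caller's
 * to convert:
 *
 *    __be32 buf[32];
 *    ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof (buf), buf);
 */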

/*
 * t4_mem_win_rw - read/write memory through PCIE memory window
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
 * address @addr.
 */
static int
t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
        int i;

        /*
         * Set up the offset into the PCIE memory window. The address must
         * be MEMWIN0_APERTURE-byte aligned. (Read back the MA register to
         * ensure that changes propagate before we attempt to use the new
         * values.)
         */
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
            addr & ~(MEMWIN0_APERTURE - 1));
        (void) t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
            0));

        /* Transfer data 4 bytes at a time, up to MEMWIN0_APERTURE */
        for (i = 0; i < MEMWIN0_APERTURE; i = i + 0x4) {
                if (dir != 0)
                        *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
                else
                        t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
        }

        return (0);
}

int
t4_mem_win_read(struct adapter *adap, u32 addr, __be32 *data)
{
        return (t4_mem_win_rw(adap, addr, data, 1));
}

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[ID_LEN];
        u8 vpdr_tag;
        u8 vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 512
#define VPD_INFO_FLD_HDR_SIZE 3

/*
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int
t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_RD_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if (addr >= EEPROMVSIZE || (addr & 3))
                return (-EINVAL);

        t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
        do {
                udelay(10);
                t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed", addr);
                return (-EIO);
        }
        t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
        *data = le32_to_cpu(*data);
        return (0);
}
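
/*
 * Usage sketch (illustrative only): read the 32-bit word at physical
 * EEPROM offset 0 for PCI function 0. Physical offsets must first be
 * translated with t4_eeprom_ptov() (defined below) because the VPD
 * capability operates on virtual addresses:
 *
 *    u32 word;
 *    int vaddr = t4_eeprom_ptov(0, 0, 1024);
 *    if (vaddr >= 0)
 *        ret = t4_seeprom_read(adapter, vaddr, &word);
 */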

/*
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int
t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_WR_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if (addr >= EEPROMVSIZE || (addr & 3))
                return (-EINVAL);

        t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
            cpu_to_le32(data));
        t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
            (u16)addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address %x failed", addr);
                return (-EIO);
        }
        return (0);
}

/*
 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [ES-A..ES)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
int
t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return (phys_addr + (31 << 10));
        if (phys_addr < 1024 + fn)
                return (EEPROMSIZE - fn + phys_addr - 1024);
        if (phys_addr < EEPROMSIZE)
                return (phys_addr - 1024 - fn);
        return (-EINVAL);
}
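
/*
 * Worked example of the mapping above (illustrative; the real ES comes
 * from EEPROMSIZE in the driver headers). With @fn = 1 and @sz = 1024,
 * so A = 1024, and an assumed ES of 0x4400:
 *
 *    t4_eeprom_ptov(0x000, 1, 1024) == 0x7c00  first 1K -> [31K..32K)
 *    t4_eeprom_ptov(0x400, 1, 1024) == 0x4000  fn area -> [ES-A..ES)
 *    t4_eeprom_ptov(0x800, 1, 1024) == 0x0000  the rest -> [0..ES-A-1K)
 */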

/*
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int
t4_seeprom_wp(struct adapter *adapter, int enable)
{
        return (t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0));
}

/*
 * get_vpd_keyword_val - locate an information field keyword in the VPD
 * @v: pointer to buffered VPD data structure
 * @kw: the keyword to search for
 *
 * Returns the offset of the information field keyword's value within
 * the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int
get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
        int i;
        unsigned int offset, len;
        const u8 *buf = &v->id_tag;
        const u8 *vpdr_len = &v->vpdr_tag;

        offset = sizeof (struct t4_vpd_hdr);
        len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

        if (len + sizeof (struct t4_vpd_hdr) > VPD_LEN) {
                return (-ENOENT);
        }

        for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len; ) {
                if (memcmp(buf + i, kw, 2) == 0) {
                        i += VPD_INFO_FLD_HDR_SIZE;
                        return (i);
                }

                i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
        }

        return (-ENOENT);
}

/*
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, ret, addr;
        int ec, sn, pn, na;
        u8 vpd[VPD_LEN], csum;
        const struct t4_vpd_hdr *v;

        /*
         * Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
        /* LINTED: E_BAD_PTR_CAST_ALIGN */
        ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
        addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

        for (i = 0; i < sizeof (vpd); i += 4) {
                /* LINTED: E_BAD_PTR_CAST_ALIGN */
                ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
                if (ret != 0)
                        return (ret);
        }
        v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
        var = get_vpd_keyword_val(v, name); \
        if (var < 0) { \
                CH_ERR(adapter, "missing VPD keyword " name); \
                return (-EINVAL); \
        } \
} while (0)

        /* LINTED: E_CONSTANT_CONDITION */
        FIND_VPD_KW(i, "RV");
        for (csum = 0; i >= 0; i--)
                csum += vpd[i];

        if (csum != 0) {
                CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u", csum);
                return (-EINVAL);
        }
        /* LINTED: E_CONSTANT_CONDITION */
        FIND_VPD_KW(ec, "EC");
        /* LINTED: E_CONSTANT_CONDITION */
        FIND_VPD_KW(sn, "SN");
        /* LINTED: E_CONSTANT_CONDITION */
        FIND_VPD_KW(pn, "PN");
        /* LINTED: E_CONSTANT_CONDITION */
        FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

        (void) memcpy(p->id, v->id_data, ID_LEN);
        (void) strstrip(p->id);
        (void) memcpy(p->ec, vpd + ec, EC_LEN);
        (void) strstrip(p->ec);
        i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
        (void) memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        (void) strstrip(p->sn);
        (void) memcpy(p->pn, vpd + pn, min(i, PN_LEN));
        (void) strstrip(p->pn);
        (void) memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
        (void) strstrip(p->na);

        return (0);
}

/* serial flash and firmware constants and flash config file constants */
enum {
        SF_ATTEMPTS = 10, /* max retries for SF operations */

        /* flash command opcodes */
        SF_PROG_PAGE = 2, /* program page */
        SF_WR_DISABLE = 4, /* disable writes */
        SF_RD_STATUS = 5, /* read status register */
        SF_WR_ENABLE = 6, /* enable writes */
        SF_RD_DATA_FAST = 0xb, /* read flash */
        SF_RD_ID = 0x9f, /* read ID */
        SF_ERASE_SECTOR = 0xd8, /* erase sector */
};

/*
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return (-EINVAL);
        if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
                return (-EBUSY);
        t4_write_reg(adapter, A_SF_OP,
            V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
        if (!ret)
                *valp = t4_read_reg(adapter, A_SF_DATA);
        return (ret);
}

/*
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
    u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return (-EINVAL);
        if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
                return (-EBUSY);
        t4_write_reg(adapter, A_SF_DATA, val);
        t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
            V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return (t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5));
}

/*
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @d: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
flash_wait_op(struct adapter *adapter, int attempts, int d)
{
        int ret = 0;
        u32 status;

        /* LINTED: E_CONSTANT_CONDITION */
        while (1) {
                if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
                        goto done;
                if (!(status & 1))
                        goto done;
                if (--attempts == 0) {
                        ret = -EAGAIN;
                        goto done;
                }
                if (d != 0)
                        msleep(d);
        }

done:
        return (ret);
}

/*
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int
t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
    u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof (u32) > adapter->params.sf_size ||
            (addr & 3))
                return (-EINVAL);

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
                return (ret);

        for (/* */; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
                        t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
                if (ret != 0)
                        return (ret);
                if (byte_oriented != 0)
                        *data = htonl(*data);
        }
        return (0);
}
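
/*
 * Usage sketch (illustrative only): fetch the 32-bit firmware version
 * word from its offset inside the flash firmware header, in host byte
 * order; t4_get_fw_version() below does exactly this:
 *
 *    u32 vers;
 *    ret = t4_read_flash(adapter,
 *        FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);
 */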

/*
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n,
    const u8 *data)
{
        int ret;
        u32 buf[SF_PAGE_SIZE / 4];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
                return (-EINVAL);

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
                goto unlock;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, 1, val);
                if (ret != 0)
                        goto unlock;
        }
        ret = flash_wait_op(adapter, 8, 1);
        if (ret != 0)
                goto unlock;

        t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */

        /* Read the page to verify the write succeeded */
        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret != 0)
                return (ret);

        if (memcmp(data - n, (u8 *)buf + offset, n)) {
                CH_ERR(adapter, "failed to correctly write the flash page "
                    "at %x", addr);
                return (-EIO);
        }
        return (0);

unlock:
        t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
        return (ret);
}

/*
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int
t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
        return (t4_read_flash(adapter,
            FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, vers, 0));
}

/*
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int
t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
        return (t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
            tp_microcode_ver), 1, vers, 0));
}

/*
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int
t4_check_fw_version(struct adapter *adapter)
{
        u32 api_vers[2];
        int ret, major, minor, micro;

        ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
        if (!ret)
                ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
        if (!ret)
                ret = t4_read_flash(adapter,
                    FLASH_FW_START + offsetof(struct fw_hdr, intfver_nic), 2,
                    api_vers, 1);
        if (ret != 0)
                return (ret);

        major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
        minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
        micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
        (void) memcpy(adapter->params.api_vers, api_vers,
            sizeof (adapter->params.api_vers));

        if (major != T4FW_VERSION_MAJOR) { /* major mismatch - fail */
                CH_ERR(adapter, "card FW has major version %u, driver wants "
                    "%u", major, T4FW_VERSION_MAJOR);
                return (-EINVAL);
        }

        if (minor == T4FW_VERSION_MINOR && micro == T4FW_VERSION_MICRO)
                return (0); /* perfect match */

        /* Minor/micro version mismatch. Report it but often it's OK. */
        return (1);
}

/*
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        int ret = 0;

        while (start <= end) {
                if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0, 1,
                    SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
                        CH_ERR(adapter, "erase of flash sector %d failed, "
                            "error %d", start, ret);
                        break;
                }
                start++;
        }
        t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
        return (ret);
}

/*
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int
t4_flash_cfg_addr(struct adapter *adapter)
{
        if (adapter->params.sf_size == 0x100000)
                return (FLASH_FPGA_CFG_START);
        else
                return (FLASH_CFG_START);
}

/*
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int
t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
        int ret, i, n;
        unsigned int addr;
        unsigned int flash_cfg_start_sec;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

        addr = t4_flash_cfg_addr(adap);
        flash_cfg_start_sec = addr / SF_SEC_SIZE;

        if (!size) {
                CH_ERR(adap, "cfg file has no data");
                return (-EINVAL);
        }

        if (size > FLASH_CFG_MAX_SIZE) {
                CH_ERR(adap, "cfg file too large, max is %u bytes",
                    FLASH_CFG_MAX_SIZE);
                return (-EFBIG);
        }

        i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
            sf_sec_size);
        ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
            flash_cfg_start_sec + i - 1);
        if (ret != 0)
                goto out;

        /* this will write to the flash up to SF_PAGE_SIZE at a time */
        for (i = 0; i < size; i += SF_PAGE_SIZE) {
                if ((size - i) < SF_PAGE_SIZE)
                        n = size - i;
                else
                        n = SF_PAGE_SIZE;
                ret = t4_write_flash(adap, addr, n, cfg_data);
                if (ret != 0)
                        goto out;

                addr += SF_PAGE_SIZE;
                cfg_data += SF_PAGE_SIZE;
        }

out:
        if (ret != 0)
                CH_ERR(adap, "config file download failed %d", ret);
        return (ret);
}

/*
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int
t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        int ret, addr;
        unsigned int i;
        u8 first_page[SF_PAGE_SIZE];
        /* LINTED: E_BAD_PTR_CAST_ALIGN */
        const u32 *p = (const u32 *)fw_data;
        /* LINTED: E_BAD_PTR_CAST_ALIGN */
        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

        if (!size) {
                CH_ERR(adap, "FW image has no data");
                return (-EINVAL);
        }
        if (size & 511) {
                CH_ERR(adap, "FW image size not multiple of 512 bytes");
                return (-EINVAL);
        }
        if (ntohs(hdr->len512) * 512 != size) {
                CH_ERR(adap, "FW image size differs from size in FW header");
                return (-EINVAL);
        }
        if (size > FLASH_FW_MAX_SIZE) {
                CH_ERR(adap, "FW image too large, max is %u bytes",
                    FLASH_FW_MAX_SIZE);
                return (-EFBIG);
        }

        for (csum = 0, i = 0; i < size / sizeof (csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adap, "corrupted firmware image, checksum %x",
                    csum);
                return (-EINVAL);
        }

        i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
        ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
            FLASH_FW_START_SEC + i - 1);
        if (ret != 0)
                goto out;

        /*
         * We write the correct version at the end so the driver can see a bad
         * version if the FW write fails. Start by writing a copy of the
         * first page with a bad version.
         */
        (void) memcpy(first_page, fw_data, SF_PAGE_SIZE);
        /* LINTED: E_BAD_PTR_CAST_ALIGN */
        ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
        ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page);
        if (ret != 0)
                goto out;

        addr = FLASH_FW_START;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
                ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
                if (ret != 0)
                        goto out;
        }

        ret = t4_write_flash(adap,
            FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
            sizeof (hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
        if (ret != 0)
                CH_ERR(adap, "firmware download failed, error %d", ret);
        return (ret);
}

/*
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void
t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
        unsigned int i, v;

        for (i = 0; i < CIM_NUM_IBQ; i++) {
                t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
                    V_QUENUMSELECT(i));
                v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
                *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
                *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
                *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
        }
        for (i = 0; i < CIM_NUM_OBQ; i++) {
                t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
                    V_QUENUMSELECT(i));
                v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
                *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
                *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
        }
}

/*
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int
t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
        int i, err;
        unsigned int addr;
        const unsigned int nwords = CIM_IBQ_SIZE * 4;

        if (qid > 5 || (n & 3))
                return (-EINVAL);

        addr = qid * nwords;
        if (n > nwords)
                n = nwords;

        for (i = 0; i < n; i++, addr++) {
                t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
                    F_IBQDBGEN);
                err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
                    2, 1);
                if (err != 0)
                        return (err);
                *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
        }
        t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
        return (i);
}
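
/*
 * Usage sketch (illustrative only): snapshot the whole of CIM inbound
 * queue 0. Each IBQ holds CIM_IBQ_SIZE 16-byte entries, i.e.
 * CIM_IBQ_SIZE * 4 32-bit words:
 *
 *    u32 ibq[CIM_IBQ_SIZE * 4];
 *    int nread = t4_read_cim_ibq(adap, 0, ibq, ARRAY_SIZE(ibq));
 *    if (nread < 0)
 *        ... the read failed ...
 */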

/*
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int
t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
        int i, err;
        unsigned int addr, v, nwords;

        if (qid > 5 || (n & 3))
                return (-EINVAL);

        t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
            V_QUENUMSELECT(qid));
        v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

        addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
        nwords = G_CIMQSIZE(v) * 64; /* same */
        if (n > nwords)
                n = nwords;

        for (i = 0; i < n; i++, addr++) {
                t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
                    F_OBQDBGEN);
                err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
                    2, 1);
                if (err != 0)
                        return (err);
                *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
        }
        t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
        return (i);
}

enum {
        CIM_QCTL_BASE = 0,
        CIM_CTL_BASE = 0x2000,
        CIM_PBT_ADDR_BASE = 0x2800,
        CIM_PBT_LRF_BASE = 0x3000,
        CIM_PBT_DATA_BASE = 0x3800
};

/*
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 */
int
t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
    unsigned int *valp)
{
        int ret = 0;

        if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
                return (-EBUSY);

        for (/* */; !ret && n--; addr += 4) {
                t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
                ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                    0, 5, 2);
                if (!ret)
                        *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
        }
        return (ret);
}
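
/*
 * Usage sketch (illustrative only): read four words starting at the
 * base of the CIM control region; t4_cim_ctl_read() below wraps
 * exactly this CIM_CTL_BASE offsetting:
 *
 *    unsigned int vals[4];
 *    ret = t4_cim_read(adap, CIM_CTL_BASE, 4, vals);
 */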

/*
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 */
int
t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
    const unsigned int *valp)
{
        int ret = 0;

        if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
                return (-EBUSY);

        for (/* */; !ret && n--; addr += 4) {
                t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
                t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
                ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                    0, 5, 2);
        }
        return (ret);
}

static int
t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
        return (t4_cim_write(adap, addr, 1, &val));
}

/*
 * t4_cim_ctl_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int
t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
    unsigned int *valp)
{
        return (t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp));
}

/*
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int
t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
        int i, ret;
        unsigned int cfg, val, idx;

        ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
        if (ret != 0)
                return (ret);

        if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
                ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
                if (ret != 0)
                        return (ret);
        }

        ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
        if (ret != 0)
                goto restart;

        idx = G_UPDBGLAWRPTR(val);
        if (wrptr != NULL)
                *wrptr = idx;

        for (i = 0; i < adap->params.cim_la_size; i++) {
                ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
                    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
                if (ret != 0)
                        break;
                ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
                if (ret != 0)
                        break;
                if (val & F_UPDBGLARDEN) {
                        ret = -ETIMEDOUT;
                        break;
                }
                ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
                if (ret != 0)
                        break;
                idx = (idx + 1) & M_UPDBGLARDPTR;
        }
restart:
        if (cfg & F_UPDBGLAEN) {
                int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
                    cfg & ~F_UPDBGLARDEN);
                if (!ret)
                        ret = r;
        }
        return (ret);
}
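
/*
 * Usage sketch (illustrative only, assuming the illumos kmem allocator
 * is available here): snapshot the CIM logic-analyzer buffer, one
 * 32-bit word per entry:
 *
 *    unsigned int wrptr;
 *    u32 *la_buf = kmem_zalloc(adap->params.cim_la_size * sizeof (u32),
 *        KM_SLEEP);
 *    ret = t4_cim_read_la(adap, la_buf, &wrptr);
 */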
1403
1404 void
t4_cim_read_pif_la(struct adapter * adap,u32 * pif_req,u32 * pif_rsp,unsigned int * pif_req_wrptr,unsigned int * pif_rsp_wrptr)1405 t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1406 unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr)
1407 {
1408 int i, j;
1409 u32 cfg, val, req, rsp;
1410
1411 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1412 if (cfg & F_LADBGEN)
1413 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1414
1415 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1416 req = G_POLADBGWRPTR(val);
1417 rsp = G_PILADBGWRPTR(val);
1418 if (pif_req_wrptr != NULL)
1419 *pif_req_wrptr = req;
1420 if (pif_rsp_wrptr != NULL)
1421 *pif_rsp_wrptr = rsp;
1422
1423 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1424 for (j = 0; j < 6; j++) {
1425 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1426 V_PILADBGRDPTR(rsp));
1427 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1428 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1429 req++;
1430 rsp++;
1431 }
1432 req = (req + 2) & M_POLADBGRDPTR;
1433 rsp = (rsp + 2) & M_PILADBGRDPTR;
1434 }
1435 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1436 }
1437
1438 void
t4_cim_read_ma_la(struct adapter * adap,u32 * ma_req,u32 * ma_rsp)1439 t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1440 {
1441 u32 cfg;
1442 int i, j, idx;
1443
1444 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1445 if (cfg & F_LADBGEN)
1446 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1447
1448 for (i = 0; i < CIM_MALA_SIZE; i++) {
1449 for (j = 0; j < 5; j++) {
1450 idx = 8 * i + j;
1451 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1452 V_PILADBGRDPTR(idx));
1453 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1454 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1455 }
1456 }
1457 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1458 }
1459
1460 /*
1461 * t4_tp_read_la - read TP LA capture buffer
1462 * @adap: the adapter
1463 * @la_buf: where to store the LA data
1464 * @wrptr: the HW write pointer within the capture buffer
1465 *
1466 * Reads the contents of the TP LA buffer with the most recent entry at
1467 * the end of the returned data and with the entry at @wrptr first.
1468 * We leave the LA in the running state we find it in.
1469 */
1470 void
t4_tp_read_la(struct adapter * adap,u64 * la_buf,unsigned int * wrptr)1471 t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1472 {
1473 bool last_incomplete;
1474 unsigned int i, cfg, val, idx;
1475
1476 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1477 if (cfg & F_DBGLAENABLE) /* freeze LA */
1478 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1479 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1480
1481 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1482 idx = G_DBGLAWPTR(val);
1483 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1484 if (last_incomplete != 0)
1485 idx = (idx + 1) & M_DBGLARPTR;
1486 if (wrptr != NULL)
1487 *wrptr = idx;
1488
1489 val &= 0xffff;
1490 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1491 val |= adap->params.tp.la_mask;
1492
1493 for (i = 0; i < TPLA_SIZE; i++) {
1494 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1495 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1496 idx = (idx + 1) & M_DBGLARPTR;
1497 }
1498
1499 /* Wipe out last entry if it isn't valid */
1500 if (last_incomplete != 0)
1501 la_buf[TPLA_SIZE - 1] = ~0ULL;
1502
1503 if (cfg & F_DBGLAENABLE) /* restore running state */
1504 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1505 cfg | adap->params.tp.la_mask);
1506 }
1507
1508 void
t4_ulprx_read_la(struct adapter * adap,u32 * la_buf)1509 t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1510 {
1511 unsigned int i, j;
1512
1513 for (i = 0; i < 8; i++) {
1514 u32 *p = la_buf + i;
1515
1516 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1517 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1518 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1519 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1520 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1521 }
1522 }
1523
1524 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1525 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1526
1527 /*
1528 * t4_link_start - apply link configuration to MAC/PHY
1529 * @phy: the PHY to setup
1530 * @mac: the MAC to setup
1531 * @lc: the requested link configuration
1532 *
1533 * Set up a port's MAC and PHY according to a desired link configuration.
1534 * - If the PHY can auto-negotiate first decide what to advertise, then
1535 * enable/disable auto-negotiation as desired, and reset.
1536 * - If the PHY does not auto-negotiate just reset it.
1537 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1538 * otherwise do it later based on the outcome of auto-negotiation.
1539 */
1540 int
t4_link_start(struct adapter * adap,unsigned int mbox,unsigned int port,struct link_config * lc)1541 t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1542 struct link_config *lc)
1543 {
1544 struct fw_port_cmd c;
1545 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1546
1547 lc->link_ok = 0;
1548 if (lc->requested_fc & PAUSE_RX)
1549 fc |= FW_PORT_CAP_FC_RX;
1550 if (lc->requested_fc & PAUSE_TX)
1551 fc |= FW_PORT_CAP_FC_TX;
1552
1553 (void) memset(&c, 0, sizeof (c));
1554 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1555 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1556 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1557 FW_LEN16(c));
1558
1559 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1560 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1561 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1562 } else if (lc->autoneg == AUTONEG_DISABLE) {
1563 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1564 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1565 } else
1566 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1567
1568 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
1569 }
1570
1571 /*
1572 * t4_restart_aneg - restart autonegotiation
1573 * @adap: the adapter
1574 * @mbox: mbox to use for the FW command
1575 * @port: the port id
1576 *
1577 * Restarts autonegotiation for the selected port.
1578 */
1579 int
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)1580 t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1581 {
1582 struct fw_port_cmd c;
1583
1584 (void) memset(&c, 0, sizeof (c));
1585 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1586 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1587 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1588 FW_LEN16(c));
1589 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1590 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
1591 }
1592
1593 struct intr_info {
1594 unsigned int mask; /* bits to check in interrupt status */
1595 const char *msg; /* message to print or NULL */
1596 short stat_idx; /* stat counter to increment or -1 */
1597 unsigned short fatal; /* whether the condition reported is fatal */
1598 };
1599
1600 /*
1601 * t4_handle_intr_status - table driven interrupt handler
1602 * @adapter: the adapter that generated the interrupt
1603 * @reg: the interrupt status register to process
1604 * @acts: table of interrupt actions
1605 *
1606 * A table driven interrupt handler that applies a set of masks to an
1607 * interrupt status word and performs the corresponding actions if the
1608 * interrupts described by the mask have occured. The actions include
1609 * optionally emitting a warning or alert message. The table is terminated
1610 * by an entry specifying mask 0. Returns the number of fatal interrupt
1611 * conditions.
1612 */
1613 static int
t4_handle_intr_status(struct adapter * adapter,unsigned int reg,const struct intr_info * acts)1614 t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1615 const struct intr_info *acts)
1616 {
1617 int fatal = 0;
1618 unsigned int mask = 0;
1619 unsigned int status = t4_read_reg(adapter, reg);
1620
1621 for (/* */; acts->mask; ++acts) {
1622 if (!(status & acts->mask))
1623 continue;
1624 if (acts->fatal != 0) {
1625 fatal++;
1626 CH_ALERT(adapter, "%s (0x%x)",
1627 acts->msg, status & acts->mask);
1628 } else if (acts->msg != NULL)
1629 CH_WARN_RATELIMIT(adapter, "%s (0x%x)",
1630 acts->msg, status & acts->mask);
1631 mask |= acts->mask;
1632 }
1633 status &= mask;
1634 if (status != 0) /* clear processed interrupts */
1635 t4_write_reg(adapter, reg, status);
1636 return (fatal);
1637 }
1638
1639 /*
1640 * Interrupt handler for the PCIE module.
1641 */
1642 static void
pcie_intr_handler(struct adapter * adapter)1643 pcie_intr_handler(struct adapter *adapter)
1644 {
1645 static struct intr_info sysbus_intr_info[] = {
1646 { F_RNPP, "RXNP array parity error", -1, 1 },
1647 { F_RPCP, "RXPC array parity error", -1, 1 },
1648 { F_RCIP, "RXCIF array parity error", -1, 1 },
1649 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1650 { F_RFTP, "RXFT array parity error", -1, 1 },
1651 { 0 }
1652 };
1653 static struct intr_info pcie_port_intr_info[] = {
1654 { F_TPCP, "TXPC array parity error", -1, 1 },
1655 { F_TNPP, "TXNP array parity error", -1, 1 },
1656 { F_TFTP, "TXFT array parity error", -1, 1 },
1657 { F_TCAP, "TXCA array parity error", -1, 1 },
1658 { F_TCIP, "TXCIF array parity error", -1, 1 },
1659 { F_RCAP, "RXCA array parity error", -1, 1 },
1660 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1661 { F_RDPE, "Rx data parity error", -1, 1 },
1662 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
1663 { 0 }
1664 };
1665 static struct intr_info pcie_intr_info[] = {
1666 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1667 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1668 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1669 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1670 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1671 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1672 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1673 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1674 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1675 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1676 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1677 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1678 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1679 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1680 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1681 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1682 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1683 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1684 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1685 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1686 { F_FIDPERR, "PCI FID parity error", -1, 1 },
1687 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1688 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1689 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1690 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1691 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1692 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1693 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
1694 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
1695 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1696 0 },
1697 { 0 }
1698 };
1699
1700 int fat;
1701
1702 fat = t4_handle_intr_status(adapter,
1703 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, sysbus_intr_info) +
1704 t4_handle_intr_status(adapter,
1705 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) +
1706 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1707 if (fat != 0)
1708 t4_fatal_err(adapter);
1709 }
1710
1711 /*
1712 * TP interrupt handler.
1713 */
1714 static void
1715 tp_intr_handler(struct adapter *adapter)
1716 {
1717 static struct intr_info tp_intr_info[] = {
1718 { 0x3fffffff, "TP parity error", -1, 1 },
1719 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1720 { 0 }
1721 };
1722
1723 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info) != 0)
1724 t4_fatal_err(adapter);
1725 }
1726
1727 /*
1728 * SGE interrupt handler.
1729 */
1730 static void
1731 sge_intr_handler(struct adapter *adapter)
1732 {
1733 u64 v;
1734 u32 err;
1735
1736 static struct intr_info sge_intr_info[] = {
1737 { F_ERR_CPL_EXCEED_IQE_SIZE,
1738 "SGE received CPL exceeding IQE size", -1, 1 },
1739 { F_ERR_INVALID_CIDX_INC,
1740 "SGE GTS CIDX increment too large", -1, 0 },
1741 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1742 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1743 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1744 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1745 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1746 0 },
1747 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1748 0 },
1749 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1750 0 },
1751 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1752 0 },
1753 { F_ERR_ING_CTXT_PRIO,
1754 "SGE too many priority ingress contexts", -1, 0 },
1755 { F_ERR_EGR_CTXT_PRIO,
1756 "SGE too many priority egress contexts", -1, 0 },
1757 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1758 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1759 { 0 }
1760 };
1761
1762 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1763 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1764 if (v != 0) {
1765 CH_ALERT(adapter, "SGE parity error (%llx)",
1766 (unsigned long long)v);
1767 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1768 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1769 }
1770
1771 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1772
1773 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1774 if (err & F_ERROR_QID_VALID) {
1775 CH_ERR(adapter, "SGE error for queue %u", G_ERROR_QID(err));
1776 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID);
1777 }
1778
1779 if (v != 0)
1780 t4_fatal_err(adapter);
1781 }
1782
1783 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1784 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1785 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1786 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1787
1788 /*
1789 * CIM interrupt handler.
1790 */
1791 static void
1792 cim_intr_handler(struct adapter *adapter)
1793 {
1794 static struct intr_info cim_intr_info[] = {
1795 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1796 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1797 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1798 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1799 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1800 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1801 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1802 { 0 }
1803 };
1804 static struct intr_info cim_upintr_info[] = {
1805 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1806 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1807 { F_ILLWRINT, "CIM illegal write", -1, 1 },
1808 { F_ILLRDINT, "CIM illegal read", -1, 1 },
1809 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1810 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1811 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1812 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1813 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1814 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1815 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1816 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1817 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1818 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1819 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1820 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1821 { F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
1822 { F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
1823 { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1824 { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1825 { F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
1826 { F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
1827 { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1828 { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1829 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
1830 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
1831 { F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
1832 { F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1833 { 0 }
1834 };
1835
1836 int fat;
1837
1838 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1839 cim_intr_info) +
1840 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1841 cim_upintr_info);
1842 if (fat != 0)
1843 t4_fatal_err(adapter);
1844 }
1845
1846 /*
1847 * ULP RX interrupt handler.
1848 */
1849 static void
1850 ulprx_intr_handler(struct adapter *adapter)
1851 {
1852 static struct intr_info ulprx_intr_info[] = {
1853 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1854 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1855 { 0x7fffff, "ULPRX parity error", -1, 1 },
1856 { 0 }
1857 };
1858
1859 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)
1860 != 0)
1861 t4_fatal_err(adapter);
1862 }
1863
1864 /*
1865 * ULP TX interrupt handler.
1866 */
1867 static void
1868 ulptx_intr_handler(struct adapter *adapter)
1869 {
1870 static struct intr_info ulptx_intr_info[] = {
1871 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1872 0 },
1873 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1874 0 },
1875 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1876 0 },
1877 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1878 0 },
1879 { 0xfffffff, "ULPTX parity error", -1, 1 },
1880 { 0 }
1881 };
1882
1883 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)
1884 != 0)
1885 t4_fatal_err(adapter);
1886 }
1887
1888 /*
1889 * PM TX interrupt handler.
1890 */
1891 static void
1892 pmtx_intr_handler(struct adapter *adapter)
1893 {
1894 static struct intr_info pmtx_intr_info[] = {
1895 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1896 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1897 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1898 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1899 { 0xffffff0, "PMTX framing error", -1, 1 },
1900 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1901 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1902 1 },
1903 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1904 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1905 { 0 }
1906 };
1907
1908 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1909 t4_fatal_err(adapter);
1910 }
1911
1912 /*
1913 * PM RX interrupt handler.
1914 */
1915 static void
1916 pmrx_intr_handler(struct adapter *adapter)
1917 {
1918 static struct intr_info pmrx_intr_info[] = {
1919 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1920 { 0x3ffff0, "PMRX framing error", -1, 1 },
1921 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1922 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
1923 1 },
1924 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1925 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1926 { 0 }
1927 };
1928
1929 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
1930 t4_fatal_err(adapter);
1931 }
1932
1933 /*
1934 * CPL switch interrupt handler.
1935 */
1936 static void
1937 cplsw_intr_handler(struct adapter *adapter)
1938 {
1939 static struct intr_info cplsw_intr_info[] = {
1940 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1941 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1942 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1943 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1944 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1945 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1946 { 0 }
1947 };
1948
1949 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
1950 t4_fatal_err(adapter);
1951 }
1952
1953 /*
1954 * LE interrupt handler.
1955 */
1956 static void
1957 le_intr_handler(struct adapter *adap)
1958 {
1959 static struct intr_info le_intr_info[] = {
1960 { F_LIPMISS, "LE LIP miss", -1, 0 },
1961 { F_LIP0, "LE 0 LIP error", -1, 0 },
1962 { F_PARITYERR, "LE parity error", -1, 1 },
1963 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
1964 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
1965 { 0 }
1966 };
1967
1968 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
1969 t4_fatal_err(adap);
1970 }
1971
1972 /*
1973 * MPS interrupt handler.
1974 */
1975 static void
1976 mps_intr_handler(struct adapter *adapter)
1977 {
1978 static struct intr_info mps_rx_intr_info[] = {
1979 { 0xffffff, "MPS Rx parity error", -1, 1 },
1980 { 0 }
1981 };
1982 static struct intr_info mps_tx_intr_info[] = {
1983 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
1984 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1985 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
1986 -1, 1 },
1987 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
1988 -1, 1 },
1989 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
1990 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1991 { F_FRMERR, "MPS Tx framing error", -1, 1 },
1992 { 0 }
1993 };
1994 static struct intr_info mps_trc_intr_info[] = {
1995 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
1996 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
1997 1 },
1998 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
1999 { 0 }
2000 };
2001 static struct intr_info mps_stat_sram_intr_info[] = {
2002 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2003 { 0 }
2004 };
2005 static struct intr_info mps_stat_tx_intr_info[] = {
2006 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2007 { 0 }
2008 };
2009 static struct intr_info mps_stat_rx_intr_info[] = {
2010 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2011 { 0 }
2012 };
2013 static struct intr_info mps_cls_intr_info[] = {
2014 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2015 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2016 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2017 { 0 }
2018 };
2019
2020 int fat;
2021
2022 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2023 mps_rx_intr_info) +
2024 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2025 mps_tx_intr_info) +
2026 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2027 mps_trc_intr_info) +
2028 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2029 mps_stat_sram_intr_info) +
2030 t4_handle_intr_status(adapter,
2031 A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2032 mps_stat_tx_intr_info) +
2033 t4_handle_intr_status(adapter,
2034 A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2035 mps_stat_rx_intr_info) +
2036 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2037 mps_cls_intr_info);
2038
2039 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2040 (void) t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2041 if (fat != 0)
2042 t4_fatal_err(adapter);
2043 }
2044
2045 #define MEM_INT_MASK \
2046 (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2047
2048 /*
2049 * EDC/MC interrupt handler.
2050 */
2051 static void
2052 mem_intr_handler(struct adapter *adapter, int idx)
2053 {
2054 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2055
2056 unsigned int addr, cnt_addr, v;
2057
2058 if (idx <= MEM_EDC1) {
2059 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2060 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2061 } else {
2062 addr = A_MC_INT_CAUSE;
2063 cnt_addr = A_MC_ECC_STATUS;
2064 }
2065
2066 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2067 if (v & F_PERR_INT_CAUSE)
2068 CH_ALERT(adapter, "%s FIFO parity error", name[idx]);
2069 if (v & F_ECC_CE_INT_CAUSE) {
2070 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2071
2072 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2073 CH_WARN_RATELIMIT(adapter,
2074 "%u %s correctable ECC data error%s", cnt, name[idx],
2075 cnt > 1 ? "s" : "");
2076 }
2077 if (v & F_ECC_UE_INT_CAUSE)
2078 CH_ALERT(adapter, "%s uncorrectable ECC data error",
2079 name[idx]);
2080
2081 t4_write_reg(adapter, addr, v);
2082 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2083 t4_fatal_err(adapter);
2084 }
2085
2086 /*
2087 * MA interrupt handler.
2088 */
2089 static void
2090 ma_intr_handler(struct adapter *adapter)
2091 {
2092 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2093
2094 if (status & F_MEM_PERR_INT_CAUSE)
2095 CH_ALERT(adapter, "MA parity error, parity status %x",
2096 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2097 if (status & F_MEM_WRAP_INT_CAUSE) {
2098 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2099 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2100 " address %x", G_MEM_WRAP_CLIENT_NUM(v),
2101 G_MEM_WRAP_ADDRESS(v) << 4);
2102 }
2103 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2104 t4_fatal_err(adapter);
2105 }
2106
2107 /*
2108 * SMB interrupt handler.
2109 */
2110 static void
2111 smb_intr_handler(struct adapter *adap)
2112 {
2113 static struct intr_info smb_intr_info[] = {
2114 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2115 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2116 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2117 { 0 }
2118 };
2119
2120 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info) != 0)
2121 t4_fatal_err(adap);
2122 }
2123
2124 /*
2125 * NC-SI interrupt handler.
2126 */
2127 static void
2128 ncsi_intr_handler(struct adapter *adap)
2129 {
2130 static struct intr_info ncsi_intr_info[] = {
2131 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2132 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2133 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2134 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2135 { 0 }
2136 };
2137
2138 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info) != 0)
2139 t4_fatal_err(adap);
2140 }
2141
2142 /*
2143 * XGMAC interrupt handler.
2144 */
2145 static void
2146 xgmac_intr_handler(struct adapter *adap, int port)
2147 {
2148 u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2149
2150 v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2151 if (!v)
2152 return;
2153
2154 if (v & F_TXFIFO_PRTY_ERR)
2155 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error", port);
2156 if (v & F_RXFIFO_PRTY_ERR)
2157 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error", port);
2158 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2159 t4_fatal_err(adap);
2160 }
2161
2162 /*
2163 * PL interrupt handler.
2164 */
2165 static void
2166 pl_intr_handler(struct adapter *adap)
2167 {
2168 static struct intr_info pl_intr_info[] = {
2169 { F_FATALPERR, "T4 fatal parity error", -1, 1 },
2170 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2171 { 0 }
2172 };
2173
2174 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info) != 0)
2175 t4_fatal_err(adap);
2176 }
2177
2178 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2179 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2180 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2181 F_CPL_SWITCH | F_SGE | F_ULP_TX)
2182
2183 /*
2184 * t4_slow_intr_handler - control path interrupt handler
2185 * @adapter: the adapter
2186 *
2187 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2188 * The designation 'slow' is because it involves register reads, while
2189 * data interrupts typically don't involve any MMIOs.
2190 */
2191 int
2192 t4_slow_intr_handler(struct adapter *adapter)
2193 {
2194 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2195
2196 if (!(cause & GLBL_INTR_MASK))
2197 return (0);
2198 if (cause & F_CIM)
2199 cim_intr_handler(adapter);
2200 if (cause & F_MPS)
2201 mps_intr_handler(adapter);
2202 if (cause & F_NCSI)
2203 ncsi_intr_handler(adapter);
2204 if (cause & F_PL)
2205 pl_intr_handler(adapter);
2206 if (cause & F_SMB)
2207 smb_intr_handler(adapter);
2208 if (cause & F_XGMAC0)
2209 xgmac_intr_handler(adapter, 0);
2210 if (cause & F_XGMAC1)
2211 xgmac_intr_handler(adapter, 1);
2212 if (cause & F_XGMAC_KR0)
2213 xgmac_intr_handler(adapter, 2);
2214 if (cause & F_XGMAC_KR1)
2215 xgmac_intr_handler(adapter, 3);
2216 if (cause & F_PCIE)
2217 pcie_intr_handler(adapter);
2218 if (cause & F_MC)
2219 mem_intr_handler(adapter, MEM_MC);
2220 if (cause & F_EDC0)
2221 mem_intr_handler(adapter, MEM_EDC0);
2222 if (cause & F_EDC1)
2223 mem_intr_handler(adapter, MEM_EDC1);
2224 if (cause & F_LE)
2225 le_intr_handler(adapter);
2226 if (cause & F_TP)
2227 tp_intr_handler(adapter);
2228 if (cause & F_MA)
2229 ma_intr_handler(adapter);
2230 if (cause & F_PM_TX)
2231 pmtx_intr_handler(adapter);
2232 if (cause & F_PM_RX)
2233 pmrx_intr_handler(adapter);
2234 if (cause & F_ULP_RX)
2235 ulprx_intr_handler(adapter);
2236 if (cause & F_CPL_SWITCH)
2237 cplsw_intr_handler(adapter);
2238 if (cause & F_SGE)
2239 sge_intr_handler(adapter);
2240 if (cause & F_ULP_TX)
2241 ulptx_intr_handler(adapter);
2242
2243 /* Clear the interrupts just processed for which we are the master. */
2244 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2245 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2246 return (1);
2247 }
2248
2249 /*
2250 * t4_intr_enable - enable interrupts
2251 * @adapter: the adapter whose interrupts should be enabled
2252 *
2253 * Enable PF-specific interrupts for the calling function and the top-level
2254 * interrupt concentrator for global interrupts. Interrupts are already
2255 * enabled at each module, here we just enable the roots of the interrupt
2256 * hierarchies.
2257 *
2258 * Note: this function should be called only when the driver manages
2259 * non-PF-specific interrupts from the various HW modules. Only one PCI
2260 * function at a time should be doing this.
2261 */
2262 void
2263 t4_intr_enable(struct adapter *adapter)
2264 {
2265 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2266
2267 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2268 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | F_ERR_DROPPED_DB |
2269 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0 |
2270 F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2271 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_ERR_EGR_CTXT_PRIO |
2272 F_INGRESS_SIZE_ERR | F_EGRESS_SIZE_ERR);
2273 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2274 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2275 }
2276
2277 /*
2278 * t4_intr_disable - disable interrupts
2279 * @adapter: the adapter whose interrupts should be disabled
2280 *
2281 * Disable interrupts. We only disable the top-level interrupt
2282 * concentrators. The caller must be a PCI function managing global
2283 * interrupts.
2284 */
2285 void
2286 t4_intr_disable(struct adapter *adapter)
2287 {
2288 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2289
2290 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2291 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2292 }
2293
2294 /*
2295 * t4_intr_clear - clear all interrupts
2296 * @adapter: the adapter whose interrupts should be cleared
2297 *
2298 * Clears all interrupts. The caller must be a PCI function managing
2299 * global interrupts.
2300 */
2301 void
2302 t4_intr_clear(struct adapter *adapter)
2303 {
2304 static const unsigned int cause_reg[] = {
2305 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2306 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2307 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2308 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2309 A_MC_INT_CAUSE,
2310 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2311 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2312 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2313 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2314 A_TP_INT_CAUSE,
2315 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2316 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2317 A_MPS_RX_PERR_INT_CAUSE,
2318 A_CPL_INTR_CAUSE,
2319 MYPF_REG(A_PL_PF_INT_CAUSE),
2320 A_PL_PL_INT_CAUSE,
2321 A_LE_DB_INT_CAUSE,
2322 };
2323
2324 unsigned int i;
2325
2326 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2327 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2328
2329 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2330 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2331 }
2332
2333 /*
2334 * hash_mac_addr - return the hash value of a MAC address
2335 * @addr: the 48-bit Ethernet MAC address
2336 *
2337 * Hashes a MAC address according to the hash function used by HW inexact
2338 * (hash) address matching.
2339 */
2340 static int
2341 hash_mac_addr(const u8 *addr)
2342 {
2343 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2344 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2345 a ^= b;
2346 a ^= (a >> 12);
2347 a ^= (a >> 6);
2348 return (a & 0x3f);
2349 }
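
/*
 * Worked example of the hash above (arbitrary address): for
 * 00:07:43:12:34:56, a = 0x000743 and b = 0x123456, so a ^= b gives
 * 0x123315; a ^= a >> 12 gives 0x123236; a ^= a >> 6 gives 0x127afe;
 * and a & 0x3f selects hash bucket 0x3e (62).
 */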
2350
2351 /*
2352 * t4_config_rss_range - configure a portion of the RSS mapping table
2353 * @adapter: the adapter
2354 * @mbox: mbox to use for the FW command
2355 * @viid: virtual interface whose RSS subtable is to be written
2356 * @start: start entry in the table to write
2357 * @n: how many table entries to write
2358 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2359 * @nrspq: number of values in @rspq
2360 *
2361 * Programs the selected part of the VI's RSS mapping table with the
2362 * provided values. If @nrspq < @n the supplied values are used repeatedly
2363 * until the full table range is populated.
2364 *
2365 * The caller must ensure the values in @rspq are in the range allowed for
2366 * @viid.
2367 */
2368 int
2369 t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2370 int start, int n, const u16 *rspq, unsigned int nrspq)
2371 {
2372 int ret;
2373 const u16 *rsp = rspq;
2374 const u16 *rsp_end = rspq + nrspq;
2375 struct fw_rss_ind_tbl_cmd cmd;
2376
2377 (void) memset(&cmd, 0, sizeof (cmd));
2378 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2379 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2380 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2381 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2382
2383 /*
2384 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2385 * Queue Identifiers. These Ingress Queue IDs are packed three to
2386 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2387 * reserved.
2388 */
2389 while (n > 0) {
2390 int nq = min(n, 32);
2391 __be32 *qp = &cmd.iq0_to_iq2;
2392
2393 /*
2394 * Set up the firmware RSS command header to send the next
2395 * "nq" Ingress Queue IDs to the firmware.
2396 */
2397 cmd.niqid = htons(nq);
2398 cmd.startidx = htons(start);
2399
2400 /*
2401 * "nq" more done for the start of the next loop.
2402 */
2403 start += nq;
2404 n -= nq;
2405
2406 /*
2407 * While there are still Ingress Queue IDs to stuff into the
2408 * current firmware RSS command, retrieve them from the
2409 * Ingress Queue ID array and insert them into the command.
2410 */
2411 while (nq > 0) {
2412 unsigned int v;
2413 /*
2414 * Grab up to the next 3 Ingress Queue IDs (wrapping
2415 * around the Ingress Queue ID array if necessary) and
2416 * insert them into the firmware RSS command at the
2417 * current 3-tuple position within the command.
2418 */
2419 v = V_FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2420 if (++rsp >= rsp_end)
2421 rsp = rspq;
2422 v |= V_FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2423 if (++rsp >= rsp_end)
2424 rsp = rspq;
2425 v |= V_FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2426 if (++rsp >= rsp_end)
2427 rsp = rspq;
2428
2429 *qp++ = htonl(v);
2430 nq -= 3;
2431 }
2432
2433 /*
2434 * Send this portion of the RSS table update to the firmware;
2435 * bail out on any errors.
2436 */
2437 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof (cmd), NULL);
2438 if (ret != 0)
2439 return (ret);
2440 }
2441
2442 return (0);
2443 }
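
/*
 * Usage sketch (an assumption, not driver code): replicate a four-entry
 * ingress queue list across a VI's 128-entry RSS slice. The mailbox,
 * viid, and Ingress Queue IDs are hypothetical.
 */
#if 0
static int
example_setup_rss(struct adapter *adap, int mbox, unsigned int viid)
{
	static const u16 iqs[4] = { 16, 17, 18, 19 };

	/* entries 0..127 receive iqs[] repeated 32 times */
	return (t4_config_rss_range(adap, mbox, viid, 0, 128, iqs, 4));
}
#endif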
2444
2445 /*
2446 * t4_config_glbl_rss - configure the global RSS mode
2447 * @adapter: the adapter
2448 * @mbox: mbox to use for the FW command
2449 * @mode: global RSS mode
2450 * @flags: mode-specific flags
2451 *
2452 * Sets the global RSS mode.
2453 */
2454 int
2455 t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2456 unsigned int flags)
2457 {
2458 struct fw_rss_glb_config_cmd c;
2459
2460 (void) memset(&c, 0, sizeof (c));
2461 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2462 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2463 c.retval_len16 = htonl(FW_LEN16(c));
2464 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2465 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2466 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2467 c.u.basicvirtual.mode_pkd =
2468 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2469 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2470 } else
2471 return (-EINVAL);
2472 return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
2473 }
2474
2475 /*
2476 * t4_config_vi_rss - configure per VI RSS settings
2477 * @adapter: the adapter
2478 * @mbox: mbox to use for the FW command
2479 * @viid: the VI id
2480 * @flags: RSS flags
2481 * @defq: id of the default RSS queue for the VI.
2482 *
2483 * Configures VI-specific RSS properties.
2484 */
2485 int
2486 t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2487 unsigned int flags, unsigned int defq)
2488 {
2489 struct fw_rss_vi_config_cmd c;
2490
2491 (void) memset(&c, 0, sizeof (c));
2492 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2493 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2494 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2495 c.retval_len16 = htonl(FW_LEN16(c));
2496 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2497 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2498 return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
2499 }
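
/*
 * Sketch of how the two commands above combine during RSS setup (an
 * assumption, not driver code): the flag macros are presumed to come
 * from t4fw_interface.h and the default queue value is hypothetical.
 */
#if 0
static int
example_rss_mode(struct adapter *adap, int mbox, unsigned int viid)
{
	int ret;

	/* global: basic virtual mode with tunnel map lookups enabled */
	ret = t4_config_glbl_rss(adap, mbox,
	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
	    F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN);
	if (ret != 0)
		return (ret);

	/* per-VI: hash IPv4 4-tuples, default to RSS queue 0 */
	return (t4_config_vi_rss(adap, mbox, viid,
	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN, 0));
}
#endif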
2500
2501 /* Read an RSS table row */
2502 static int
2503 rd_rss_row(struct adapter *adap, int row, u32 *val)
2504 {
2505 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2506 return (t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2507 5, 0, val));
2508 }
2509
2510 /*
2511 * t4_read_rss - read the contents of the RSS mapping table
2512 * @adapter: the adapter
2513 * @map: holds the contents of the RSS mapping table
2514 *
2515 * Reads the contents of the RSS hash->queue mapping table.
2516 */
2517 int
2518 t4_read_rss(struct adapter *adapter, u16 *map)
2519 {
2520 u32 val;
2521 int i, ret;
2522
2523 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2524 ret = rd_rss_row(adapter, i, &val);
2525 if (ret != 0)
2526 return (ret);
2527 *map++ = G_LKPTBLQUEUE0(val);
2528 *map++ = G_LKPTBLQUEUE1(val);
2529 }
2530 return (0);
2531 }
2532
2533 /*
2534 * t4_read_rss_key - read the global RSS key
2535 * @adap: the adapter
2536 * @key: 10-entry array holding the 320-bit RSS key
2537 *
2538 * Reads the global 320-bit RSS key.
2539 */
2540 void
2541 t4_read_rss_key(struct adapter *adap, u32 *key)
2542 {
2543 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2544 A_TP_RSS_SECRET_KEY0);
2545 }
2546
2547 /*
2548 * t4_write_rss_key - program one of the RSS keys
2549 * @adap: the adapter
2550 * @key: 10-entry array holding the 320-bit RSS key
2551 * @idx: which RSS key to write
2552 *
2553 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2554 * 0..15 the corresponding entry in the RSS key table is written,
2555 * otherwise the global RSS key is written.
2556 */
2557 void
2558 t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2559 {
2560 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2561 A_TP_RSS_SECRET_KEY0);
2562 if (idx >= 0 && idx < 16)
2563 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2564 V_KEYWRADDR(idx) | F_KEYWREN);
2565 }
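
/*
 * Usage sketch (assumption): program only the global RSS key with
 * arbitrary example data. Any idx outside 0..15, such as -1, writes
 * the global key without touching a key-table slot.
 */
#if 0
static void
example_set_rss_key(struct adapter *adap)
{
	static const u32 key[10] = {
		0x01234567, 0x89abcdef, 0x02468ace, 0x13579bdf, 0x0f1e2d3c,
		0x4b5a6978, 0x8796a5b4, 0xc3d2e1f0, 0x00112233, 0x44556677
	};

	t4_write_rss_key(adap, key, -1);
}
#endif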
2566
2567 /*
2568 * t4_read_rss_pf_config - read PF RSS Configuration Table
2569 * @adapter: the adapter
2570 * @index: the entry in the PF RSS table to read
2571 * @valp: where to store the returned value
2572 *
2573 * Reads the PF RSS Configuration Table at the specified index and returns
2574 * the value found there.
2575 */
2576 void
2577 t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2578 {
2579 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2580 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2581 }
2582
2583 /*
2584 * t4_write_rss_pf_config - write PF RSS Configuration Table
2585 * @adapter: the adapter
2586 * @index: the entry in the VF RSS table to read
2587 * @val: the value to store
2588 *
2589 * Writes the PF RSS Configuration Table at the specified index with the
2590 * specified value.
2591 */
2592 void
2593 t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2594 {
2595 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2596 &val, 1, A_TP_RSS_PF0_CONFIG + index);
2597 }
2598
2599 /*
2600 * t4_read_rss_vf_config - read VF RSS Configuration Table
2601 * @adapter: the adapter
2602 * @index: the entry in the VF RSS table to read
2603 * @vfl: where to store the returned VFL
2604 * @vfh: where to store the returned VFH
2605 *
2606 * Reads the VF RSS Configuration Table at the specified index and returns
2607 * the (VFL, VFH) values found there.
2608 */
2609 void
2610 t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl,
2611 u32 *vfh)
2612 {
2613 u32 vrt;
2614
2615 /*
2616 * Request that the index'th VF Table values be read into VFL/VFH.
2617 */
2618 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2619 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2620 vrt |= V_VFWRADDR(index) | F_VFRDEN;
2621 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2622
2623 /*
2624 * Grab the VFL/VFH values ...
2625 */
2626 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2627 vfl, 1, A_TP_RSS_VFL_CONFIG);
2628 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2629 vfh, 1, A_TP_RSS_VFH_CONFIG);
2630 }
2631
2632 /*
2633 * t4_write_rss_vf_config - write VF RSS Configuration Table
2634 *
2635 * @adapter: the adapter
2636 * @index: the entry in the VF RSS table to write
2637 * @vfl: the VFL to store
2638 * @vfh: the VFH to store
2639 *
2640 * Writes the VF RSS Configuration Table at the specified index with the
2641 * specified (VFL, VFH) values.
2642 */
2643 void
2644 t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl,
2645 u32 vfh)
2646 {
2647 u32 vrt;
2648
2649 /*
2650 * Load up VFL/VFH with the values to be written ...
2651 */
2652 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2653 &vfl, 1, A_TP_RSS_VFL_CONFIG);
2654 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2655 &vfh, 1, A_TP_RSS_VFH_CONFIG);
2656
2657 /*
2658 * Write the VFL/VFH into the VF Table at index'th location.
2659 */
2660 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2661 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2662 vrt |= V_VFWRADDR(index) | F_VFWREN;
2663 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2664 }
2665
2666 /*
2667 * t4_read_rss_pf_map - read PF RSS Map
2668 * @adapter: the adapter
2669 *
2670 * Reads the PF RSS Map register and returns its value.
2671 */
2672 u32
2673 t4_read_rss_pf_map(struct adapter *adapter)
2674 {
2675 u32 pfmap;
2676
2677 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2678 &pfmap, 1, A_TP_RSS_PF_MAP);
2679 return (pfmap);
2680 }
2681
2682 /*
2683 * t4_write_rss_pf_map - write PF RSS Map
2684 * @adapter: the adapter
2685 * @pfmap: PF RSS Map value
2686 *
2687 * Writes the specified value to the PF RSS Map register.
2688 */
2689 void
2690 t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2691 {
2692 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2693 &pfmap, 1, A_TP_RSS_PF_MAP);
2694 }
2695
2696 /*
2697 * t4_read_rss_pf_mask - read PF RSS Mask
2698 * @adapter: the adapter
2699 *
2700 * Reads the PF RSS Mask register and returns its value.
2701 */
2702 u32
2703 t4_read_rss_pf_mask(struct adapter *adapter)
2704 {
2705 u32 pfmask;
2706
2707 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2708 &pfmask, 1, A_TP_RSS_PF_MSK);
2709 return (pfmask);
2710 }
2711
2712 /*
2713 * t4_write_rss_pf_mask - write PF RSS Mask
2714 * @adapter: the adapter
2715 * @pfmask: PF RSS Mask value
2716 *
2717 * Writes the specified value to the PF RSS Mask register.
2718 */
2719 void
2720 t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2721 {
2722 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2723 &pfmask, 1, A_TP_RSS_PF_MSK);
2724 }
2725
2726 /*
2727 * t4_set_filter_mode - configure the optional components of filter tuples
2728 * @adap: the adapter
2729 * @mode_map: a bitmap selecting which optional filter components to enable
2730 *
2731 * Sets the filter mode by selecting the optional components to enable
2732 * in filter tuples. Returns 0 on success and a negative error if the
2733 * requested mode needs more bits than are available for optional
2734 * components.
2735 */
2736 int
2737 t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2738 {
2739 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2740
2741 int i, nbits = 0;
2742
2743 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2744 if (mode_map & (1 << i))
2745 nbits += width[i];
2746 if (nbits > FILTER_OPT_LEN)
2747 return (-EINVAL);
2748 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2749 A_TP_VLAN_PRI_MAP);
2750 return (0);
2751 }
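
/*
 * Usage sketch (assumption): select physical port, VLAN, and IP
 * protocol as the optional filter components, presuming S_PORT, S_VLAN,
 * and S_PROTOCOL name TP_VLAN_PRI_MAP bit positions in the
 * S_FCOE..S_FRAGMENTATION range checked above. Their widths
 * (3 + 17 + 8 = 28 bits) fit within FILTER_OPT_LEN.
 */
#if 0
static int
example_filter_mode(struct adapter *adap)
{
	return (t4_set_filter_mode(adap,
	    (1 << S_PORT) | (1 << S_VLAN) | (1 << S_PROTOCOL)));
}
#endif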
2752
2753 /*
2754 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2755 * @adap: the adapter
2756 * @v4: holds the TCP/IP counter values
2757 * @v6: holds the TCP/IPv6 counter values
2758 *
2759 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2760 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2761 */
2762 void
2763 t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2764 struct tp_tcp_stats *v6)
2765 {
2766 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2767
2768 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2769 #define STAT(x) val[STAT_IDX(x)]
2770 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2771
2772 if (v4 != NULL) {
2773 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2774 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2775 v4->tcpOutRsts = STAT(OUT_RST);
2776 v4->tcpInSegs = STAT64(IN_SEG);
2777 v4->tcpOutSegs = STAT64(OUT_SEG);
2778 v4->tcpRetransSegs = STAT64(RXT_SEG);
2779 }
2780 if (v6 != NULL) {
2781 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2782 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2783 v6->tcpOutRsts = STAT(OUT_RST);
2784 v6->tcpInSegs = STAT64(IN_SEG);
2785 v6->tcpOutSegs = STAT64(OUT_SEG);
2786 v6->tcpRetransSegs = STAT64(RXT_SEG);
2787 }
2788 #undef STAT64
2789 #undef STAT
2790 #undef STAT_IDX
2791 }
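
/*
 * Usage sketch (assumption, not driver code): read only the IPv4
 * counters, passing NULL for @v6 to skip the IPv6 MIB read, and derive
 * a retransmission ratio in parts per million.
 */
#if 0
static u64
example_v4_retx_per_million(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL);
	if (v4.tcpOutSegs == 0)
		return (0);
	return ((v4.tcpRetransSegs * 1000000) / v4.tcpOutSegs);
}
#endif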
2792
2793 /*
2794 * t4_tp_get_err_stats - read TP's error MIB counters
2795 * @adap: the adapter
2796 * @st: holds the counter values
2797 *
2798 * Returns the values of TP's error counters.
2799 */
2800 void
2801 t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2802 {
2803 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2804 12, A_TP_MIB_MAC_IN_ERR_0);
2805 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2806 8, A_TP_MIB_TNL_CNG_DROP_0);
2807 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2808 4, A_TP_MIB_TNL_DROP_0);
2809 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2810 4, A_TP_MIB_OFD_VLN_DROP_0);
2811 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2812 4, A_TP_MIB_TCP_V6IN_ERR_0);
2813 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2814 2, A_TP_MIB_OFD_ARP_DROP);
2815 }
2816
2817 /*
2818 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
2819 * @adap: the adapter
2820 * @st: holds the counter values
2821 *
2822 * Returns the values of TP's proxy counters.
2823 */
2824 void
2825 t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2826 {
2827 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2828 4, A_TP_MIB_TNL_LPBK_0);
2829 }
2830
2831 /*
2832 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
2833 * @adap: the adapter
2834 * @st: holds the counter values
2835 *
2836 * Returns the values of TP's CPL counters.
2837 */
2838 void
2839 t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2840 {
2841 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2842 8, A_TP_MIB_CPL_IN_REQ_0);
2843 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tx_err,
2844 4, A_TP_MIB_CPL_OUT_ERR_0);
2845 }
2846
2847 /*
2848 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2849 * @adap: the adapter
2850 * @st: holds the counter values
2851 *
2852 * Returns the values of TP's RDMA counters.
2853 */
2854 void
2855 t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2856 {
2857 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2858 2, A_TP_MIB_RQE_DFR_MOD);
2859 }
2860
2861 /*
2862 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2863 * @adap: the adapter
2864 * @idx: the port index
2865 * @st: holds the counter values
2866 *
2867 * Returns the values of TP's FCoE counters for the selected port.
2868 */
2869 void
2870 t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
2871 struct tp_fcoe_stats *st)
2872 {
2873 u32 val[2];
2874
2875 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
2876 1, A_TP_MIB_FCOE_DDP_0 + idx);
2877 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
2878 1, A_TP_MIB_FCOE_DROP_0 + idx);
2879 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2880 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
2881 st->octetsDDP = ((u64)val[0] << 32) | val[1];
2882 }
2883
2884 /*
2885 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2886 * @adap: the adapter
2887 * @st: holds the counter values
2888 *
2889 * Returns the values of TP's counters for non-TCP directly-placed packets.
2890 */
2891 void
2892 t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2893 {
2894 u32 val[4];
2895
2896 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2897 A_TP_MIB_USM_PKTS);
2898 st->frames = val[0];
2899 st->drops = val[1];
2900 st->octets = ((u64)val[2] << 32) | val[3];
2901 }
2902
2903 /*
2904 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2905 * @adap: the adapter
2906 * @mtus: where to store the MTU values
2907 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2908 *
2909 * Reads the HW path MTU table.
2910 */
2911 void
2912 t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2913 {
2914 u32 v;
2915 int i;
2916
2917 for (i = 0; i < NMTUS; ++i) {
2918 t4_write_reg(adap, A_TP_MTU_TABLE,
2919 V_MTUINDEX(0xffU) | V_MTUVALUE(i));
2920 v = t4_read_reg(adap, A_TP_MTU_TABLE);
2921 mtus[i] = G_MTUVALUE(v);
2922 if (mtu_log != NULL)
2923 mtu_log[i] = G_MTUWIDTH(v);
2924 }
2925 }
2926
2927 /*
2928 * t4_read_cong_tbl - reads the congestion control table
2929 * @adap: the adapter
2930 * @incr: where to store the alpha values
2931 *
2932 * Reads the additive increments programmed into the HW congestion
2933 * control table.
2934 */
2935 void
2936 t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2937 {
2938 unsigned int mtu, w;
2939
2940 for (mtu = 0; mtu < NMTUS; ++mtu)
2941 for (w = 0; w < NCCTRL_WIN; ++w) {
2942 t4_write_reg(adap, A_TP_CCTRL_TABLE,
2943 V_ROWINDEX(0xffffU) | (mtu << 5) | w);
2944 incr[mtu][w] = (u16)t4_read_reg(adap,
2945 A_TP_CCTRL_TABLE) & 0x1fff;
2946 }
2947 }
2948
2949 /*
2950 * t4_read_pace_tbl - read the pace table
2951 * @adap: the adapter
2952 * @pace_vals: holds the returned values
2953 *
2954 * Returns the values of TP's pace table in microseconds.
2955 */
2956 void
2957 t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
2958 {
2959 unsigned int i, v;
2960
2961 for (i = 0; i < NTX_SCHED; i++) {
2962 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2963 v = t4_read_reg(adap, A_TP_PACE_TABLE);
2964 pace_vals[i] = dack_ticks_to_usec(adap, v);
2965 }
2966 }
2967
2968 /*
2969 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2970 * @adap: the adapter
2971 * @addr: the indirect TP register address
2972 * @mask: specifies the field within the register to modify
2973 * @val: new value for the field
2974 *
2975 * Sets a field of an indirect TP register to the given value.
2976 */
2977 void
2978 t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2979 unsigned int mask, unsigned int val)
2980 {
2981 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
2982 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2983 t4_write_reg(adap, A_TP_PIO_DATA, val);
2984 }
2985
2986 /*
2987 * init_cong_ctrl - initialize congestion control parameters
2988 * @a: the alpha values for congestion control
2989 * @b: the beta values for congestion control
2990 *
2991 * Initialize the congestion control parameters.
2992 */
2993 static void __devinit
2994 init_cong_ctrl(unsigned short *a, unsigned short *b)
2995 {
2996 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2997 a[9] = 2;
2998 a[10] = 3;
2999 a[11] = 4;
3000 a[12] = 5;
3001 a[13] = 6;
3002 a[14] = 7;
3003 a[15] = 8;
3004 a[16] = 9;
3005 a[17] = 10;
3006 a[18] = 14;
3007 a[19] = 17;
3008 a[20] = 21;
3009 a[21] = 25;
3010 a[22] = 30;
3011 a[23] = 35;
3012 a[24] = 45;
3013 a[25] = 60;
3014 a[26] = 80;
3015 a[27] = 100;
3016 a[28] = 200;
3017 a[29] = 300;
3018 a[30] = 400;
3019 a[31] = 500;
3020
3021 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3022 b[9] = b[10] = 1;
3023 b[11] = b[12] = 2;
3024 b[13] = b[14] = b[15] = b[16] = 3;
3025 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3026 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3027 b[28] = b[29] = 6;
3028 b[30] = b[31] = 7;
3029 }
3030
3031 /* The minimum additive increment value for the congestion control table */
3032 #define CC_MIN_INCR 2U
3033
3034 /*
3035 * t4_load_mtus - write the MTU and congestion control HW tables
3036 * @adap: the adapter
3037 * @mtus: the values for the MTU table
3038 * @alpha: the values for the congestion control alpha parameter
3039 * @beta: the values for the congestion control beta parameter
3040 *
3041 * Write the HW MTU table with the supplied MTUs and the high-speed
3042 * congestion control table with the supplied alpha, beta, and MTUs.
3043 * We write the two tables together because the additive increments
3044 * depend on the MTUs.
3045 */
3046 void
3047 t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3048 const unsigned short *alpha, const unsigned short *beta)
3049 {
3050 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3051 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3052 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3053 28672, 40960, 57344, 81920, 114688, 163840, 229376
3054 };
3055
3056 unsigned int i, w;
3057
3058 for (i = 0; i < NMTUS; ++i) {
3059 unsigned int mtu = mtus[i];
3060 unsigned int log2 = fls(mtu);
3061
3062 if (!(mtu & ((1 << log2) >> 2))) /* round to the nearest power of 2 */
3063 log2--;
3064 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3065 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3066
3067 for (w = 0; w < NCCTRL_WIN; ++w) {
3068 unsigned int inc;
3069
3070 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3071 CC_MIN_INCR);
3072
3073 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3074 (w << 16) | (beta[w] << 13) | inc);
3075 }
3076 }
3077 }
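
/*
 * Worked example of the additive-increment formula above (arbitrary
 * numbers): for mtu = 1500 in a window with alpha[w] = 2 and
 * avg_pkts[w] = 40, inc = max(((1500 - 40) * 2) / 40, CC_MIN_INCR) =
 * max(73, 2) = 73. The 40 bytes subtracted from the MTU approximate
 * TCP/IP header overhead, so the increment scales with payload size.
 */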
3078
3079 /*
3080 * t4_set_pace_tbl - set the pace table
3081 * @adap: the adapter
3082 * @pace_vals: the pace values in microseconds
3083 * @start: index of the first entry in the HW pace table to set
3084 * @n: how many entries to set
3085 *
3086 * Sets (a subset of the) HW pace table.
3087 */
3088 int
3089 t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3090 unsigned int start, unsigned int n)
3091 {
3092 unsigned int vals[NTX_SCHED], i;
3093 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3094
3095 if (n > NTX_SCHED)
3096 return (-ERANGE);
3097
3098 /* convert values from us to dack ticks, rounding to closest value */
3099 for (i = 0; i < n; i++, pace_vals++) {
3100 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3101 if (vals[i] > 0x7ff)
3102 return (-ERANGE);
3103 if (*pace_vals && vals[i] == 0)
3104 return (-ERANGE);
3105 }
3106 for (i = 0; i < n; i++, start++)
3107 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3108 return (0);
3109 }
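
/*
 * Worked conversion example (arbitrary numbers): if one dack tick is
 * 500 ns (tick_ns = 500), a requested pace of 10 us becomes
 * (1000 * 10 + 250) / 500 = 20 ticks. A nonzero request that would
 * round to 0 ticks is rejected above instead of silently disabling
 * pacing for that entry.
 */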
3110
3111 /*
3112 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3113 * @adap: the adapter
3114 * @kbps: target rate in Kbps
3115 * @sched: the scheduler index
3116 *
3117 * Configure a Tx HW scheduler for the target rate.
3118 */
3119 int
3120 t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3121 {
3122 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0U;
3123 unsigned int clk = adap->params.vpd.cclk * 1000;
3124 unsigned int selected_cpt = 0, selected_bpt = 0;
3125
3126 if (kbps > 0) {
3127 kbps *= 125; /* -> bytes */
3128 for (cpt = 1; cpt <= 255; cpt++) {
3129 tps = clk / cpt;
3130 bpt = (kbps + tps / 2) / tps;
3131 if (bpt > 0 && bpt <= 255) {
3132 v = bpt * tps;
3133 delta = v >= kbps ? v - kbps : kbps - v;
3134 if (delta < mindelta) {
3135 mindelta = delta;
3136 selected_cpt = cpt;
3137 selected_bpt = bpt;
3138 }
3139 } else if (selected_cpt != 0)
3140 break;
3141 }
3142 if (!selected_cpt)
3143 return (-EINVAL);
3144 }
3145 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3146 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3147 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3148 if (sched & 1)
3149 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3150 else
3151 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3152 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3153 return (0);
3154 }
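
/*
 * Worked example of the (cpt, bpt) search above (arbitrary numbers):
 * with a 250 MHz core clock and a 100,000 Kbps target (12,500,000
 * bytes/s), cpt = 260 gives tps = 961538 ticks/s and bpt = 13
 * bytes/tick, i.e. 12,499,994 bytes/s, just 6 bytes/s under the
 * target; the loop keeps the pair with the smallest such delta.
 */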
3155
3156 /*
3157 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3158 * @adap: the adapter
3159 * @sched: the scheduler index
3160 * @ipg: the interpacket delay in tenths of nanoseconds
3161 *
3162 * Set the interpacket delay for a HW packet rate scheduler.
3163 */
3164 int
3165 t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3166 {
3167 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3168
3169 /* convert ipg to nearest number of core clocks */
3170 ipg *= core_ticks_per_usec(adap);
3171 ipg = (ipg + 5000) / 10000;
3172 if (ipg > M_TXTIMERSEPQ0)
3173 return (-EINVAL);
3174
3175 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3176 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3177 if (sched & 1)
3178 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3179 else
3180 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3181 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3182 (void) t4_read_reg(adap, A_TP_TM_PIO_DATA);
3183 return (0);
3184 }
3185
3186 /*
3187 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3188 * @adap: the adapter
3189 * @sched: the scheduler index
3190 * @kbps: the byte rate in Kbps
3191 * @ipg: the interpacket delay in tenths of nanoseconds
3192 *
3193 * Return the current configuration of a HW Tx scheduler.
3194 */
3195 void
3196 t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3197 unsigned int *ipg)
3198 {
3199 unsigned int v, addr, bpt, cpt;
3200
3201 if (kbps != NULL) {
3202 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3203 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3204 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3205 if (sched & 1)
3206 v >>= 16;
3207 bpt = (v >> 8) & 0xff;
3208 cpt = v & 0xff;
3209 if (!cpt)
3210 *kbps = 0; /* scheduler disabled */
3211 else {
3212 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3213 *kbps = (v * bpt) / 125;
3214 }
3215 }
3216 if (ipg != NULL) {
3217 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3218 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3219 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3220 if (sched & 1)
3221 v >>= 16;
3222 v &= 0xffff;
3223 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3224 }
3225 }
3226
3227 /*
3228 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3229 * clocks. The formula is
3230 *
3231 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3232 *
3233 * which is equivalent to
3234 *
3235 * bytes/s = 62.5 * bytes256 * ClkFreq_kHz (the core clock is kept in kHz)
3236 */
3237 static u64
3238 chan_rate(struct adapter *adap, unsigned int bytes256)
3239 {
3240 u64 v = bytes256 * adap->params.vpd.cclk;
3241
3242 return (v * 62 + v / 2);
3243 }
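
/*
 * The return expression computes 62.5 * v with integer arithmetic
 * (v * 62 + v / 2). Worked example (arbitrary numbers): with a 250 MHz
 * core clock (cclk = 250000 kHz) and bytes256 = 8, v = 2,000,000 and
 * the rate is 125,000,000 bytes/s, matching 8 * 256 * 250e6 / 4096
 * from the formula above.
 */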
3244
3245 /*
3246 * t4_get_chan_txrate - get the current per channel Tx rates
3247 * @adap: the adapter
3248 * @nic_rate: rates for NIC traffic
3249 * @ofld_rate: rates for offloaded traffic
3250 *
3251 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3252 * for each channel.
3253 */
3254 void
3255 t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3256 {
3257 u32 v;
3258
3259 v = t4_read_reg(adap, A_TP_TX_TRATE);
3260 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3261 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3262 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3263 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3264
3265 v = t4_read_reg(adap, A_TP_TX_ORATE);
3266 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3267 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3268 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3269 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3270 }
3271
3272 /*
3273 * t4_set_trace_filter - configure one of the tracing filters
3274 * @adap: the adapter
3275 * @tp: the desired trace filter parameters
3276 * @idx: which filter to configure
3277 * @enable: whether to enable or disable the filter
3278 *
3279 * Configures one of the tracing filters available in HW. If @enable is
3280 * %0 @tp is not examined and may be %NULL.
3281 */
3282 int
3283 t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3284 int idx, int enable)
3285 {
3286 int i, ofst = idx * 4;
3287 u32 data_reg, mask_reg, cfg;
3288 u32 multitrc = F_TRCMULTIFILTER;
3289
3290 if (!enable) {
3291 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3292 goto out;
3293 }
3294
3295 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3296 tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
3297 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
3298 return (-EINVAL);
3299
3300 if (tp->snap_len > 256) { /* must be tracer 0 */
3301 if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
3302 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
3303 t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
3304 F_TFEN)
3305 return (-EINVAL); /* other tracers are enabled */
3306 multitrc = 0;
3307 } else if (idx != 0) {
3308 i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
3309 if (G_TFCAPTUREMAX(i) > 256 &&
3310 (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
3311 return (-EINVAL);
3312 }
3313
3314 /* stop the tracer we'll be changing */
3315 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3316
3317 /* disable tracing globally if running in the wrong single/multi mode */
3318 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3319 if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
3320 t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
3321 (void) t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
3322 msleep(1);
3323 if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
3324 return (-ETIMEDOUT);
3325 }
3326 /*
3327 * At this point either the tracing is enabled and in the right mode or
3328 * disabled.
3329 */
3330
3331 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3332 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3333 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3334
3335 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3336 t4_write_reg(adap, data_reg, tp->data[i]);
3337 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3338 }
3339 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3340 V_TFCAPTUREMAX(tp->snap_len) | V_TFMINPKTSIZE(tp->min_len));
3341 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3342 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3343 V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3344
3345 cfg &= ~F_TRCMULTIFILTER;
3346 t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
3347 out: (void) t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
3348 return (0);
3349 }
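
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * program tracer 0 to capture the first 128 bytes of every packet seen on
 * port 0.  An all-zero data/mask pair matches any packet.
 */
#ifdef CXGBE_EXAMPLES
static int
example_trace_port0(struct adapter *adap)
{
	struct trace_params tp;

	(void) memset(&tp, 0, sizeof (tp));
	tp.snap_len = 128;	/* capture up to 128 bytes per packet */
	tp.min_len = 0;		/* no minimum packet size */
	tp.port = 0;		/* trace packets on port 0 */
	return (t4_set_trace_filter(adap, &tp, 0, 1));
}
#endif /* CXGBE_EXAMPLES */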
3350
3351 /*
3352 * t4_get_trace_filter - query one of the tracing filters
3353 * @adap: the adapter
3354 * @tp: the current trace filter parameters
3355 * @idx: which trace filter to query
3356 * @enabled: non-zero if the filter is enabled
3357 *
3358 * Returns the current settings of one of the HW tracing filters.
3359 */
3360 void
3361 t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3362 int *enabled)
3363 {
3364 u32 ctla, ctlb;
3365 int i, ofst = idx * 4;
3366 u32 data_reg, mask_reg;
3367
3368 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3369 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3370
3371 *enabled = !!(ctla & F_TFEN);
3372 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3373 tp->min_len = G_TFMINPKTSIZE(ctlb);
3374 tp->skip_ofst = G_TFOFFSET(ctla);
3375 tp->skip_len = G_TFLENGTH(ctla);
3376 tp->invert = !!(ctla & F_TFINVERTMATCH);
3377 tp->port = G_TFPORT(ctla);
3378
3379 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3380 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3381 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3382
3383 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3384 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3385 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3386 }
3387 }
3388
3389 /*
3390 * t4_pmtx_get_stats - returns the HW stats from PMTX
3391 * @adap: the adapter
3392 * @cnt: where to store the count statistics
3393 * @cycles: where to store the cycle statistics
3394 *
3395 * Returns performance statistics from PMTX.
3396 */
3397 void
3398 t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3399 {
3400 int i;
3401
3402 for (i = 0; i < PM_NSTATS; i++) {
3403 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3404 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3405 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3406 }
3407 }
3408
3409 /*
3410 * t4_pmrx_get_stats - returns the HW stats from PMRX
3411 * @adap: the adapter
3412 * @cnt: where to store the count statistics
3413 * @cycles: where to store the cycle statistics
3414 *
3415 * Returns performance statistics from PMRX.
3416 */
3417 void
3418 t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3419 {
3420 int i;
3421
3422 for (i = 0; i < PM_NSTATS; i++) {
3423 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3424 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3425 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3426 }
3427 }
3428
3429 /*
3430 * get_mps_bg_map - return the buffer groups associated with a port
3431 * @adap: the adapter
3432 * @idx: the port index
3433 *
3434 * Returns a bitmap indicating which MPS buffer groups are associated
3435 * with the given port. Bit i is set if buffer group i is used by the
3436 * port.
3437 */
3438 static unsigned int
3439 get_mps_bg_map(struct adapter *adap, int idx)
3440 {
3441 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3442
3443 if (n == 0)
3444 return (idx == 0 ? 0xf : 0);
3445 if (n == 1)
3446 return (idx < 2 ? (3 << (2 * idx)) : 0);
3447 return (1 << idx);
3448 }
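
/*
 * Derived mapping (restating the code above for clarity): the NUMPORTS
 * field encodes the port configuration, and the four MPS buffer groups
 * are divided among the ports accordingly:
 *
 *	NUMPORTS == 0 (one port):   port 0 -> BGs 0-3 (0xf)
 *	NUMPORTS == 1 (two ports):  port 0 -> BGs 0-1, port 1 -> BGs 2-3
 *	otherwise    (four ports):  port i -> BG i
 */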
3449
3450 /*
3451 * t4_get_port_stats - collect port statistics
3452 * @adap: the adapter
3453 * @idx: the port index
3454 * @p: the stats structure to fill
3455 *
3456 * Collect statistics related to the given port from HW.
3457 */
3458 void
3459 t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3460 {
3461 u32 bgmap = get_mps_bg_map(adap, idx);
3462
3463 #define GET_STAT(name) \
3464 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3465 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3466
3467 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3468 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3469 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3470 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3471 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3472 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3473 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3474 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3475 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3476 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3477 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3478 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3479 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3480 p->tx_drop = GET_STAT(TX_PORT_DROP);
3481 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3482 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3483 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3484 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3485 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3486 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3487 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3488 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3489 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3490
3491 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3492 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3493 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3494 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3495 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3496 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3497 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3498 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3499 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3500 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3501 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3502 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3503 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3504 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3505 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3506 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3507 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3508 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3509 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3510 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3511 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3512 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3513 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3514 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3515 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3516 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3517 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3518
3519 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3520 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3521 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3522 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3523 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3524 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3525 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3526 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3527
3528 #undef GET_STAT
3529 #undef GET_STAT_COM
3530 }
3531
3532 /*
3533 * t4_clr_port_stats - clear port statistics
3534 * @adap: the adapter
3535 * @idx: the port index
3536 *
3537 * Clear HW statistics for the given port.
3538 */
3539 void
3540 t4_clr_port_stats(struct adapter *adap, int idx)
3541 {
3542 unsigned int i;
3543 u32 bgmap = get_mps_bg_map(adap, idx);
3544
3545 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3546 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3547 t4_write_reg(adap, PORT_REG(idx, i), 0);
3548 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3549 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3550 t4_write_reg(adap, PORT_REG(idx, i), 0);
3551 for (i = 0; i < 4; i++)
3552 if (bgmap & (1 << i)) {
3553 t4_write_reg(adap,
3554 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3555 t4_write_reg(adap,
3556 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3557 }
3558 }
3559
3560 /*
3561 * t4_get_lb_stats - collect loopback port statistics
3562 * @adap: the adapter
3563 * @idx: the loopback port index
3564 * @p: the stats structure to fill
3565 *
3566 * Return HW statistics for the given loopback port.
3567 */
3568 void
3569 t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3570 {
3571 u32 bgmap = get_mps_bg_map(adap, idx);
3572
3573 #define GET_STAT(name) \
3574 t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3575 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3576
3577 p->octets = GET_STAT(BYTES);
3578 p->frames = GET_STAT(FRAMES);
3579 p->bcast_frames = GET_STAT(BCAST);
3580 p->mcast_frames = GET_STAT(MCAST);
3581 p->ucast_frames = GET_STAT(UCAST);
3582 p->error_frames = GET_STAT(ERROR);
3583
3584 p->frames_64 = GET_STAT(64B);
3585 p->frames_65_127 = GET_STAT(65B_127B);
3586 p->frames_128_255 = GET_STAT(128B_255B);
3587 p->frames_256_511 = GET_STAT(256B_511B);
3588 p->frames_512_1023 = GET_STAT(512B_1023B);
3589 p->frames_1024_1518 = GET_STAT(1024B_1518B);
3590 p->frames_1519_max = GET_STAT(1519B_MAX);
3591 p->drop = t4_read_reg(adap, PORT_REG(idx,
3592 A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3593
3594 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3595 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3596 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3597 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3598 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3599 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3600 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3601 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3602
3603 #undef GET_STAT
3604 #undef GET_STAT_COM
3605 }
3606
3607 /*
3608 * t4_wol_magic_enable - enable/disable magic packet WoL
3609 * @adap: the adapter
3610 * @port: the physical port index
3611 * @addr: MAC address expected in magic packets, %NULL to disable
3612 *
3613 * Enables/disables magic packet wake-on-LAN for the selected port.
3614 */
3615 void
3616 t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr)
3617 {
3618 if (addr != NULL) {
3619 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3620 (addr[2] << 24) | (addr[3] << 16) |
3621 (addr[4] << 8) | addr[5]);
3622 t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3623 (addr[0] << 8) | addr[1]);
3624 }
3625 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3626 V_MAGICEN(addr != NULL));
3627 }
3628
3629 /*
3630 * t4_wol_pat_enable - enable/disable pattern-based WoL
3631 * @adap: the adapter
3632 * @port: the physical port index
3633 * @map: bitmap of which HW pattern filters to set
3634 * @mask0: byte mask for bytes 0-63 of a packet
3635 * @mask1: byte mask for bytes 64-127 of a packet
3636 * @crc: Ethernet CRC for selected bytes
3637 * @enable: enable/disable switch
3638 *
3639 * Sets the pattern filters indicated in @map to mask out the bytes
3640 * specified in @mask0/@mask1 in received packets and compare the CRC of
3641 * the resulting packet against @crc. If @enable is %true pattern-based
3642 * WoL is enabled, otherwise disabled.
3643 */
3644 int
3645 t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3646 u64 mask0, u64 mask1, unsigned int crc, bool enable)
3647 {
3648 int i;
3649
3650 if (!enable) {
3651 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3652 F_PATEN, 0);
3653 return (0);
3654 }
3655 if (map > 0xff)
3656 return (-EINVAL);
3657
3658 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3659
3660 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3661 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3662 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3663
3664 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3665 if (!(map & 1))
3666 continue;
3667
3668 /* write byte masks */
3669 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3670 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3671 (void) t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3672 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3673 return (-ETIMEDOUT);
3674
3675 /* write CRC */
3676 t4_write_reg(adap, EPIO_REG(DATA0), crc);
3677 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3678 (void) t4_read_reg(adap, EPIO_REG(OP)); /* flush */
3679 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3680 return (-ETIMEDOUT);
3681 }
3682 #undef EPIO_REG
3683
3684 t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3685 return (0);
3686 }
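
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * arm magic-packet WoL for a port and disable any pattern-based WoL that
 * may have been left enabled.
 */
#ifdef CXGBE_EXAMPLES
static int
example_arm_magic_wol(struct adapter *adap, unsigned int port,
    const u8 mac[6])
{
	t4_wol_magic_enable(adap, port, mac);	/* match magic packets for mac */
	/* map/masks/crc are ignored when disabling */
	return (t4_wol_pat_enable(adap, port, 0, 0, 0, 0, false));
}
#endif /* CXGBE_EXAMPLES */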
3687
3688 /*
3689 * t4_mk_filtdelwr - create a delete filter WR
3690 * @ftid: the filter ID
3691 * @wr: the filter work request to populate
3692 * @qid: ingress queue to receive the delete notification
3693 *
3694 * Creates a filter work request to delete the supplied filter. If @qid is
3695 * negative the delete notification is suppressed.
3696 */
3697 void
3698 t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3699 {
3700 (void) memset(wr, 0, sizeof (*wr));
3701 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3702 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof (*wr) / 16));
3703 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3704 V_FW_FILTER_WR_NOREPLY(qid < 0));
3705 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3706 if (qid >= 0)
3707 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3708 }
3709
3710 #define INIT_CMD(var, cmd, rd_wr) do { \
3711 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3712 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3713 (var).retval_len16 = htonl(FW_LEN16(var)); \
3714 } while (0)
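
/*
 * For example (expansion shown for clarity), INIT_CMD(c, BYE, WRITE)
 * token-pastes FW_##cmd##_CMD and F_FW_CMD_##rd_wr to produce:
 *
 *	(c).op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	(c).retval_len16 = htonl(FW_LEN16(c));
 */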
3715
3716 /*
3717 * t4_mdio_rd - read a PHY register through MDIO
3718 * @adap: the adapter
3719 * @mbox: mailbox to use for the FW command
3720 * @phy_addr: the PHY address
3721 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3722 * @reg: the register to read
3723 * @valp: where to store the value
3724 *
3725 * Issues a FW command through the given mailbox to read a PHY register.
3726 */
3727 int
3728 t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3729 unsigned int mmd, unsigned int reg, unsigned int *valp)
3730 {
3731 int ret;
3732 struct fw_ldst_cmd c;
3733
3734 (void) memset(&c, 0, sizeof (c));
3735 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3736 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3737 c.cycles_to_len16 = htonl(FW_LEN16(c));
3738 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3739 V_FW_LDST_CMD_MMD(mmd));
3740 c.u.mdio.raddr = htons(reg);
3741
3742 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
3743 if (ret == 0)
3744 *valp = ntohs(c.u.mdio.rval);
3745 return (ret);
3746 }
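
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * read a clause-45 PHY's identifier, which IEEE 802.3 places in registers
 * 2 and 3 of the PMA/PMD MMD (MMD 1).
 */
#ifdef CXGBE_EXAMPLES
static int
example_read_phy_id(struct adapter *adap, unsigned int mbox,
    unsigned int phy_addr, u32 *id)
{
	unsigned int hi, lo;
	int ret;

	ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 2, &hi);
	if (ret == 0)
		ret = t4_mdio_rd(adap, mbox, phy_addr, 1, 3, &lo);
	if (ret == 0)
		*id = ((u32)hi << 16) | lo;
	return (ret);
}
#endif /* CXGBE_EXAMPLES */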
3747
3748 /*
3749 * t4_mdio_wr - write a PHY register through MDIO
3750 * @adap: the adapter
3751 * @mbox: mailbox to use for the FW command
3752 * @phy_addr: the PHY address
3753 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
3754 * @reg: the register to write
3755 * @valp: value to write
3756 *
3757 * Issues a FW command through the given mailbox to write a PHY register.
3758 */
3759 int
3760 t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3761 unsigned int mmd, unsigned int reg, unsigned int val)
3762 {
3763 struct fw_ldst_cmd c;
3764
3765 (void) memset(&c, 0, sizeof (c));
3766 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3767 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3768 c.cycles_to_len16 = htonl(FW_LEN16(c));
3769 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3770 V_FW_LDST_CMD_MMD(mmd));
3771 c.u.mdio.raddr = htons(reg);
3772 c.u.mdio.rval = htons(val);
3773
3774 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
3775 }
3776
3777 /*
3778 * t4_sge_ctxt_rd - read an SGE context through FW
3779 * @adap: the adapter
3780 * @mbox: mailbox to use for the FW command
3781 * @cid: the context id
3782 * @ctype: the context type
3783 * @data: where to store the context data
3784 *
3785 * Issues a FW command through the given mailbox to read an SGE context.
3786 */
3787 int
3788 t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3789 enum ctxt_type ctype, u32 *data)
3790 {
3791 int ret;
3792 struct fw_ldst_cmd c;
3793
3794 if (ctype == CTXT_EGRESS)
3795 ret = FW_LDST_ADDRSPC_SGE_EGRC;
3796 else if (ctype == CTXT_INGRESS)
3797 ret = FW_LDST_ADDRSPC_SGE_INGC;
3798 else if (ctype == CTXT_FLM)
3799 ret = FW_LDST_ADDRSPC_SGE_FLMC;
3800 else
3801 ret = FW_LDST_ADDRSPC_SGE_CONMC;
3802
3803 (void) memset(&c, 0, sizeof (c));
3804 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3805 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3806 c.cycles_to_len16 = htonl(FW_LEN16(c));
3807 c.u.idctxt.physid = htonl(cid);
3808
3809 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
3810 if (ret == 0) {
3811 data[0] = ntohl(c.u.idctxt.ctxt_data0);
3812 data[1] = ntohl(c.u.idctxt.ctxt_data1);
3813 data[2] = ntohl(c.u.idctxt.ctxt_data2);
3814 data[3] = ntohl(c.u.idctxt.ctxt_data3);
3815 data[4] = ntohl(c.u.idctxt.ctxt_data4);
3816 data[5] = ntohl(c.u.idctxt.ctxt_data5);
3817 }
3818 return (ret);
3819 }
3820
3821 /*
3822 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3823 * @adap: the adapter
3824 * @cid: the context id
3825 * @ctype: the context type
3826 * @data: where to store the context data
3827 *
3828 * Reads an SGE context directly, bypassing FW. This is only for
3829 * debugging when FW is unavailable.
3830 */
3831 int
3832 t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3833 u32 *data)
3834 {
3835 int i, ret;
3836
3837 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3838 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3839 if (!ret)
3840 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3841 *data++ = t4_read_reg(adap, i);
3842 return (ret);
3843 }
3844
3845 /*
3846 * t4_fw_hello - establish communication with FW
3847 * @adap: the adapter
3848 * @mbox: mailbox to use for the FW command
3849 * @evt_mbox: mailbox to receive async FW events
3850 * @master: specifies the caller's willingness to be the device master
3851 * @state: returns the current device state (if non-NULL)
3852 *
3853 * Issues a command to establish communication with FW. Returns either
3854 * an error (negative integer) or the mailbox of the Master PF.
3855 */
3856 int
3857 t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3858 enum dev_master master, enum dev_state *state)
3859 {
3860 int ret;
3861 struct fw_hello_cmd c;
3862 u32 v;
3863 unsigned int master_mbox;
3864 int retries = FW_CMD_HELLO_RETRIES;
3865
3866 retry:
3867 (void) memset(&c, 0, sizeof (c));
3868 /* LINTED: E_CONSTANT_CONDITION */
3869 INIT_CMD(c, HELLO, WRITE);
3870 c.err_to_clearinit = htonl(
3871 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3872 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3873 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3874 M_FW_HELLO_CMD_MBMASTER) |
3875 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3876 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3877 F_FW_HELLO_CMD_CLEARINIT);
3878
3879 /*
3880 * Issue the HELLO command to the firmware. If it's not successful
3881 * but indicates that we got a "busy" or "timeout" condition, retry
3882 * the HELLO until we exhaust our retry limit.
3883 */
3884 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
3885 if (ret != FW_SUCCESS) {
3886 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3887 goto retry;
3888 return (ret);
3889 }
3890
3891 v = ntohl(c.err_to_clearinit);
3892 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3893 if (state != NULL) {
3894 if (v & F_FW_HELLO_CMD_ERR)
3895 *state = DEV_STATE_ERR;
3896 else if (v & F_FW_HELLO_CMD_INIT)
3897 *state = DEV_STATE_INIT;
3898 else
3899 *state = DEV_STATE_UNINIT;
3900 }
3901
3902 /*
3903 * If we're not the Master PF then we need to wait around for the
3904 * Master PF Driver to finish setting up the adapter.
3905 *
3906 * Note that we also do this wait if we're a non-Master-capable PF and
3907 * there is no current Master PF; a Master PF may show up momentarily
3908 * and we wouldn't want to fail pointlessly. (This can happen when an
3909 * OS loads lots of different drivers rapidly at the same time). In
3910 * this case, the Master PF returned by the firmware will be
3911 * M_PCIE_FW_MASTER so the test below will work ...
3912 */
3913 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
3914 master_mbox != mbox) {
3915 int waiting = FW_CMD_HELLO_TIMEOUT;
3916
3917 /*
3918 * Wait for the firmware to either indicate an error or
3919 * initialized state. If we see either of these we bail out
3920 * and report the issue to the caller. If we exhaust the
3921 * "hello timeout" and we haven't exhausted our retries, try
3922 * again. Otherwise bail with a timeout error.
3923 */
3924 for (;;) {
3925 u32 pcie_fw;
3926
3927 msleep(50);
3928 waiting -= 50;
3929
3930 /*
3931 * If neither Error nor Initialized is indicated
3932 * by the firmware, keep waiting until we exhaust our
3933 * timeout ... and then retry if we haven't exhausted
3934 * our retries ...
3935 */
3936 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3937 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
3938 if (waiting <= 0) {
3939 if (retries-- > 0)
3940 goto retry;
3941
3942 return (-ETIMEDOUT);
3943 }
3944 continue;
3945 }
3946
3947 /*
3948 * We either have an Error or an Initialized condition;
3949 * report errors preferentially.
3950 */
3951 if (state != NULL) {
3952 if (pcie_fw & F_PCIE_FW_ERR)
3953 *state = DEV_STATE_ERR;
3954 else if (pcie_fw & F_PCIE_FW_INIT)
3955 *state = DEV_STATE_INIT;
3956 }
3957
3958 /*
3959 * If we arrived before a Master PF was selected and
3960 * there's now a valid Master PF, grab its identity
3961 * for our caller.
3962 */
3963 if (master_mbox == M_PCIE_FW_MASTER &&
3964 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3965 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3966 break;
3967 }
3968 }
3969
3970 return (master_mbox);
3971 }
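
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * say hello to the firmware, offering to be the master, and report whether
 * this function ended up as the Master PF.
 */
#ifdef CXGBE_EXAMPLES
static int
example_hello(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int master;

	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
	if (master < 0)
		return (master);	/* couldn't talk to the firmware */
	if (state == DEV_STATE_ERR)
		return (-EIO);		/* firmware reports a device error */
	return (master == mbox);	/* non-zero if we are the Master PF */
}
#endif /* CXGBE_EXAMPLES */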
3972
3973 /*
3974 * t4_fw_bye - end communication with FW
3975 * @adap: the adapter
3976 * @mbox: mailbox to use for the FW command
3977 *
3978 * Issues a command to terminate communication with FW.
3979 */
3980 int
3981 t4_fw_bye(struct adapter *adap, unsigned int mbox)
3982 {
3983 struct fw_bye_cmd c;
3984
3985 (void) memset(&c, 0, sizeof (c));
3986 /* LINTED: E_CONSTANT_CONDITION */
3987 INIT_CMD(c, BYE, WRITE);
3988 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
3989 }
3990
3991 /*
3992 * t4_early_init - ask FW to initialize the device
3993 * @adap: the adapter
3994 * @mbox: mailbox to use for the FW command
3995 *
3996 * Issues a command to FW to partially initialize the device. This
3997 * performs initialization that generally doesn't depend on user input.
3998 */
3999 int
4000 t4_early_init(struct adapter *adap, unsigned int mbox)
4001 {
4002 struct fw_initialize_cmd c;
4003
4004 (void) memset(&c, 0, sizeof (c));
4005 /* LINTED: E_CONSTANT_CONDITION */
4006 INIT_CMD(c, INITIALIZE, WRITE);
4007 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4008 }
4009
4010 /*
4011 * t4_fw_reset - issue a reset to FW
4012 * @adap: the adapter
4013 * @mbox: mailbox to use for the FW command
4014 * @reset: specifies the type of reset to perform
4015 *
4016 * Issues a reset command of the specified type to FW.
4017 */
4018 int
4019 t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4020 {
4021 struct fw_reset_cmd c;
4022
4023 (void) memset(&c, 0, sizeof (c));
4024 /* LINTED: E_CONSTANT_CONDITION */
4025 INIT_CMD(c, RESET, WRITE);
4026 c.val = htonl(reset);
4027 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4028 }
4029
4030 /*
4031 * t4_fw_config_file - setup an adapter via a Configuration File
4032 * @adap: the adapter
4033 * @mbox: mailbox to use for the FW command
4034 * @mtype: the memory type where the Configuration File is located
4035 * @maddr: the memory address where the Configuration File is located
4036 * @finiver: return value for CF [fini] version
4037 * @finicsum: return value for CF [fini] checksum
4038 * @cfcsum: return value for CF computed checksum
4039 *
4040 * Issue a command to get the firmware to process the Configuration
4041 * File located at the specified mtype/maddress. If the Configuration
4042 * File is processed successfully and return value pointers are
4043 * provided, the Configuration File's "[fini]" section version and
4044 * checksum values will be returned along with the computed checksum.
4045 * It's up to the caller to decide how it wants to respond to the
4046 * checksums not matching, but it is recommended that a prominent
4047 * warning be emitted in order to help people rapidly identify
4048 * changed or corrupted Configuration Files.
4049 *
4050 * Also note that it's possible to modify things like "niccaps",
4051 * "toecaps", etc. between processing the Configuration File and
4052 * telling the firmware to use the new configuration. Callers that
4053 * want to do this will need to "hand-roll" their own CAPS_CONFIGS
4054 * commands.
4055 */
4056 int
4057 t4_fw_config_file(struct adapter *adap, unsigned int mbox, unsigned int mtype,
4058 unsigned int maddr, u32 *finiver, u32 *finicsum, u32 *cfcsum)
4059 {
4060 struct fw_caps_config_cmd caps_cmd;
4061 int ret;
4062
4063 /*
4064 * Tell the firmware to process the indicated Configuration File.
4065 * If there are no errors and the caller has provided return value
4066 * pointers for the [fini] section version, checksum and computed
4067 * checksum, pass those back to the caller.
4068 */
4069 (void) memset(&caps_cmd, 0, sizeof (caps_cmd));
4070 caps_cmd.op_to_write =
4071 htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4072 F_FW_CMD_REQUEST |
4073 F_FW_CMD_READ);
4074 caps_cmd.cfvalid_to_len16 =
4075 htonl(F_FW_CAPS_CONFIG_CMD_CFVALID |
4076 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4077 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4078 FW_LEN16(caps_cmd));
4079 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), &caps_cmd);
4080 if (ret < 0)
4081 return (ret);
4082
4083 if (finiver != NULL)
4084 *finiver = ntohl(caps_cmd.finiver);
4085 if (finicsum != NULL)
4086 *finicsum = ntohl(caps_cmd.finicsum);
4087 if (cfcsum != NULL)
4088 *cfcsum = ntohl(caps_cmd.cfcsum);
4089
4090 /*
4091 * And now tell the firmware to use the configuration we just loaded.
4092 */
4093 caps_cmd.op_to_write =
4094 htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4095 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4096 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4097 return (t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), NULL));
4098 }
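
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard
 * and CH_WARN is assumed to be the driver's usual logging macro): apply a
 * Configuration File and emit the "prominent warning" suggested above when
 * the [fini] checksum doesn't match the computed one.
 */
#ifdef CXGBE_EXAMPLES
static int
example_apply_config(struct adapter *adap, unsigned int mbox,
    unsigned int mtype, unsigned int maddr)
{
	u32 finiver, finicsum, cfcsum;
	int ret;

	ret = t4_fw_config_file(adap, mbox, mtype, maddr, &finiver,
	    &finicsum, &cfcsum);
	if (ret == 0 && finicsum != cfcsum)
		CH_WARN(adap, "Configuration File checksum mismatch: "
		    "[fini] %#x, computed %#x", finicsum, cfcsum);
	return (ret);
}
#endif /* CXGBE_EXAMPLES */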
4099
4100 /*
4101 * t4_fixup_host_params - fix up host-dependent parameters
4102 * @adap: the adapter
4103 * @page_size: the host's Base Page Size
4104 * @cache_line_size: the host's Cache Line Size
4105 *
4106 * Various registers in T4 contain values which are dependent on the
4107 * host's Base Page and Cache Line Sizes. This function will fix all of
4108 * those registers with the appropriate values as passed in ...
4109 */
4110 int
4111 t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
4112 unsigned int cache_line_size)
4113 {
4114 unsigned int page_shift = fls(page_size) - 1;
4115 unsigned int sge_hps = page_shift - 10;
4116 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
4117 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
4118 unsigned int fl_align_log = fls(fl_align) - 1;
4119
4120 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
4121 V_HOSTPAGESIZEPF0(sge_hps) |
4122 V_HOSTPAGESIZEPF1(sge_hps) |
4123 V_HOSTPAGESIZEPF2(sge_hps) |
4124 V_HOSTPAGESIZEPF3(sge_hps) |
4125 V_HOSTPAGESIZEPF4(sge_hps) |
4126 V_HOSTPAGESIZEPF5(sge_hps) |
4127 V_HOSTPAGESIZEPF6(sge_hps) |
4128 V_HOSTPAGESIZEPF7(sge_hps));
4129
4130 t4_set_reg_field(adap, A_SGE_CONTROL,
4131 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
4132 F_EGRSTATUSPAGESIZE,
4133 V_INGPADBOUNDARY(fl_align_log - 5) |
4134 V_EGRSTATUSPAGESIZE(stat_len != 64));
4135
4136 /*
4137 * Adjust various SGE Free List Host Buffer Sizes.
4138 *
4139 * This is something of a crock since we're using fixed indices into
4140 * the array which are also known by the sge.c code and the T4
4141 * Firmware Configuration File. We need to come up with a much better
4142 * approach to managing this array. For now, the first four entries
4143 * are:
4144 *
4145 * 0: Host Page Size
4146 * 1: 64KB
4147 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
4148 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
4149 *
4150 * For the single-MTU buffers in unpacked mode we need to include
4151 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
4152 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
4153 * Padding boundary. All of these are accommodated in the Factory
4154 * Default Firmware Configuration File but we need to adjust it for
4155 * this host's cache line size.
4156 */
4157 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
4158 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
4159 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1) &
4160 ~(fl_align-1));
4161 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
4162 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1) &
4163 ~(fl_align-1));
4164
4165 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
4166
4167 return (0);
4168 }
4169
4170 /*
4171 * t4_fw_initialize - ask FW to initialize the device
4172 * @adap: the adapter
4173 * @mbox: mailbox to use for the FW command
4174 *
4175 * Issues a command to FW to partially initialize the device. This
4176 * performs initialization that generally doesn't depend on user input.
4177 */
4178 int
4179 t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4180 {
4181 struct fw_initialize_cmd c;
4182
4183 (void) memset(&c, 0, sizeof (c));
4184 /* LINTED: E_CONSTANT_CONDITION */
4185 INIT_CMD(c, INITIALIZE, WRITE);
4186 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4187 }
4188
4189 /*
4190 * t4_query_params - query FW or device parameters
4191 * @adap: the adapter
4192 * @mbox: mailbox to use for the FW command
4193 * @pf: the PF
4194 * @vf: the VF
4195 * @nparams: the number of parameters
4196 * @params: the parameter names
4197 * @val: the parameter values
4198 *
4199 * Reads the value of FW or device parameters. Up to 7 parameters can be
4200 * queried at once.
4201 */
4202 int
4203 t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4204 unsigned int vf, unsigned int nparams, const u32 *params, u32 *val)
4205 {
4206 int i, ret;
4207 struct fw_params_cmd c;
4208 __be32 *p = &c.param[0].mnem;
4209
4210 if (nparams > 7)
4211 return (-EINVAL);
4212
4213 (void) memset(&c, 0, sizeof (c));
4214 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4215 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4216 V_FW_PARAMS_CMD_VFN(vf));
4217 c.retval_len16 = htonl(FW_LEN16(c));
4218
4219 for (i = 0; i < nparams; i++, p += 2)
4220 *p = htonl(*params++);
4221
4222 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4223 if (ret == 0)
4224 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4225 *val++ = ntohl(*p);
4226 return (ret);
4227 }
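
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * query a single device parameter -- here the port vector -- by building
 * the parameter mnemonic with the V_FW_PARAMS_* macros.
 */
#ifdef CXGBE_EXAMPLES
static int
example_query_portvec(struct adapter *adap, unsigned int mbox,
    unsigned int pf, unsigned int vf, u32 *portvec)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	return (t4_query_params(adap, mbox, pf, vf, 1, &param, portvec));
}
#endif /* CXGBE_EXAMPLES */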
4228
4229 /*
4230 * t4_set_params - sets FW or device parameters
4231 * @adap: the adapter
4232 * @mbox: mailbox to use for the FW command
4233 * @pf: the PF
4234 * @vf: the VF
4235 * @nparams: the number of parameters
4236 * @params: the parameter names
4237 * @val: the parameter values
4238 *
4239 * Sets the value of FW or device parameters. Up to 7 parameters can be
4240 * specified at once.
4241 */
4242 int
4243 t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4244 unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val)
4245 {
4246 struct fw_params_cmd c;
4247 __be32 *p = &c.param[0].mnem;
4248
4249 if (nparams > 7)
4250 return (-EINVAL);
4251
4252 (void) memset(&c, 0, sizeof (c));
4253 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4254 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4255 V_FW_PARAMS_CMD_VFN(vf));
4256 c.retval_len16 = htonl(FW_LEN16(c));
4257
4258 while (nparams--) {
4259 *p++ = htonl(*params++);
4260 *p++ = htonl(*val++);
4261 }
4262
4263 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4264 }
4265
4266 /*
4267 * t4_cfg_pfvf - configure PF/VF resource limits
4268 * @adap: the adapter
4269 * @mbox: mailbox to use for the FW command
4270 * @pf: the PF being configured
4271 * @vf: the VF being configured
4272 * @txq: the max number of egress queues
4273 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4274 * @rxqi: the max number of interrupt-capable ingress queues
4275 * @rxq: the max number of interruptless ingress queues
4276 * @tc: the PCI traffic class
4277 * @vi: the max number of virtual interfaces
4278 * @cmask: the channel access rights mask for the PF/VF
4279 * @pmask: the port access rights mask for the PF/VF
4280 * @nexact: the maximum number of exact MPS filters
4281 * @rcaps: read capabilities
4282 * @wxcaps: write/execute capabilities
4283 *
4284 * Configures resource limits and capabilities for a physical or virtual
4285 * function.
4286 */
4287 int
4288 t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4289 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4290 unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi,
4291 unsigned int cmask, unsigned int pmask, unsigned int nexact,
4292 unsigned int rcaps, unsigned int wxcaps)
4293 {
4294 struct fw_pfvf_cmd c;
4295
4296 (void) memset(&c, 0, sizeof (c));
4297 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4298 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | V_FW_PFVF_CMD_VFN(vf));
4299 c.retval_len16 = htonl(FW_LEN16(c));
4300 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4301 V_FW_PFVF_CMD_NIQ(rxq));
4302 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4303 V_FW_PFVF_CMD_PMASK(pmask) | V_FW_PFVF_CMD_NEQ(txq));
4304 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4305 V_FW_PFVF_CMD_NEXACTF(nexact));
4306 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4307 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4308 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4309 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4310 }
4311
4312 /*
4313 * t4_alloc_vi - allocate a virtual interface
4314 * @adap: the adapter
4315 * @mbox: mailbox to use for the FW command
4316 * @port: physical port associated with the VI
4317 * @pf: the PF owning the VI
4318 * @vf: the VF owning the VI
4319 * @nmac: number of MAC addresses needed (1 to 5)
4320 * @mac: the MAC addresses of the VI
4321 * @rss_size: size of RSS table slice associated with this VI
4322 *
4323 * Allocates a virtual interface for the given physical port. If @mac is
4324 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4325 * @mac should be large enough to hold @nmac Ethernet addresses; they are
4326 * stored consecutively so the space needed is @nmac * 6 bytes.
4327 * Returns a negative error number or the non-negative VI id.
4328 */
4329 int
4330 t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4331 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4332 unsigned int *rss_size)
4333 {
4334 int ret;
4335 struct fw_vi_cmd c;
4336
4337 (void) memset(&c, 0, sizeof (c));
4338 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4339 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4340 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4341 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4342 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4343 c.nmac = nmac - 1;
4344
4345 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4346 if (ret != 0)
4347 return (ret);
4348
4349 if (mac != NULL) {
4350 (void) memcpy(mac, c.mac, sizeof (c.mac));
4351 switch (nmac) {
4352 case 5:
4353 (void) memcpy(mac + 24, c.nmac3, sizeof (c.nmac3));
4354 /* FALLTHRU */
4355 case 4:
4356 (void) memcpy(mac + 18, c.nmac2, sizeof (c.nmac2));
4357 /* FALLTHRU */
4358 case 3:
4359 (void) memcpy(mac + 12, c.nmac1, sizeof (c.nmac1));
4360 /* FALLTHRU */
4361 case 2:
4362 (void) memcpy(mac + 6, c.nmac0, sizeof (c.nmac0));
4363 }
4364 }
4365 if (rss_size != NULL)
4366 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
4367 return (G_FW_VI_CMD_VIID(ntohs(c.type_to_viid)));
4368 }
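
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * allocate a VI with a single MAC address and recover the FW-assigned
 * address and RSS slice size.
 */
#ifdef CXGBE_EXAMPLES
static int
example_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
    unsigned int pf, unsigned int vf)
{
	u8 mac[6];
	unsigned int rss_size;
	int viid;

	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);
	if (viid < 0)
		return (viid);		/* allocation failed */
	/* mac[] now holds the FW-assigned Ethernet address for this VI */
	return (viid);
}
#endif /* CXGBE_EXAMPLES */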
4369
4370 /*
4371 * t4_free_vi - free a virtual interface
4372 * @adap: the adapter
4373 * @mbox: mailbox to use for the FW command
4374 * @pf: the PF owning the VI
4375 * @vf: the VF owning the VI
4376 * @viid: virtual interface identifier
4377 *
4378 * Free a previously allocated virtual interface.
4379 */
4380 int
4381 t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4382 unsigned int vf, unsigned int viid)
4383 {
4384 struct fw_vi_cmd c;
4385
4386 (void) memset(&c, 0, sizeof (c));
4387 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4388 F_FW_CMD_REQUEST |
4389 F_FW_CMD_EXEC |
4390 V_FW_VI_CMD_PFN(pf) |
4391 V_FW_VI_CMD_VFN(vf));
4392 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4393 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4394
4395 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), &c));
4396 }
4397
4398 /*
4399 * t4_set_rxmode - set Rx properties of a virtual interface
4400 * @adap: the adapter
4401 * @mbox: mailbox to use for the FW command
4402 * @viid: the VI id
4403 * @mtu: the new MTU or -1
4404 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4405 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4406 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4407 * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4408 * @sleep_ok: if true we may sleep while awaiting command completion
4409 *
4410 * Sets Rx properties of a virtual interface.
4411 */
4412 int
4413 t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4414 int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok)
4415 {
4416 struct fw_vi_rxmode_cmd c;
4417
4418 /* convert to FW values */
4419 if (mtu < 0)
4420 mtu = M_FW_VI_RXMODE_CMD_MTU;
4421 if (promisc < 0)
4422 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4423 if (all_multi < 0)
4424 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4425 if (bcast < 0)
4426 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4427 if (vlanex < 0)
4428 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4429
4430 (void) memset(&c, 0, sizeof (c));
4431 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4432 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4433 c.retval_len16 = htonl(FW_LEN16(c));
4434 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4435 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4436 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4437 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4438 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4439 return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
4440 }
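
/*
 * Usage sketch (illustrative only; CXGBE_EXAMPLES is a hypothetical guard):
 * toggle promiscuous mode while leaving the MTU and the other Rx
 * properties unchanged by passing -1 for them.
 */
#ifdef CXGBE_EXAMPLES
static int
example_set_promisc(struct adapter *adap, unsigned int mbox,
    unsigned int viid, int on)
{
	return (t4_set_rxmode(adap, mbox, viid, -1, on, -1, -1, -1, true));
}
#endif /* CXGBE_EXAMPLES */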
4441
4442 /*
4443 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4444 * @adap: the adapter
4445 * @mbox: mailbox to use for the FW command
4446 * @viid: the VI id
4447 * @free: if true any existing filters for this VI id are first removed
4448 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4449 * @addr: the MAC address(es)
4450 * @idx: where to store the index of each allocated filter
4451 * @hash: pointer to hash address filter bitmap
4452 * @sleep_ok: call is allowed to sleep
4453 *
4454 * Allocates an exact-match filter for each of the supplied addresses and
4455 * sets it to the corresponding address. If @idx is not %NULL it should
4456 * have at least @naddr entries, each of which will be set to the index of
4457 * the filter allocated for the corresponding MAC address. If a filter
4458 * could not be allocated for an address its index is set to 0xffff.
4459 * If @hash is not %NULL, addresses that fail to allocate an exact filter
4460 * are hashed and used to update the hash filter bitmap pointed at by @hash.
4461 *
4462 * Returns a negative error number or the number of filters allocated.
4463 */
4464 int
4465 t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid,
4466 bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash,
4467 bool sleep_ok)
4468 {
4469 int offset, ret = 0;
4470 struct fw_vi_mac_cmd c;
4471 unsigned int nfilters = 0;
4472 unsigned int rem = naddr;
4473
4474 if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
4475 return (-EINVAL);
4476
4477 for (offset = 0; offset < naddr; /* */) {
4478 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4479 ? rem : ARRAY_SIZE(c.u.exact));
4480 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4481 u.exact[fw_naddr]), 16);
4482 struct fw_vi_mac_exact *p;
4483 int i;
4484
4485 (void) memset(&c, 0, sizeof (c));
4486 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4487 F_FW_CMD_REQUEST |
4488 F_FW_CMD_WRITE |
4489 V_FW_CMD_EXEC(free) |
4490 V_FW_VI_MAC_CMD_VIID(viid));
4491 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4492 V_FW_CMD_LEN16(len16));
4493
4494 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4495 p->valid_to_idx = htons(
4496 F_FW_VI_MAC_CMD_VALID |
4497 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4498 (void) memcpy(p->macaddr, addr[offset+i],
4499 sizeof (p->macaddr));
4500 }
4501
4502 /*
4503 * It's okay if we run out of space in our MAC address arena.
4504 * Some of the addresses we submit may get stored so we need
4505 * to run through the reply to see what the results were ...
4506 */
4507 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), &c, sleep_ok);
4508 if (ret && ret != -FW_ENOMEM)
4509 break;
4510
4511 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4512 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4513
4514 if (idx != NULL)
4515 idx[offset+i] =
4516 (index >= NUM_MPS_CLS_SRAM_L_INSTANCES ?
4517 0xffff : index);
4518 if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
4519 nfilters++;
4520 else if (hash != NULL)
4521 *hash |=
4522 (1ULL << hash_mac_addr(addr[offset+i]));
4523 }
4524
4525 free = false;
4526 offset += fw_naddr;
4527 rem -= fw_naddr;
4528 }
4529
4530 if (ret == 0 || ret == -FW_ENOMEM)
4531 ret = nfilters;
4532 return (ret);
4533 }
4534
4535 /*
4536 * t4_change_mac - modifies the exact-match filter for a MAC address
4537 * @adap: the adapter
4538 * @mbox: mailbox to use for the FW command
4539 * @viid: the VI id
4540 * @idx: index of existing filter for old value of MAC address, or -1
4541 * @addr: the new MAC address value
4542 * @persist: whether a new MAC allocation should be persistent
4543 * @add_smt: if true also add the address to the HW SMT
4544 *
4545 * Modifies an exact-match filter and sets it to the new MAC address if
4546 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4547 * latter case the address is added persistently if @persist is %true.
4548 *
4549 * Note that in general it is not possible to modify the value of a given
4550 * filter so the generic way to modify an address filter is to free the one
4551 * being used by the old address value and allocate a new filter for the
4552 * new address value.
4553 *
4554 * Returns a negative error number or the index of the filter with the new
4555 * MAC value. Note that this index may differ from @idx.
4556 */
4557 int
4558 t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4559 int idx, const u8 *addr, bool persist, bool add_smt)
4560 {
4561 int ret, mode;
4562 struct fw_vi_mac_cmd c;
4563 struct fw_vi_mac_exact *p = c.u.exact;
4564
4565 if (idx < 0) /* new allocation */
4566 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4567 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4568
4569 (void) memset(&c, 0, sizeof (c));
4570 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4571 F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4572 c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4573 p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4574 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx));
4575 (void) memcpy(p->macaddr, addr, sizeof (p->macaddr));
4576
4577 ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4578 if (ret == 0) {
4579 ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4580 if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4581 ret = -ENOMEM;
4582 }
4583 return (ret);
4584 }
4585
4586 /*
4587 * t4_set_addr_hash - program the MAC inexact-match hash filter
4588 * @adap: the adapter
4589 * @mbox: mailbox to use for the FW command
4590 * @viid: the VI id
4591 * @ucast: whether the hash filter should also match unicast addresses
4592 * @vec: the value to be written to the hash filter
4593 * @sleep_ok: call is allowed to sleep
4594 *
4595 * Sets the 64-bit inexact-match hash filter for a virtual interface.
4596 */
4597 int
4598 t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4599 bool ucast, u64 vec, bool sleep_ok)
4600 {
4601 struct fw_vi_mac_cmd c;
4602
4603 (void) memset(&c, 0, sizeof (c));
4604 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4605 F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4606 c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4607 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1));
4608 c.u.hash.hashvec = cpu_to_be64(vec);
4609 return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
4610 }
4611
4612 /*
4613 * t4_enable_vi - enable/disable a virtual interface
4614 * @adap: the adapter
4615 * @mbox: mailbox to use for the FW command
4616 * @viid: the VI id
4617 * @rx_en: 1=enable Rx, 0=disable Rx
4618 * @tx_en: 1=enable Tx, 0=disable Tx
4619 *
4620 * Enables/disables a virtual interface.
4621 */
4622 int
4623 t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4624 bool rx_en, bool tx_en)
4625 {
4626 struct fw_vi_enable_cmd c;
4627
4628 (void) memset(&c, 0, sizeof (c));
4629 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4630 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4631 c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4632 V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4633 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4634 }
4635
4636 /*
4637 * t4_identify_port - identify a VI's port by blinking its LED
4638 * @adap: the adapter
4639 * @mbox: mailbox to use for the FW command
4640 * @viid: the VI id
4641 * @nblinks: how many times to blink LED at 2.5 Hz
4642 *
4643 * Identifies a VI's port by blinking its LED.
4644 */
4645 int
4646 t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4647 unsigned int nblinks)
4648 {
4649 struct fw_vi_enable_cmd c;
4650
4651 (void) memset(&c, 0, sizeof (c));
4652 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4653 F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4654 c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4655 c.blinkdur = htons(nblinks);
4656 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4657 }
4658
4659 /*
4660 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4661 * @adap: the adapter
4662 * @mbox: mailbox to use for the FW command
4663 * @start: %true to enable the queues, %false to disable them
4664 * @pf: the PF owning the queues
4665 * @vf: the VF owning the queues
4666 * @iqid: ingress queue id
4667 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4668 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4669 *
4670 * Starts or stops an ingress queue and its associated FLs, if any.
4671 */
4672 int
4673 t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4674 unsigned int pf, unsigned int vf, unsigned int iqid, unsigned int fl0id,
4675 unsigned int fl1id)
4676 {
4677 struct fw_iq_cmd c;
4678
4679 (void) memset(&c, 0, sizeof (c));
4680 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4681 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4682 V_FW_IQ_CMD_VFN(vf));
4683 c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4684 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4685 c.iqid = htons(iqid);
4686 c.fl0id = htons(fl0id);
4687 c.fl1id = htons(fl1id);
4688 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4689 }
4690
4691 /*
4692 * t4_iq_free - free an ingress queue and its FLs
4693 * @adap: the adapter
4694 * @mbox: mailbox to use for the FW command
4695 * @pf: the PF owning the queues
4696 * @vf: the VF owning the queues
4697 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4698 * @iqid: ingress queue id
4699 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4700 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4701 *
4702 * Frees an ingress queue and its associated FLs, if any.
4703 */
4704 int
4705 t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4706 unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id,
4707 unsigned int fl1id)
4708 {
4709 struct fw_iq_cmd c;
4710
4711 (void) memset(&c, 0, sizeof (c));
4712 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4713 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4714 V_FW_IQ_CMD_VFN(vf));
4715 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4716 c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4717 c.iqid = htons(iqid);
4718 c.fl0id = htons(fl0id);
4719 c.fl1id = htons(fl1id);
4720 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4721 }
4722
4723 /*
4724 * t4_eth_eq_free - free an Ethernet egress queue
4725 * @adap: the adapter
4726 * @mbox: mailbox to use for the FW command
4727 * @pf: the PF owning the queue
4728 * @vf: the VF owning the queue
4729 * @eqid: egress queue id
4730 *
4731 * Frees an Ethernet egress queue.
4732 */
4733 int
4734 t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4735 unsigned int vf, unsigned int eqid)
4736 {
4737 struct fw_eq_eth_cmd c;
4738
4739 (void) memset(&c, 0, sizeof (c));
4740 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4741 F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4742 V_FW_EQ_ETH_CMD_VFN(vf));
4743 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4744 c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4745 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4746 }
4747
4748 /*
4749 * t4_ctrl_eq_free - free a control egress queue
4750 * @adap: the adapter
4751 * @mbox: mailbox to use for the FW command
4752 * @pf: the PF owning the queue
4753 * @vf: the VF owning the queue
4754 * @eqid: egress queue id
4755 *
4756 * Frees a control egress queue.
4757 */
4758 int
4759 t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4760 unsigned int vf, unsigned int eqid)
4761 {
4762 struct fw_eq_ctrl_cmd c;
4763
4764 (void) memset(&c, 0, sizeof (c));
4765 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4766 F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4767 V_FW_EQ_CTRL_CMD_VFN(vf));
4768 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4769 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4770 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4771 }
4772
4773 /*
4774 * t4_ofld_eq_free - free an offload egress queue
4775 * @adap: the adapter
4776 * @mbox: mailbox to use for the FW command
4777 * @pf: the PF owning the queue
4778 * @vf: the VF owning the queue
4779 * @eqid: egress queue id
4780 *
4781 * Frees an offload egress queue.
4782 */
4783 int
4784 t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4785 unsigned int vf, unsigned int eqid)
4786 {
4787 struct fw_eq_ofld_cmd c;
4788
4789 (void) memset(&c, 0, sizeof (c));
4790 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4791 F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
4792 V_FW_EQ_OFLD_CMD_VFN(vf));
4793 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
4794 c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
4795 return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4796 }
4797
4798 /*
4799 * t4_handle_fw_rpl - process a FW reply message
4800 * @adap: the adapter
4801 * @rpl: start of the FW message
4802 *
4803 * Processes a FW message, such as link state change messages.
4804 */
4805 int
4806 t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4807 {
4808 u8 opcode = *(const u8 *)rpl;
4809
4810 if (opcode == FW_PORT_CMD) { /* link/module state change message */
4811 int i;
4812 const struct fw_port_cmd *p = (const void *)rpl;
4813 int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
4814 struct port_info *pi = NULL;
4815 struct link_config *lc;
4816 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
4817 unsigned char link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
4818 unsigned char fc = 0;
4819 unsigned short speed = 0;
4820 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
4821
4822 if (stat & F_FW_PORT_CMD_RXPAUSE)
4823 fc |= PAUSE_RX;
4824 if (stat & F_FW_PORT_CMD_TXPAUSE)
4825 fc |= PAUSE_TX;
4826 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4827 speed = SPEED_100;
4828 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4829 speed = SPEED_1000;
4830 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4831 speed = SPEED_10000;
4832
4833 for_each_port(adap, i) {
4834 pi = adap2pinfo(adap, i);
4835 if (pi->tx_chan == chan)
4836 break;
4837 }
4838 lc = &pi->link_cfg;
4839 if (link_ok != lc->link_ok || speed != lc->speed ||
4840 fc != lc->fc) { /* something changed */
4841 lc->link_ok = link_ok;
4842 lc->speed = speed;
4843 lc->fc = fc;
4844 t4_os_link_changed(adap, i, link_ok);
4845 }
4846 if (mod != pi->mod_type) {
4847 /* LINTED: E_ASSIGN_NARROW_CONV */
4848 pi->mod_type = mod;
4849 t4_os_portmod_changed(adap, i);
4850 }
4851 }
4852 return (0);
4853 }
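
/*
 * Dispatch sketch (illustrative): the ingress path hands a firmware
 * reply here once it has located the embedded message, e.g.
 *
 *	(void) t4_handle_fw_rpl(adap, fw_msg);
 *
 * where fw_msg is a stand-in for however the caller finds the reply;
 * the only contract is that @rpl points at the 64-bit-aligned start of
 * the FW message. Only FW_PORT_CMD messages are acted on here; anything
 * else falls through and returns 0.
 */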

/*
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void __devinit
get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}
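
/*
 * For reference, the Link Status fields read above use the standard
 * PCIe encodings: current link speed 1 = 2.5 GT/s, 2 = 5.0 GT/s, and
 * the negotiated link width occupies bits 9:4 (hence the shift by 4)
 * as a lane count, so a x8 Gen2 link reads back as p->speed == 2 and
 * p->width == 8.
 */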

/*
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit
init_link_config(struct link_config *lc, unsigned short caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
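
/*
 * Consumption sketch: this per-port state is later pushed to the
 * firmware, typically via t4_link_start() in this driver, e.g.
 *
 *	init_link_config(&pi->link_cfg, ntohs(c.u.info.pcap));
 *	rc = t4_link_start(adap, adap->mbox, pi->tx_chan, &pi->link_cfg);
 *
 * The adap->mbox and pi->tx_chan arguments are shown for illustration,
 * not as a fixed calling convention.
 */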

/*
 * wait_dev_ready - wait for the device to become ready
 * @adap: the adapter
 *
 * Reads PL_WHOAMI and treats an all-ones value (the pattern a PCIe read
 * returns when the device does not respond) or X_CIM_PF_NOACCESS as
 * "not ready", retrying once after 500 ms. Returns 0 if the device
 * responds, -EIO otherwise.
 */
static int __devinit
wait_dev_ready(struct adapter *adap)
{
	u32 whoami;

	whoami = t4_read_reg(adap, A_PL_WHOAMI);

	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
		return (0);

	msleep(500);
	whoami = t4_read_reg(adap, A_PL_WHOAMI);
	return ((whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS ?
	    0 : -EIO));
}

/*
 * get_flash_params - read the serial flash ID and derive its geometry
 * @adapter: the adapter
 *
 * Issues a Read ID to the serial flash and, for supported Numonix
 * parts, records the sector count and total size in the adapter
 * parameters. Returns 0 on success or a negative errno.
 */
static int __devinit
get_flash_params(struct adapter *adapter)
{
	int ret;
	u32 info = 0;

	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &info);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return (ret);

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return (-EINVAL);
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adapter->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adapter->params.sf_nsec = 64;
	else
		return (-EINVAL);
	adapter->params.sf_size = 1 << info;
	return (0);
}
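
/*
 * Worked example: a 4 MB Numonix part returns manufacturer id 0x20 and
 * a density byte of 0x16 (log2 of 4 MB is 22 = 0x16). That falls in the
 * [0x14, 0x18) window, so sf_nsec = 1 << (0x16 - 16) = 64 sectors of
 * 64 KB each, and sf_size = 1 << 0x16 = 4 MB.
 */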

/*
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit
t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	ret = wait_dev_ready(adapter);
	if (ret < 0)
		return (ret);

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
	if (adapter->params.rev == 1) {
		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
		return (-EINVAL);
	}
	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_flash_params(adapter);
	if (ret < 0)
		return (ret);

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return (ret);

	if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff)
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	else
		adapter->params.cim_la_size = CIMLA_SIZE;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	return (0);
}
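
/*
 * Call-order sketch (hypothetical attach path): t4_prep_adapter() is
 * the first hardware touch and must succeed before any mailbox traffic:
 *
 *	if ((rc = t4_prep_adapter(sc)) != 0)
 *		return (rc);
 *
 * The nports/portvec/cclk defaults set above are only debugging
 * fallbacks; they are replaced once the firmware is reachable.
 */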

/*
 * t4_port_init - initialize a port's SW state
 * @p: the port to initialize
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the port's virtual interface
 * @vf: the VF owning the port's virtual interface
 *
 * Queries the port's capabilities from the firmware, allocates a
 * virtual interface for it, and initializes the port's link
 * configuration, module type, and MAC address.
 */
int __devinit
t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	unsigned int rss_size;
	adapter_t *adap = p->adapter;

	(void) memset(&c, 0, sizeof (c));

	/* Find the physical port j: the (port_id + 1)-th set bit of portvec. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
	    FW_LEN16(c));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
	if (ret != 0)
		return (ret);

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return (ret);

	p->viid = (uint16_t)ret;
	p->tx_chan = (uint8_t)j;
	p->lport = (uint8_t)j;
	p->rss_size = (uint16_t)rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	ret = ntohl(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
	    G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

	return (0);
}
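
/*
 * Usage sketch (hypothetical): callers run this once per port after the
 * firmware handshake, with p->port_id set beforehand, since the portvec
 * walk above relies on it:
 *
 *	for_each_port(sc, i) {
 *		struct port_info *pi = adap2pinfo(sc, i);
 *
 *		pi->port_id = i;
 *		if ((rc = t4_port_init(pi, sc->mbox, sc->pf, 0)) != 0)
 *			break;
 *	}
 *
 * sc->mbox and sc->pf stand in for the caller's mailbox and PF number.
 */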