1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2011, 2025 Chelsio Communications.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28
29 #ifndef __CHELSIO_COMMON_H
30 #define __CHELSIO_COMMON_H
31
32 #include "t4_hw.h"
33
/*
 * Aggregate of the per-module interrupt flags (F_*) that make up the
 * global interrupt mask serviced by the driver.
 */
#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
		F_CPL_SWITCH | F_SGE | F_ULP_TX | F_SF)

/* Same set of modules for T7, which uses its own (F_T7_*) bit definitions. */
#define GLBL_T7_INTR_MASK (F_CIM | F_MPS | F_PL | F_T7_PCIE | F_T7_MC0 | \
			   F_T7_EDC0 | F_T7_EDC1 | F_T7_LE | F_T7_TP | \
			   F_T7_MA | F_T7_PM_TX | F_T7_PM_RX | F_T7_ULP_RX | \
			   F_T7_CPL_SWITCH | F_T7_SGE | F_T7_ULP_TX | F_SF)
42
/*
 * Adapter-wide limits and the lengths of the VPD strings kept in
 * struct vpd_params below.
 */
enum {
	MAX_NPORTS     = 4,	/* max # of ports */
	SERNUM_LEN     = 24,	/* Serial # length */
	EC_LEN         = 16,	/* E/C length */
	ID_LEN         = 16,	/* ID length */
	PN_LEN         = 16,	/* Part Number length */
	MD_LEN         = 16,	/* MFG diags version length */
	MACADDR_LEN    = 12,	/* MAC Address length */
};
52
/* Size of the register dump for each chip generation. */
enum {
	T4_REGMAP_SIZE = (160 * 1024),
	T5_REGMAP_SIZE = (332 * 1024),
};

/* Addressable memory targets; MEM_MC aliases MEM_MC0 on chips with two MCs. */
enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1, MEM_HMA };

/* How strongly this function wants to be master (see t4_fw_hello()). */
enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };

/* Device state as reported by the firmware (see t4_fw_hello()). */
enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
63
/* Link flow-control / pause settings. */
enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

/* FEC settings; used in the fec fields of struct link_config. */
enum {
	/*
	 * Real FECs.  In the same order as the FEC portion of caps32 so that
	 * the code can do (fec & M_FW_PORT_CAP32_FEC) to get all the real FECs.
	 */
	FEC_RS        = 1 << 0,	/* Reed-Solomon */
	FEC_BASER_RS  = 1 << 1,	/* BASE-R, aka Firecode */
	FEC_NONE      = 1 << 2,	/* no FEC */

	/*
	 * Pseudo FECs that translate to real FECs.  The firmware knows nothing
	 * about these and they start at M_FW_PORT_CAP32_FEC + 1.  AUTO should
	 * be set all by itself.
	 */
	FEC_AUTO      = 1 << 5,
	FEC_MODULE    = 1 << 6,	/* FEC suggested by the cable/transceiver. */
};

/* Per-ULP T10-DIF support flags. */
enum {
	ULP_T10DIF_ISCSI = 1 << 0,
	ULP_T10DIF_FCOE = 1 << 1
};

/* ULP crypto capability flags. */
enum {
	ULP_CRYPTO_LOOKASIDE = 1 << 0,
	ULP_CRYPTO_INLINE_TLS = 1 << 1,
	ULP_CRYPTO_INLINE_IPSEC = 1 << 2,
	ULP_CRYPTO_OFLD_OVER_IPSEC_INLINE = 1 << 4
};

/* Queue types addressed through BAR2. */
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
101
/*
 * Per-port MAC statistics, filled in by t4_get_port_stats() and cleared
 * with t4_clr_port_stats().
 */
struct port_stats {
	u64 tx_octets;		/* total # of octets in good frames */
	u64 tx_frames;		/* all good frames */
	u64 tx_bcast_frames;	/* all broadcast frames */
	u64 tx_mcast_frames;	/* all multicast frames */
	u64 tx_ucast_frames;	/* all unicast frames */
	u64 tx_error_frames;	/* all error frames */

	u64 tx_frames_64;	/* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;		/* # of dropped Tx frames */
	u64 tx_pause;		/* # of transmitted pause frames */
	u64 tx_ppp0;		/* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;		/* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;		/* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;		/* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;		/* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;		/* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;		/* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;		/* # of transmitted PPP prio 7 frames */

	u64 rx_octets;		/* total # of octets in good frames */
	u64 rx_frames;		/* all good frames */
	u64 rx_bcast_frames;	/* all broadcast frames */
	u64 rx_mcast_frames;	/* all multicast frames */
	u64 rx_ucast_frames;	/* all unicast frames */
	u64 rx_too_long;	/* # of frames exceeding MTU */
	u64 rx_jabber;		/* # of jabber frames */
	u64 rx_fcs_err;		/* # of received frames with bad FCS */
	u64 rx_len_err;		/* # of received frames with length error */
	u64 rx_symbol_err;	/* symbol errors */
	u64 rx_runt;		/* # of short frames */

	u64 rx_frames_64;	/* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;		/* # of received pause frames */
	u64 rx_ppp0;		/* # of received PPP prio 0 frames */
	u64 rx_ppp1;		/* # of received PPP prio 1 frames */
	u64 rx_ppp2;		/* # of received PPP prio 2 frames */
	u64 rx_ppp3;		/* # of received PPP prio 3 frames */
	u64 rx_ppp4;		/* # of received PPP prio 4 frames */
	u64 rx_ppp5;		/* # of received PPP prio 5 frames */
	u64 rx_ppp6;		/* # of received PPP prio 6 frames */
	u64 rx_ppp7;		/* # of received PPP prio 7 frames */

	u64 rx_ovflow0;		/* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;		/* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;		/* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;		/* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;		/* buffer-group 0 truncated packets */
	u64 rx_trunc1;		/* buffer-group 1 truncated packets */
	u64 rx_trunc2;		/* buffer-group 2 truncated packets */
	u64 rx_trunc3;		/* buffer-group 3 truncated packets */
};
168
/*
 * Loopback port statistics, filled in by t4_get_lb_stats().  The counters
 * have the same meaning as the like-named fields of struct port_stats.
 */
struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;		/* frame counts by size range */
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;		/* per buffer-group overflow drops */
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;		/* per buffer-group truncated packets */
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};
196
/* TCP statistics kept by TP (see t4_tp_get_tcp_stats()). */
struct tp_tcp_stats {
	u32 tcp_out_rsts;
	u64 tcp_in_segs;
	u64 tcp_out_segs;
	u64 tcp_retrans_segs;
};

/* USM statistics (see t4_get_usm_stats()). */
struct tp_usm_stats {
	u32 frames;
	u32 drops;
	u64 octets;
};

/*
 * TID statistics (see t4_tp_get_tid_stats()).  Field names suggest
 * delete/invalidate and active/passive counts -- confirm against the TP
 * register definitions before relying on that reading.
 */
struct tp_tid_stats {
	u32 del;
	u32 inv;
	u32 act;
	u32 pas;
};

/* FCoE DDP statistics (see t4_get_fcoe_stats()). */
struct tp_fcoe_stats {
	u32 frames_ddp;
	u32 frames_drop;
	u64 octets_ddp;
};

/* Per-channel TP error statistics (see t4_tp_get_err_stats()). */
struct tp_err_stats {
	u32 mac_in_errs[MAX_NCHAN];
	u32 hdr_in_errs[MAX_NCHAN];
	u32 tcp_in_errs[MAX_NCHAN];
	u32 tnl_cong_drops[MAX_NCHAN];
	u32 ofld_chan_drops[MAX_NCHAN];
	u32 tnl_tx_drops[MAX_NCHAN];
	u32 ofld_vlan_drops[MAX_NCHAN];
	u32 tcp6_in_errs[MAX_NCHAN];
	u32 ofld_no_neigh;
	u32 ofld_cong_defer;
};

/* Per-channel tunnel packet counts (see t4_tp_get_tnl_stats()). */
struct tp_tnl_stats {
	u32 out_pkt[MAX_NCHAN];
	u32 in_pkt[MAX_NCHAN];
};

/* Per-channel proxy statistics (see t4_tp_get_proxy_stats()). */
struct tp_proxy_stats {
	u32 proxy[MAX_NCHAN];
};

/* Per-channel CPL request/response counts (see t4_tp_get_cpl_stats()). */
struct tp_cpl_stats {
	u32 req[MAX_NCHAN];
	u32 rsp[MAX_NCHAN];
};

/* RDMA statistics (see t4_tp_get_rdma_stats()). */
struct tp_rdma_stats {
	u32 rqe_dfr_pkt;
	u32 rqe_dfr_mod;
	u32 pkts_in[MAX_NCHAN];
	u64 bytes_in[MAX_NCHAN];
	/*
	 * When reading rdma stats, the address difference b/w RDMA_IN and
	 * RDMA_OUT is 4*u32, to read both at once, added padding
	 */
	u32 padding[4];
	u32 pkts_out[MAX_NCHAN];
	u64 bytes_out[MAX_NCHAN];
};
263
/* SGE parameters derived from the chip/firmware configuration. */
struct sge_params {
	int timer_val[SGE_NTIMERS];	/* final, scaled values */
	int counter_val[SGE_NCOUNTERS];
	int fl_starve_threshold;	/* free-list starvation thresholds */
	int fl_starve_threshold2;
	int page_shift;			/* log2 of the SGE page size */
	int eq_s_qpp;			/* egress queues per page (log2) -- presumed; confirm */
	int iq_s_qpp;			/* ingress queues per page (log2) -- presumed; confirm */
	int spg_len;			/* status page length */
	int pad_boundary;
	int pack_boundary;
	int fl_pktshift;
	u32 sge_control;		/* cached SGE_CONTROL register value -- presumed; confirm */
	u32 sge_fl_buffer_size[SGE_FLBUF_SIZES];	/* free-list buffer sizes */
};
279
/* TP (transport processor) parameters. */
struct tp_params {
	unsigned int tre;		/* log2 of core clocks per TP tick */
	unsigned int dack_re;		/* DACK timer resolution */
	unsigned int la_mask;		/* what events are recorded by TP LA */

	uint16_t filter_mode;
	uint16_t filter_mask;		/* Used by TOE and hashfilters */
	int vnic_mode;
	uint32_t max_rx_pdu;
	uint32_t max_tx_pdu;
	bool rx_pkt_encap;
	uint8_t lb_mode;
	uint8_t lb_nchan;

	/*
	 * Bit position of each optional field within the TP filter tuple;
	 * see t4_filter_field_shift() / t4_filter_field_width().
	 */
	int8_t ipsecidx_shift;
	int8_t fcoe_shift;
	int8_t port_shift;
	int8_t vnic_shift;
	int8_t vlan_shift;
	int8_t tos_shift;
	int8_t protocol_shift;
	int8_t ethertype_shift;
	int8_t macmatch_shift;
	int8_t matchtype_shift;
	int8_t frag_shift;
	int8_t roce_shift;
	int8_t synonly_shift;
	int8_t tcpflags_shift;
};

/* Use same modulation queue as the tx channel. */
#define TX_MODQ(tx_chan) (tx_chan)
312
/* Parameters read from the adapter's VPD.  Strings are NUL-terminated. */
struct vpd_params {
	unsigned int cclk;		/* core clock in kHz (see core_ticks_per_usec()) */
	u8 ec[EC_LEN + 1];		/* engineering change level */
	u8 sn[SERNUM_LEN + 1];		/* serial number */
	u8 id[ID_LEN + 1];		/* identifier string */
	u8 pn[PN_LEN + 1];		/* part number */
	u8 na[MACADDR_LEN + 1];		/* MAC address (hex string) */
	u8 md[MD_LEN + 1];		/* MFG diags version */
};
322
/*
 * Maximum resources provisioned for a PCI PF.
 */
struct pf_resources {
	unsigned int nvi;		/* N virtual interfaces */
	unsigned int neq;		/* N egress Qs */
	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
	unsigned int niq;		/* N ingress Qs */
	unsigned int tc;		/* PCI-E traffic class */
	unsigned int pmask;		/* port access rights mask */
	unsigned int nexactf;		/* N exact MPS filters */
	unsigned int r_caps;		/* read capabilities */
	unsigned int wx_caps;		/* write/execute capabilities */
};

/* PCI(e) link properties. */
struct pci_params {
	unsigned int vpd_cap_addr;	/* offset of the VPD capability */
	unsigned int mps;		/* max payload size */
	unsigned short speed;		/* negotiated link speed */
	unsigned short width;		/* negotiated link width */
};
345
/*
 * Firmware device log.
 */
struct devlog_params {
	u32 memtype;		/* which memory (FW_MEMTYPE_* ) */
	u32 start;		/* start of log in firmware memory */
	u32 size;		/* size of log */
	u32 addr;		/* start address in flat addr space */
};

/* Stores chip specific parameters */
struct chip_params {
	u8 nchan;		/* # of channels */
	u8 pm_stats_cnt;	/* # of PM stats entries */
	u8 cng_ch_bits_log;	/* congestion channel map bits width */
	u8 nsched_cls;		/* # of scheduling classes */
	u8 cim_num_ibq;		/* # of CIM IBQs */
	u8 cim_num_obq;		/* # of CIM OBQs */
	u8 filter_opt_len;	/* number of bits for optional fields */
	u8 filter_num_opt;	/* number of optional fields */
	u8 sge_ctxt_size;
	u16 mps_rplc_size;
	u16 vfcount;
	u32 sge_fl_db;
	u16 mps_tcam_size;
	u16 rss_nentries;
	u16 cim_la_size;
};
374
/* VF-only parameters. */

/*
 * Global Receive Side Scaling (RSS) parameters in host-native format.
 */
struct rss_params {
	unsigned int mode;			/* RSS mode */
	union {
	    struct {
		u_int synmapen:1;	/* SYN Map Enable */
		u_int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
		u_int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
		u_int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
		u_int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
		u_int ofdmapen:1;	/* Offload Map Enable */
		u_int tnlmapen:1;	/* Tunnel Map Enable */
		u_int tnlalllookup:1;	/* Tunnel All Lookup */
		u_int hashtoeplitz:1;	/* use Toeplitz hash */
	    } basicvirtual;
	} u;
};

/*
 * Maximum resources provisioned for a PCI VF.
 */
struct vf_resources {
	unsigned int nvi;		/* N virtual interfaces */
	unsigned int neq;		/* N egress Qs */
	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
	unsigned int niq;		/* N ingress Qs */
	unsigned int tc;		/* PCI-E traffic class */
	unsigned int pmask;		/* port access rights mask */
	unsigned int nexactf;		/* N exact MPS filters */
	unsigned int r_caps;		/* read capabilities */
	unsigned int wx_caps;		/* write/execute capabilities */
};
412
/*
 * All adapter-wide parameters.  Sub-structures and fields marked PF-only or
 * VF-only are valid only when the driver is attached to that function type.
 */
struct adapter_params {
	struct sge_params sge;
	struct tp_params tp;		/* PF-only */
	struct vpd_params vpd;
	struct pf_resources pfres;	/* PF-only */
	struct pci_params pci;
	struct devlog_params devlog;	/* PF-only */
	struct rss_params rss;		/* VF-only */
	struct vf_resources vfres;	/* VF-only */
	unsigned int core_vdd;		/* core voltage -- units unverified */

	unsigned int sf_size;		/* serial flash size in bytes */
	unsigned int sf_nsec;		/* # of flash sectors */

	unsigned int fw_vers;		/* firmware version */
	unsigned int bs_vers;		/* bootstrap version */
	unsigned int tp_vers;		/* TP microcode version */
	unsigned int er_vers;		/* expansion ROM version */
	unsigned int scfg_vers;		/* Serial Configuration version */
	unsigned int vpd_vers;		/* VPD version */

	unsigned short mtus[NMTUS];	/* MTU table */
	unsigned short a_wnd[NCCTRL_WIN];	/* alpha values (see t4_load_mtus()) */
	unsigned short b_wnd[NCCTRL_WIN];	/* beta values (see t4_load_mtus()) */

	unsigned int cim_la_size;

	uint8_t nports;		/* # of ethernet ports */
	uint8_t portvec;
	unsigned int chipid:4;	/* chip ID.  T4 = 4, T5 = 5, ... */
	unsigned int rev:4;	/* chip revision */
	unsigned int fpga:1;	/* this is an FPGA */
	unsigned int offload:1;	/* hw is TOE capable, fw has divvied up card
				   resources for TOE operation. */
	unsigned int bypass:1;	/* this is a bypass card */
	unsigned int ethoffload:1;	/* hw+fw support ethernet offloads */
	unsigned int hash_filter:1;	/* hash filters are supported */
	unsigned int filter2_wr_support:1;
	unsigned int port_caps32:1;	/* firmware uses 32-bit port caps */
	unsigned int smac_add_support:1;

	unsigned int ofldq_wr_cred;
	unsigned int eo_wr_cred;

	unsigned int max_ordird_qp;	/* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;	/* Max read depth per adapter */

	unsigned int nipsec_tunnel;
	unsigned int nipsec_transport;
	unsigned int nofld_ipsec_tunnel;

	/* These values are for all ports (8b/port, up to 4 ports) */
	uint32_t mps_bg_map;	/* MPS rx buffer group map */
	uint32_t tp_ch_map;	/* TPCHMAP from firmware */
	uint32_t tx_tp_ch_map;	/* TX_TPCHMAP from firmware */

	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */
	bool fr_nsmr_tpte_wr_support;	/* FW support for FR_NSMR_TPTE_WR */
	bool dev_512sgl_mr;	/* FW support for 512 SGL per FR MR */
	bool viid_smt_extn_support;	/* FW returns vin, vfvld & smt index? */
	unsigned int max_pkts_per_eth_tx_pkts_wr;
	uint8_t nsched_cls;	/* # of usable sched classes per port */

	uint8_t ncores;
	uint32_t tid_qid_sel_mask;	/* TID based QID selection mask */
};
479
/* Chip generation identifiers; compared against adapter_params.chipid. */
#define CHELSIO_T4	0x4
#define CHELSIO_T5	0x5
#define CHELSIO_T6	0x6
#define CHELSIO_T7	0x7
484
/*
 * State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */
struct sge_idma_monitor_state {
	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
	unsigned int idma_state[2];	/* IDMA Hang detect state */
	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
	unsigned int idma_warn[2];	/* time to warning in HZ */
};

/*
 * Packet trace filter configuration; programmed and read back with
 * t4_set_trace_filter() / t4_get_trace_filter().
 */
struct trace_params {
	u32 data[TRACE_LEN / 4];	/* data to match */
	u32 mask[TRACE_LEN / 4];	/* match mask */
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;		/* invert the sense of the match */
	unsigned char port;
};
507
/* Link configuration: driver-requested settings and firmware-reported state. */
struct link_config {
	/* OS-specific code owns all the requested_* fields. */
	int8_t requested_aneg;	/* link autonegotiation */
	int8_t requested_fc;	/* flow control */
	int8_t requested_fec;	/* FEC */
	int8_t force_fec;	/* FORCE_FEC in L1_CFG32 command. */
	u_int requested_speed;	/* speed (Mbps) */
	uint32_t requested_caps;/* rcap in last l1cfg issued by the driver. */

	/* These are populated with information from the firmware. */
	uint32_t pcaps;		/* link capabilities */
	uint32_t acaps;		/* advertised capabilities */
	uint32_t lpacaps;	/* peer advertised capabilities */
	u_int speed;		/* actual link speed (Mbps) */
	int8_t fc;		/* actual link flow control */
	int8_t fec_hint;	/* cable/transceiver recommended fec */
	int8_t fec;		/* actual FEC */
	bool link_ok;		/* link up? */
	uint8_t link_down_rc;	/* link down reason */
};
528
529 #include "adapter.h"
530
/* Chelsio's PCI vendor id. */
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
534
/* Iterate 'iter' over the indices of all of the adapter's ports. */
#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)
537
/* Does 'tid' fall within the region reserved for (normal) filter TIDs? */
static inline int is_ftid(const struct adapter *sc, u_int tid)
{
	if (sc->tids.nftids == 0)
		return (0);
	return (tid >= sc->tids.ftid_base && tid <= sc->tids.ftid_end);
}

/* Does 'tid' fall within the region reserved for high-priority filter TIDs? */
static inline int is_hpftid(const struct adapter *sc, u_int tid)
{
	if (sc->tids.nhpftids == 0)
		return (0);
	return (tid >= sc->tids.hpftid_base && tid <= sc->tids.hpftid_end);
}

/* Does 'tid' fall within the region reserved for ethernet offload TIDs? */
static inline int is_etid(const struct adapter *sc, u_int tid)
{
	if (sc->tids.netids == 0)
		return (0);
	return (tid >= sc->tids.etid_base && tid <= sc->tids.etid_end);
}
558
is_offload(const struct adapter * adap)559 static inline int is_offload(const struct adapter *adap)
560 {
561 return adap->params.offload;
562 }
563
is_ethoffload(const struct adapter * adap)564 static inline int is_ethoffload(const struct adapter *adap)
565 {
566 return adap->params.ethoffload;
567 }
568
is_hashfilter(const struct adapter * adap)569 static inline int is_hashfilter(const struct adapter *adap)
570 {
571 return adap->params.hash_filter;
572 }
573
is_ktls(const struct adapter * adap)574 static inline int is_ktls(const struct adapter *adap)
575 {
576 return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW ||
577 adap->params.chipid == CHELSIO_T7;
578 }
579
chip_id(const struct adapter * adap)580 static inline int chip_id(const struct adapter *adap)
581 {
582 return adap->params.chipid;
583 }
584
chip_rev(struct adapter * adap)585 static inline int chip_rev(struct adapter *adap)
586 {
587 return adap->params.rev;
588 }
589
is_t4(struct adapter * adap)590 static inline int is_t4(struct adapter *adap)
591 {
592 return adap->params.chipid == CHELSIO_T4;
593 }
594
is_t5(struct adapter * adap)595 static inline int is_t5(struct adapter *adap)
596 {
597 return adap->params.chipid == CHELSIO_T5;
598 }
599
is_t6(struct adapter * adap)600 static inline int is_t6(struct adapter *adap)
601 {
602 return adap->params.chipid == CHELSIO_T6;
603 }
604
is_t7(struct adapter * adap)605 static inline int is_t7(struct adapter *adap)
606 {
607 return adap->params.chipid == CHELSIO_T7;
608 }
609
is_fpga(struct adapter * adap)610 static inline int is_fpga(struct adapter *adap)
611 {
612 return adap->params.fpga;
613 }
614
/* Core clock ticks per microsecond (vpd.cclk is in kHz). */
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

/* Convert microseconds to core clock ticks. */
static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

/* Convert core clock ticks to microseconds, rounding to nearest. */
static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
		adapter->params.vpd.cclk);
}

/* Convert delayed-ACK timer ticks to microseconds. */
static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
					      unsigned int ticks)
{
	return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
}

/* Convert microseconds to TP (TCP) timer ticks. */
static inline u_int us_to_tcp_ticks(const struct adapter *adap, u_long us)
{

	return (us * adap->params.vpd.cclk / 1000 >> adap->params.tp.tre);
}

/* Convert TP (TCP) timer ticks to microseconds; 64-bit math avoids overflow. */
static inline u_int tcp_ticks_to_us(const struct adapter *adap, u_int ticks)
{
	return ((uint64_t)ticks << adap->params.tp.tre) /
	    core_ticks_per_usec(adap);
}
651
652 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val);
653
654 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
655 int size, void *rpl, bool sleep_ok, int timeout);
656 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
657 void *rpl, bool sleep_ok);
658 void t4_report_fw_error(struct adapter *adap);
659
t4_wr_mbox_timeout(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl,int timeout)660 static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
661 const void *cmd, int size, void *rpl,
662 int timeout)
663 {
664 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
665 timeout);
666 }
667
t4_wr_mbox(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl)668 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
669 int size, void *rpl)
670 {
671 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
672 }
673
t4_wr_mbox_ns(struct adapter * adap,int mbox,const void * cmd,int size,void * rpl)674 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
675 int size, void *rpl)
676 {
677 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
678 }
679
680 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
681 unsigned int data_reg, u32 *vals, unsigned int nregs,
682 unsigned int start_idx);
683 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
684 unsigned int data_reg, const u32 *vals,
685 unsigned int nregs, unsigned int start_idx);
686
687 u32 t4_hw_pci_read_cfg4(adapter_t *adapter, int reg);
688
689 struct fw_filter_wr;
690
691 void t4_intr_clear(struct adapter *adapter);
692 void t4_intr_enable(struct adapter *adapter);
693 void t4_intr_disable(struct adapter *adapter);
694 bool t4_slow_intr_handler(struct adapter *adapter, int flags);
695
696 int t4_hash_mac_addr(const u8 *addr);
697 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
698 struct link_config *lc);
699 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
700 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
701 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
702 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
703 int t4_seeprom_wp(struct adapter *adapter, int enable);
704 int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
705 u32 *data, int byte_oriented);
706 int t4_write_flash(struct adapter *adapter, unsigned int addr,
707 unsigned int n, const u8 *data, int byte_oriented);
708 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
709 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
710 int t5_fw_init_extern_mem(struct adapter *adap);
711 int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
712 int t4_load_boot(struct adapter *adap, u8 *boot_data,
713 unsigned int boot_addr, unsigned int size);
714 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
715 int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp);
716 int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
717 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
718 int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr);
719 int t4_get_bs_version(struct adapter *adapter, u32 *vers);
720 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
721 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
722 int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
723 int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
724 int t4_get_version_info(struct adapter *adapter);
725 int t4_init_hw(struct adapter *adapter, u32 fw_params);
726 const struct chip_params *t4_get_chip_params(int chipid);
727 int t4_prep_adapter(struct adapter *adapter, u32 *buf);
728 int t4_shutdown_adapter(struct adapter *adapter);
729 int t4_init_devlog_ncores_params(struct adapter *adapter, int fw_attach);
730 int t4_init_sge_params(struct adapter *adapter);
731 int t4_init_tp_params(struct adapter *adap);
732 int t4_filter_field_width(const struct adapter *adap, int filter_field);
733 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
734 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
735 void t4_fatal_err(struct adapter *adapter, bool fw_error);
736 int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
737 int filter_index, int enable);
738 void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
739 int filter_index, int *enabled);
740 void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid);
741 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
742 int start, int n, const u16 *rspq, unsigned int nrspq);
743 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
744 unsigned int flags);
745 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
746 unsigned int flags, unsigned int defq, unsigned int skeyidx,
747 unsigned int skey);
748 int t4_read_rss(struct adapter *adapter, u16 *entries);
749 void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
750 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
751 bool sleep_ok);
752 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
753 u32 *valp, bool sleep_ok);
754 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
755 u32 val, bool sleep_ok);
756 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
757 u32 *vfl, u32 *vfh, bool sleep_ok);
758 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
759 u32 vfl, u32 vfh, bool sleep_ok);
760 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
761 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok);
762 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);
763 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok);
764 int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
765 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
766 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
767 void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[]);
768 void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
769 u16 *size, u16 *thres);
770 int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
771 size_t n);
772 int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
773 size_t n);
774 int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
775 unsigned int addr, unsigned int n,
776 unsigned int *valp);
777 int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
778 unsigned int addr, unsigned int n,
779 const unsigned int *valp);
780 int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
781 u32 *wrptr);
782 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
783 unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
784 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
785
t4_read_cimq_cfg(struct adapter * adap,u16 * base,u16 * size,u16 * thres)786 static inline void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size,
787 u16 *thres)
788 {
789 t4_read_cimq_cfg_core(adap, 0, base, size, thres);
790 }
791
t4_read_cim_ibq(struct adapter * adap,u32 qid,u32 * data,size_t n)792 static inline int t4_read_cim_ibq(struct adapter *adap, u32 qid, u32 *data,
793 size_t n)
794 {
795 return t4_read_cim_ibq_core(adap, 0, qid, data, n);
796 }
797
t4_read_cim_obq(struct adapter * adap,u32 qid,u32 * data,size_t n)798 static inline int t4_read_cim_obq(struct adapter *adap, u32 qid, u32 *data,
799 size_t n)
800 {
801 return t4_read_cim_obq_core(adap, 0, qid, data, n);
802 }
803
t4_cim_read(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)804 static inline int t4_cim_read(struct adapter *adap, unsigned int addr,
805 unsigned int n, unsigned int *valp)
806 {
807 return t4_cim_read_core(adap, 0, 0, addr, n, valp);
808 }
809
t4_cim_write(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)810 static inline int t4_cim_write(struct adapter *adap, unsigned int addr,
811 unsigned int n, unsigned int *valp)
812 {
813 return t4_cim_write_core(adap, 0, 0, addr, n, valp);
814 }
815
t4_cim_read_la(struct adapter * adap,u32 * la_buf,u32 * wrptr)816 static inline int t4_cim_read_la(struct adapter *adap, u32 *la_buf, u32 *wrptr)
817 {
818 return t4_cim_read_la_core(adap, 0, la_buf, wrptr);
819 }
820
821 int t4_get_flash_params(struct adapter *adapter);
822
823 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
824 int t4_mc_read(struct adapter *adap, int idx, u32 addr,
825 __be32 *data, u64 *parity);
826 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity);
827 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size,
828 __be32 *data);
829 void t4_idma_monitor_init(struct adapter *adapter,
830 struct sge_idma_monitor_state *idma);
831 void t4_idma_monitor(struct adapter *adapter,
832 struct sge_idma_monitor_state *idma,
833 int hz, int ticks);
834 int t4_set_vf_mac(struct adapter *adapter, unsigned int pf, unsigned int vf,
835 unsigned int naddr, u8 *addr);
836
837 unsigned int t4_get_regs_len(struct adapter *adapter);
838 void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size);
839
840 u32 t4_port_reg(struct adapter *adap, u8 port, u32 reg);
841 const char *t4_get_port_type_description(enum fw_port_type port_type);
842 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
843 void t4_get_port_stats_offset(struct adapter *adap, int idx,
844 struct port_stats *stats,
845 struct port_stats *offset);
846 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
847 void t4_clr_port_stats(struct adapter *adap, int idx);
848
/* TP MTU, congestion-control increment, and pacing tables. */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
    unsigned int *ipg, bool sleep_ok);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
    unsigned int mask, unsigned int val);
/* Read the TP logic analyzer; *wrptr receives the current write pointer. */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
/*
 * TP statistics readers.  'sleep_ok' indicates whether the caller can
 * tolerate sleeping while the statistics are retrieved.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
    bool sleep_ok);
void t4_tp_get_tnl_stats(struct adapter *adap, struct tp_tnl_stats *st,
    bool sleep_ok);
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
    bool sleep_ok);
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
    bool sleep_ok);
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
    bool sleep_ok);
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
    bool sleep_ok);
void t4_tp_get_tid_stats(struct adapter *adap, struct tp_tid_stats *st,
    bool sleep_ok);
/* TCP stats are reported separately for IPv4 (v4) and IPv6 (v6). */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
    struct tp_tcp_stats *v6, bool sleep_ok);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
    struct tp_fcoe_stats *st, bool sleep_ok);
/* Program the MTU table and the associated congestion-control parameters. */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
    const unsigned short *alpha, const unsigned short *beta);
877
/* Read the ULP_RX logic analyzer into la_buf. */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);

/* TX scheduler rate/IPG, pacing, channel rates, and filter configuration. */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps);
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg);
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
    unsigned int start, unsigned int n);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode);
/* Build a work request that deletes filter 'ftid'; the reply goes to 'qid'. */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

/* Wake-on-LAN: magic-packet match and pattern-match configuration. */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
    u64 mask0, u64 mask1, unsigned int crc, bool enable);
891
/*
 * Firmware mailbox commands.  'mbox' selects the mailbox on which the
 * command is issued.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
    enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
int t4_fw_restart(struct adapter *adap, unsigned int mbox);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
    const u8 *fw_data, unsigned int size, int force);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);

/* Query/set firmware and device parameters (nparams entries in params/val). */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int nparams, const u32 *params,
    u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int nparams, const u32 *params,
    u32 *val, int rw);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
    unsigned int pf, unsigned int vf,
    unsigned int nparams, const u32 *params,
    const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int nparams, const u32 *params,
    const u32 *val);

/* Configure a PF/VF's resource allotment (queues, interrupts, caps). */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
    unsigned int rxqi, unsigned int rxq, unsigned int tc,
    unsigned int vi, unsigned int cmask, unsigned int pmask,
    unsigned int exactf, unsigned int rcaps, unsigned int wxcaps);

/* Virtual interface (VI) allocation, teardown, and rx mode. */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
    unsigned int port, unsigned int pf, unsigned int vf,
    unsigned int nmac, u8 *mac, u16 *rss_size,
    uint8_t *vfvld, uint16_t *vin,
    unsigned int portfunc, unsigned int idstype);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
    unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
    u16 *rss_size, uint8_t *vfvld, uint16_t *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
    unsigned int pf, unsigned int vf,
    unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
    int mtu, int promisc, int all_multi, int bcast, int vlanex,
    bool sleep_ok);

/* MAC (MPS) filter management: exact-match, raw, and encapsulation filters. */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid,
    bool free, unsigned int naddr, const u8 **addr, u16 *idx,
    u64 *hash, bool sleep_ok);
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
    unsigned int viid, unsigned int naddr,
    const u8 **addr, bool sleep_ok);
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
    int idx, bool sleep_ok);
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
    const u8 *addr, const u8 *mask, unsigned int idx,
    u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
    const u8 *addr, const u8 *mask, unsigned int idx,
    u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
    const u8 *addr, const u8 *mask, unsigned int vni,
    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
    bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
    int idx, const u8 *addr, bool persist, uint16_t *smt_idx);
int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
    const u8 *addr, bool smac);
int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
    int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
    bool ucast, u64 vec, bool sleep_ok);

/* VI rx/tx enable and port LED identification (nblinks blinks). */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
    unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
    bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
    unsigned int nblinks);

/* MDIO and I2C access to PHY/transceiver devices via the firmware. */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
    unsigned int mmd, unsigned int reg, unsigned int *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
    unsigned int mmd, unsigned int reg, unsigned int val);
int t4_i2c_io(struct adapter *adap, unsigned int mbox,
    int port, unsigned int devid,
    unsigned int offset, unsigned int len,
    u8 *buf, bool write);
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
    int port, unsigned int devid,
    unsigned int offset, unsigned int len,
    u8 *buf);
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
    int port, unsigned int devid,
    unsigned int offset, unsigned int len,
    u8 *buf);

/* Ingress queue and egress queue stop/free (fl0id/fl1id: free lists). */
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int iqtype, unsigned int iqid,
    unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int iqtype, unsigned int iqid,
    unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid);

/* SGE context read (the _bd variant uses a different access path) and flush. */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
    enum ctxt_type ctype, u32 *data);
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
    u32 *data);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);

const char *t4_link_down_rc_str(unsigned char link_down_rc);
int t4_update_port_info(struct port_info *pi);
/* Handle an asynchronous firmware reply/notification message. */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val);

/* TX scheduler (traffic class) configuration. */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
    int sleep_ok);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
    int rateunit, int ratemode, int channel, int cl,
    int minrate, int maxrate, int weight, int pktsize,
    int burstsize, int sleep_ok);
int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
    unsigned int maxrate, int sleep_ok);
int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
    int weight, int sleep_ok);
int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
    int mode, unsigned int maxrate, int pktsize,
    int sleep_ok);

int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
    unsigned int pf, unsigned int vf,
    unsigned int timeout, unsigned int action);
/* Firmware device-log verbosity. */
int t4_get_devlog_level(struct adapter *adapter, unsigned int *level);
int t4_set_devlog_level(struct adapter *adapter, unsigned int level);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);

/* Indirect access to TP PIO, TM-PIO, and MIB register ranges. */
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
    u32 start_index, bool sleep_ok);

int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
    u16 vlan);
/* Start address (return) and optional length (*lenp) of a flash region. */
int t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
    unsigned int *lenp);
1038
/*
 * VF flavor of t4_query_params: the command is issued with mbox, pf,
 * and vf all 0.
 */
static inline int
t4vf_query_params(struct adapter *adapter, unsigned int nparams,
    const u32 *params, u32 *vals)
{
	return (t4_query_params(adapter, 0, 0, 0, nparams, params, vals));
}
1045
/*
 * VF flavor of t4_set_params: the command is issued with mbox, pf,
 * and vf all 0.
 */
static inline int
t4vf_set_params(struct adapter *adapter, unsigned int nparams,
    const u32 *params, const u32 *vals)
{
	return (t4_set_params(adapter, 0, 0, 0, nparams, params, vals));
}
1052
t4vf_wr_mbox(struct adapter * adap,const void * cmd,int size,void * rpl)1053 static inline int t4vf_wr_mbox(struct adapter *adap, const void *cmd,
1054 int size, void *rpl)
1055 {
1056 return t4_wr_mbox(adap, adap->mbox, cmd, size, rpl);
1057 }
1058
/* Entry points used by the VF driver during attach and configuration. */
int t4vf_wait_dev_ready(struct adapter *adapter);
int t4vf_fw_reset(struct adapter *adapter);
int t4vf_get_sge_params(struct adapter *adapter);
int t4vf_get_rss_glb_config(struct adapter *adapter);
int t4vf_get_vfres(struct adapter *adapter);
int t4vf_prep_adapter(struct adapter *adapter);
int t4vf_get_vf_mac(struct adapter *adapter, unsigned int port,
    unsigned int *naddr, u8 *addr);
int t4vf_get_vf_vlan(struct adapter *adapter);
/* BAR2 doorbell/GTS offset (and qid for user mappings) of an SGE queue. */
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
    enum t4_bar2_qtype qtype, int user, u64 *pbar2_qoffset,
    unsigned int *pbar2_qid);
/* Conversions between FW port capability speed bits and speeds in Mbps. */
unsigned int fwcap_to_speed(uint32_t caps);
uint32_t speed_to_fwcap(unsigned int speed);
uint32_t fwcap_top_speed(uint32_t caps);
1074
1075 static inline int
port_top_speed(const struct port_info * pi)1076 port_top_speed(const struct port_info *pi)
1077 {
1078
1079 /* Mbps -> Gbps */
1080 return (fwcap_to_speed(pi->link_cfg.pcaps) / 1000);
1081 }
1082
1083 /* SET_TCB_FIELD sent as a ULP command looks like this */
1084 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
1085 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
1086
/*
 * Emit, at 'cur', a CPL_SET_TCB_FIELD wrapped in a ULP_TX work request that
 * updates TCB word 'word' of connection 'tid' ('mask' selects the bits that
 * receive 'val' — standard SET_TCB_FIELD semantics, see t4_msg.h).  If
 * qid == -1 no reply is requested; otherwise the completion is delivered to
 * ingress queue 'qid' with a TOM cookie.  'cur' must be 8B aligned.
 * Returns a pointer just past the emitted, 16B-padded command so calls can
 * be chained to build a multi-command work request.
 */
static inline void *
mk_set_tcb_field_ulp_with_rpl(struct adapter *sc, void *cur, int tid,
    uint16_t word, uint64_t mask, uint64_t val, const int qid)
{
	struct ulp_txpkt *ulpmc;
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	MPASS(((uintptr_t)cur & 7) == 0);

	/* ULP_TX packet header: route the inner CPL to TP. */
	ulpmc = cur;
	ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DEST(ULP_TXPKT_DEST_TP));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	/* Immediate-data sub-command carrying the CPL. */
	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));

	if (qid == -1) {
		req->reply_ctrl = htobe16(F_NO_REPLY);
		req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	} else {
		/* T7 chips lay out the queue number field differently. */
		if (chip_id(sc) >= CHELSIO_T7) {
			req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
			    V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
		} else {
			req->reply_ctrl = htobe16(V_QUEUENO(qid) |
			    V_REPLY_CHAN(0) | V_NO_REPLY(0));
		}
		req->word_cookie = htobe16(V_WORD(word) |
		    V_COOKIE(CPL_COOKIE_TOM));
	}
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	/*
	 * ULP_TX is an 8B processor but the firmware transfers WRs in 16B
	 * chunks.  The master command for set_tcb_field does not end at a 16B
	 * boundary so it needs to be padded with a no-op.
	 */
	MPASS((LEN__SET_TCB_FIELD_ULP & 0xf) != 0);
	ulpsc = (struct ulptx_idata *)(req + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	ulpsc->len = htobe32(0);

	return (ulpsc + 1);
}
1138
1139 static inline void *
mk_set_tcb_field_ulp(struct adapter * sc,void * cur,int tid,uint16_t word,uint64_t mask,uint64_t val)1140 mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
1141 uint64_t mask, uint64_t val)
1142 {
1143 return (mk_set_tcb_field_ulp_with_rpl(sc, cur, tid, word, mask, val, -1));
1144 }
1145 #endif /* __CHELSIO_COMMON_H */
1146