1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7 #ifndef _QED_IF_H
8 #define _QED_IF_H
9
10 #include <linux/ethtool.h>
11 #include <linux/types.h>
12 #include <linux/interrupt.h>
13 #include <linux/netdevice.h>
14 #include <linux/pci.h>
15 #include <linux/skbuff.h>
16 #include <asm/byteorder.h>
17 #include <linux/io.h>
18 #include <linux/compiler.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/slab.h>
22 #include <linux/qed/common_hsi.h>
23 #include <linux/qed/qed_chain.h>
24 #include <linux/io-64-nonatomic-lo-hi.h>
25 #include <net/devlink.h>
26
/* Default TCP timer values (in ms) used by the iSCSI/TCP offload path:
 * silly-window-syndrome timer and 2*MSL (TIME_WAIT) timer.
 */
27 #define QED_TX_SWS_TIMER_DFLT 500
28 #define QED_TWO_MSL_TIMER_DFLT 4000
29
/* DCBX application protocol identifiers; also used as indices into
 * per-protocol tables (see struct qed_dcbx_app_prio).
 */
30 enum dcbx_protocol_type {
31 DCBX_PROTOCOL_ISCSI,
32 DCBX_PROTOCOL_FCOE,
33 DCBX_PROTOCOL_ROCE,
34 DCBX_PROTOCOL_ROCE_V2,
35 DCBX_PROTOCOL_ETH,
36 DCBX_MAX_PROTOCOL_TYPE /* number of valid protocol types */
37 };
38
/* Fixed index of RoCE in dcbx_protocol_type ordering above */
39 #define QED_ROCE_PROTOCOL_INDEX (3)
40
/* LLDP/DCBX table dimensions (chassis/port IDs in u32 words) */
41 #define QED_LLDP_CHASSIS_ID_STAT_LEN 4
42 #define QED_LLDP_PORT_ID_STAT_LEN 4
43 #define QED_DCBX_MAX_APP_PROTOCOL 32
44 #define QED_MAX_PFC_PRIORITIES 8
45 #define QED_DCBX_DSCP_SIZE 64
46
/* LLDP attributes received from the link peer. */
47 struct qed_dcbx_lldp_remote {
48 u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
49 u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
50 bool enable_rx;
51 bool enable_tx;
52 u32 tx_interval;
53 u32 max_credit;
54 };
55
/* LLDP attributes advertised by this port. */
56 struct qed_dcbx_lldp_local {
57 u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
58 u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
59 };
60
/* Resolved DCBX priority per offloaded protocol. */
61 struct qed_dcbx_app_prio {
62 u8 roce;
63 u8 roce_v2;
64 u8 fcoe;
65 u8 iscsi;
66 u8 eth;
67 };
68
/* PFC (priority flow control) parameters.
 * NOTE(review): "dbcx" is a historical typo for "dcbx"; the tag is kept
 * as-is because external callers reference this name.
 */
69 struct qed_dbcx_pfc_params {
70 bool willing;
71 bool enabled;
72 u8 prio[QED_MAX_PFC_PRIORITIES];
73 u8 max_tc;
74 };
75
/* IEEE DCBX application-entry selector field: how proto_id is matched. */
76 enum qed_dcbx_sf_ieee_type {
77 QED_DCBX_SF_IEEE_ETHTYPE,
78 QED_DCBX_SF_IEEE_TCP_PORT,
79 QED_DCBX_SF_IEEE_UDP_PORT,
80 QED_DCBX_SF_IEEE_TCP_UDP_PORT
81 };
82
/* One DCBX application-priority table entry. */
83 struct qed_app_entry {
84 bool ethtype; /* CEE: true if proto_id is an ethtype, else a TCP port */
85 enum qed_dcbx_sf_ieee_type sf_ieee; /* IEEE: selector for proto_id */
86 bool enabled;
87 u8 prio;
88 u16 proto_id;
89 enum dcbx_protocol_type proto_type;
90 };
91
/* Full DCBX parameter set: application table, ETS tables and PFC. */
92 struct qed_dcbx_params {
93 struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
94 u16 num_app_entries;
95 bool app_willing;
96 bool app_valid;
97 bool app_error;
98 bool ets_willing;
99 bool ets_enabled;
100 bool ets_cbs;
101 bool valid; /* Indicate validity of params */
102 u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
103 u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
104 u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
105 struct qed_dbcx_pfc_params pfc;
106 u8 max_ets_tc;
107 };
108
/* Locally-configured (administrative) DCBX parameters. */
109 struct qed_dcbx_admin_params {
110 struct qed_dcbx_params params;
111 bool valid; /* Indicate validity of params */
112 };
113
/* DCBX parameters advertised by the remote peer. */
114 struct qed_dcbx_remote_params {
115 struct qed_dcbx_params params;
116 bool valid; /* Indicate validity of params */
117 };
118
/* Negotiated (operational) DCBX state and resolved per-protocol priorities. */
119 struct qed_dcbx_operational_params {
120 struct qed_dcbx_app_prio app_prio;
121 struct qed_dcbx_params params;
122 bool valid; /* Indicate validity of params */
123 bool enabled;
124 bool ieee; /* negotiated in IEEE mode */
125 bool cee; /* negotiated in CEE mode */
126 bool local;
127 u32 err;
128 };
129
/* Aggregate DCBX query result: operational, LLDP, remote and local views. */
130 struct qed_dcbx_get {
131 struct qed_dcbx_operational_params operational;
132 struct qed_dcbx_lldp_remote lldp_remote;
133 struct qed_dcbx_lldp_local lldp_local;
134 struct qed_dcbx_remote_params remote;
135 struct qed_dcbx_admin_params local;
136 };
137
/* Image types addressable in NVRAM (see the nvm_get_image() op). */
138 enum qed_nvm_images {
139 QED_NVM_IMAGE_ISCSI_CFG,
140 QED_NVM_IMAGE_FCOE_CFG,
141 QED_NVM_IMAGE_MDUMP,
142 QED_NVM_IMAGE_NVM_CFG1,
143 QED_NVM_IMAGE_DEFAULT_CFG,
144 QED_NVM_IMAGE_NVM_META,
145 };
146
/* Energy Efficient Ethernet configuration/state for a link. */
147 struct qed_link_eee_params {
148 u32 tx_lpi_timer; /* LPI entry delay, in usec */
149 #define QED_EEE_1G_ADV BIT(0)
150 #define QED_EEE_10G_ADV BIT(1)
151
152 /* Capabilities are represented using QED_EEE_*_ADV values */
153 u8 adv_caps; /* locally advertised */
154 u8 lp_adv_caps; /* advertised by link partner */
155 bool enable;
156 bool tx_lpi_enable;
157 };
158
/* LED control modes for the set_led() op; RESTORE returns HW control. */
159 enum qed_led_mode {
160 QED_LED_MODE_OFF,
161 QED_LED_MODE_ON,
162 QED_LED_MODE_RESTORE
163 };
164
/* Ethernet statistics/config TLVs reported to the management FW.
 * Each value is paired with a *_set flag telling the MFW whether the
 * driver actually filled that field in.
 */
165 struct qed_mfw_tlv_eth {
166 u16 lso_maxoff_size;
167 bool lso_maxoff_size_set;
168 u16 lso_minseg_size;
169 bool lso_minseg_size_set;
170 u8 prom_mode;
171 bool prom_mode_set;
172 u16 tx_descr_size;
173 bool tx_descr_size_set;
174 u16 rx_descr_size;
175 bool rx_descr_size_set;
176 u16 netq_count;
177 bool netq_count_set;
178 u32 tcp4_offloads;
179 bool tcp4_offloads_set;
180 u32 tcp6_offloads;
181 bool tcp6_offloads_set;
182 u16 tx_descr_qdepth;
183 bool tx_descr_qdepth_set;
184 u16 rx_descr_qdepth;
185 bool rx_descr_qdepth_set;
186 u8 iov_offload;
187 #define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
188 #define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
189 #define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
190 #define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
191 bool iov_offload_set;
192 u8 txqs_empty;
193 bool txqs_empty_set;
194 u8 rxqs_empty;
195 bool rxqs_empty_set;
196 u8 num_txqs_full;
197 bool num_txqs_full_set;
198 u8 num_rxqs_full;
199 bool num_rxqs_full_set;
200 };
201
/* Timestamp as reported in MFW TLVs.
 * NOTE(review): no year/sec fields here although the wire format is
 * QED_MFW_TLV_TIME_SIZE (14) bytes — presumably packed elsewhere; confirm
 * against the TLV serializer before relying on this layout.
 */
202 #define QED_MFW_TLV_TIME_SIZE 14
203 struct qed_mfw_tlv_time {
204 bool b_set; /* true if this timestamp is valid */
205 u8 month;
206 u8 day;
207 u8 hour;
208 u8 min;
209 u16 msec;
210 u16 usec;
211 };
212
/* FCoE statistics/config TLVs reported to the management FW.
 * Each value is paired with a *_set flag indicating whether the driver
 * filled it in; arrays of 5 keep the most recent per-event records with
 * matching timestamps.
 * NOTE(review): "primtive_err" is a historical typo for "primitive_err";
 * kept as-is since external users reference the field name.
 */
213 struct qed_mfw_tlv_fcoe {
214 u8 scsi_timeout;
215 bool scsi_timeout_set;
216 u32 rt_tov;
217 bool rt_tov_set;
218 u32 ra_tov;
219 bool ra_tov_set;
220 u32 ed_tov;
221 bool ed_tov_set;
222 u32 cr_tov;
223 bool cr_tov_set;
224 u8 boot_type;
225 bool boot_type_set;
226 u8 npiv_state;
227 bool npiv_state_set;
228 u32 num_npiv_ids;
229 bool num_npiv_ids_set;
230 u8 switch_name[8];
231 bool switch_name_set;
232 u16 switch_portnum;
233 bool switch_portnum_set;
234 u8 switch_portid[3];
235 bool switch_portid_set;
236 u8 vendor_name[8];
237 bool vendor_name_set;
238 u8 switch_model[8];
239 bool switch_model_set;
240 u8 switch_fw_version[8];
241 bool switch_fw_version_set;
242 u8 qos_pri;
243 bool qos_pri_set;
244 u8 port_alias[3];
245 bool port_alias_set;
246 u8 port_state;
247 #define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
248 #define QED_MFW_TLV_PORT_STATE_LOOP (1)
249 #define QED_MFW_TLV_PORT_STATE_P2P (2)
250 #define QED_MFW_TLV_PORT_STATE_FABRIC (3)
251 bool port_state_set;
252 u16 fip_tx_descr_size;
253 bool fip_tx_descr_size_set;
254 u16 fip_rx_descr_size;
255 bool fip_rx_descr_size_set;
256 u16 link_failures;
257 bool link_failures_set;
258 u8 fcoe_boot_progress;
259 bool fcoe_boot_progress_set;
260 u64 rx_bcast;
261 bool rx_bcast_set;
262 u64 tx_bcast;
263 bool tx_bcast_set;
264 u16 fcoe_txq_depth;
265 bool fcoe_txq_depth_set;
266 u16 fcoe_rxq_depth;
267 bool fcoe_rxq_depth_set;
268 u64 fcoe_rx_frames;
269 bool fcoe_rx_frames_set;
270 u64 fcoe_rx_bytes;
271 bool fcoe_rx_bytes_set;
272 u64 fcoe_tx_frames;
273 bool fcoe_tx_frames_set;
274 u64 fcoe_tx_bytes;
275 bool fcoe_tx_bytes_set;
276 u16 crc_count;
277 bool crc_count_set;
278 u32 crc_err_src_fcid[5];
279 bool crc_err_src_fcid_set[5];
280 struct qed_mfw_tlv_time crc_err[5];
281 u16 losync_err;
282 bool losync_err_set;
283 u16 losig_err;
284 bool losig_err_set;
285 u16 primtive_err;
286 bool primtive_err_set;
287 u16 disparity_err;
288 bool disparity_err_set;
289 u16 code_violation_err;
290 bool code_violation_err_set;
291 u32 flogi_param[4];
292 bool flogi_param_set[4];
293 struct qed_mfw_tlv_time flogi_tstamp;
294 u32 flogi_acc_param[4];
295 bool flogi_acc_param_set[4];
296 struct qed_mfw_tlv_time flogi_acc_tstamp;
297 u32 flogi_rjt;
298 bool flogi_rjt_set;
299 struct qed_mfw_tlv_time flogi_rjt_tstamp;
300 u32 fdiscs;
301 bool fdiscs_set;
302 u8 fdisc_acc;
303 bool fdisc_acc_set;
304 u8 fdisc_rjt;
305 bool fdisc_rjt_set;
306 u8 plogi;
307 bool plogi_set;
308 u8 plogi_acc;
309 bool plogi_acc_set;
310 u8 plogi_rjt;
311 bool plogi_rjt_set;
312 u32 plogi_dst_fcid[5];
313 bool plogi_dst_fcid_set[5];
314 struct qed_mfw_tlv_time plogi_tstamp[5];
315 u32 plogi_acc_src_fcid[5];
316 bool plogi_acc_src_fcid_set[5];
317 struct qed_mfw_tlv_time plogi_acc_tstamp[5];
318 u8 tx_plogos;
319 bool tx_plogos_set;
320 u8 plogo_acc;
321 bool plogo_acc_set;
322 u8 plogo_rjt;
323 bool plogo_rjt_set;
324 u32 plogo_src_fcid[5];
325 bool plogo_src_fcid_set[5];
326 struct qed_mfw_tlv_time plogo_tstamp[5];
327 u8 rx_logos;
328 bool rx_logos_set;
329 u8 tx_accs;
330 bool tx_accs_set;
331 u8 tx_prlis;
332 bool tx_prlis_set;
333 u8 rx_accs;
334 bool rx_accs_set;
335 u8 tx_abts;
336 bool tx_abts_set;
337 u8 rx_abts_acc;
338 bool rx_abts_acc_set;
339 u8 rx_abts_rjt;
340 bool rx_abts_rjt_set;
341 u32 abts_dst_fcid[5];
342 bool abts_dst_fcid_set[5];
343 struct qed_mfw_tlv_time abts_tstamp[5];
344 u8 rx_rscn;
345 bool rx_rscn_set;
346 u32 rx_rscn_nport[4];
347 bool rx_rscn_nport_set[4];
348 u8 tx_lun_rst;
349 bool tx_lun_rst_set;
350 u8 abort_task_sets;
351 bool abort_task_sets_set;
352 u8 tx_tprlos;
353 bool tx_tprlos_set;
354 u8 tx_nos;
355 bool tx_nos_set;
356 u8 rx_nos;
357 bool rx_nos_set;
358 u8 ols;
359 bool ols_set;
360 u8 lr;
361 bool lr_set;
362 u8 lrr;
363 bool lrr_set;
364 u8 tx_lip;
365 bool tx_lip_set;
366 u8 rx_lip;
367 bool rx_lip_set;
368 u8 eofa;
369 bool eofa_set;
370 u8 eofni;
371 bool eofni_set;
372 u8 scsi_chks;
373 bool scsi_chks_set;
374 u8 scsi_cond_met;
375 bool scsi_cond_met_set;
376 u8 scsi_busy;
377 bool scsi_busy_set;
378 u8 scsi_inter;
379 bool scsi_inter_set;
380 u8 scsi_inter_cond_met;
381 bool scsi_inter_cond_met_set;
382 u8 scsi_rsv_conflicts;
383 bool scsi_rsv_conflicts_set;
384 u8 scsi_tsk_full;
385 bool scsi_tsk_full_set;
386 u8 scsi_aca_active;
387 bool scsi_aca_active_set;
388 u8 scsi_tsk_abort;
389 bool scsi_tsk_abort_set;
390 u32 scsi_rx_chk[5];
391 bool scsi_rx_chk_set[5];
392 struct qed_mfw_tlv_time scsi_chk_tstamp[5];
393 };
394
/* iSCSI statistics/config TLVs reported to the management FW.
 * Each value is paired with a *_set flag indicating whether the driver
 * filled it in.
 * NOTE(review): "boot_taget_portal" is a historical typo for
 * "boot_target_portal"; kept as-is since external users reference it.
 */
395 struct qed_mfw_tlv_iscsi {
396 u8 target_llmnr;
397 bool target_llmnr_set;
398 u8 header_digest;
399 bool header_digest_set;
400 u8 data_digest;
401 bool data_digest_set;
402 u8 auth_method;
403 #define QED_MFW_TLV_AUTH_METHOD_NONE (1)
404 #define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
405 #define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
406 bool auth_method_set;
407 u16 boot_taget_portal;
408 bool boot_taget_portal_set;
409 u16 frame_size;
410 bool frame_size_set;
411 u16 tx_desc_size;
412 bool tx_desc_size_set;
413 u16 rx_desc_size;
414 bool rx_desc_size_set;
415 u8 boot_progress;
416 bool boot_progress_set;
417 u16 tx_desc_qdepth;
418 bool tx_desc_qdepth_set;
419 u16 rx_desc_qdepth;
420 bool rx_desc_qdepth_set;
421 u64 rx_frames;
422 bool rx_frames_set;
423 u64 rx_bytes;
424 bool rx_bytes_set;
425 u64 tx_frames;
426 bool tx_frames_set;
427 u64 tx_bytes;
428 bool tx_bytes_set;
429 };
430
/* Doorbell-recovery entry attributes (see db_recovery_add()). */
431 enum qed_db_rec_width {
432 DB_REC_WIDTH_32B,
433 DB_REC_WIDTH_64B,
434 };
435
/* Address space of the registered doorbell data. */
436 enum qed_db_rec_space {
437 DB_REC_KERNEL,
438 DB_REC_USER,
439 };
440
/* Raw MMIO register accessors.
 *
 * The value argument is fully parenthesized before the cast so that
 * expression arguments are converted as a whole: without the inner
 * parentheses, DIRECT_REG_WR(addr, a | b) would expand to
 * writel((u32)a | b, ...), casting only the first operand.
 */
#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)(val), \
					      (void __iomem *)(reg_addr))
448
/* Interrupt-coalescing bounds/defaults (usec); see the set_coalesce() op. */
449 #define QED_COALESCE_MAX 0x1FF
450 #define QED_DEFAULT_RX_USECS 12
451 #define QED_DEFAULT_TX_USECS 48
452
453 /* forward */
454 struct qed_dev;
455
/* L2 (Ethernet) PF resource parameters. */
456 struct qed_eth_pf_params {
457 /* The following parameters are used during HW-init
458 * and these parameters need to be passed as arguments
459 * to update_pf_params routine invoked before slowpath start
460 */
461 u16 num_cons;
462
463 /* per-VF number of CIDs */
464 u8 num_vf_cons;
465 #define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
466
467 /* To enable arfs, previous to HW-init a positive number needs to be
468 * set [as filters require allocated searcher ILT memory].
469 * This will set the maximal number of configured steering-filters.
470 */
471 u32 num_arfs_filters;
472 };
473
/* FCoE PF resource/protocol parameters. */
474 struct qed_fcoe_pf_params {
475 /* The following parameters are used during protocol-init */
476 u64 glbl_q_params_addr;
477 u64 bdq_pbl_base_addr[2];
478
479 /* The following parameters are used during HW-init
480 * and these parameters need to be passed as arguments
481 * to update_pf_params routine invoked before slowpath start
482 */
483 u16 num_cons;
484 u16 num_tasks;
485
486 /* The following parameters are used during protocol-init */
487 u16 sq_num_pbl_pages;
488
489 u16 cq_num_entries;
490 u16 cmdq_num_entries;
491 u16 rq_buffer_log_size;
492 u16 mtu;
493 u16 dummy_icid;
494 u16 bdq_xoff_threshold[2];
495 u16 bdq_xon_threshold[2];
496 u16 rq_buffer_size;
497 u8 num_cqs; /* num of global CQs */
498 u8 log_page_size;
499 u8 gl_rq_pi;
500 u8 gl_cmd_pi;
501 u8 debug_mode;
502 u8 is_target;
503 u8 bdq_pbl_num_entries[2];
504 };
505
506 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
506 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
507 struct qed_iscsi_pf_params {
508 u64 glbl_q_params_addr;
509 u64 bdq_pbl_base_addr[3];
510 u16 cq_num_entries;
511 u16 cmdq_num_entries;
512 u32 two_msl_timer;
513 u16 tx_sws_timer;
514
515 /* The following parameters are used during HW-init
516 * and these parameters need to be passed as arguments
517 * to update_pf_params routine invoked before slowpath start
518 */
519 u16 num_cons;
520 u16 num_tasks;
521
522 /* The following parameters are used during protocol-init */
523 u16 half_way_close_timeout;
524 u16 bdq_xoff_threshold[3];
525 u16 bdq_xon_threshold[3];
526 u16 cmdq_xoff_threshold;
527 u16 cmdq_xon_threshold;
528 u16 rq_buffer_size;
529
530 u8 num_sq_pages_in_ring;
531 u8 num_r2tq_pages_in_ring;
532 u8 num_uhq_pages_in_ring;
533 u8 num_queues;
534 u8 log_page_size;
535 u8 rqe_log_size;
536 u8 max_fin_rt;
537 u8 gl_rq_pi;
538 u8 gl_cmd_pi;
539 u8 debug_mode;
540 u8 ll2_ooo_queue_id;
541
542 u8 is_target;
543 u8 is_soc_en;
544 u8 soc_num_of_blocks_log;
545 u8 bdq_pbl_num_entries[3];
546 };
547
/* NVMe/TCP PF resource/protocol parameters. */
548 struct qed_nvmetcp_pf_params {
549 u64 glbl_q_params_addr;
550 u16 cq_num_entries;
551 u16 num_cons;
552 u16 num_tasks;
553 u8 num_sq_pages_in_ring;
554 u8 num_r2tq_pages_in_ring;
555 u8 num_uhq_pages_in_ring;
556 u8 num_queues;
557 u8 gl_rq_pi;
558 u8 gl_cmd_pi;
559 u8 debug_mode;
560 u8 ll2_ooo_queue_id;
561 u16 min_rto; /* minimum TCP retransmission timeout */
562 };
563
/* RDMA (RoCE/iWARP) PF resource parameters. */
564 struct qed_rdma_pf_params {
565 /* Supplied to QED during resource allocation (may affect the ILT and
566 * the doorbell BAR).
567 */
568 u32 min_dpis; /* number of requested DPIs */
569 u32 num_qps; /* number of requested Queue Pairs */
570 u32 num_srqs; /* number of requested SRQ */
571 u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
572 u8 gl_pi; /* protocol index */
573
574 /* Will allocate rate limiters to be used with QPs */
575 u8 enable_dcqcn;
576 };
577
/* Union-like container of per-protocol PF parameters, passed to the
 * update_pf_params() op before slowpath start.
 */
578 struct qed_pf_params {
579 struct qed_eth_pf_params eth_pf_params;
580 struct qed_fcoe_pf_params fcoe_pf_params;
581 struct qed_iscsi_pf_params iscsi_pf_params;
582 struct qed_nvmetcp_pf_params nvmetcp_pf_params;
583 struct qed_rdma_pf_params rdma_pf_params;
584 };
585
/* Interrupt delivery modes supported by the device. */
586 enum qed_int_mode {
587 QED_INT_MODE_INTA,
588 QED_INT_MODE_MSIX,
589 QED_INT_MODE_MSI,
590 QED_INT_MODE_POLL,
591 };
592
/* Per-status-block bookkeeping shared between qed and protocol drivers. */
593 struct qed_sb_info {
594 struct status_block *sb_virt; /* virtual address of the SB */
595 dma_addr_t sb_phys; /* DMA address of the SB */
596 u32 sb_ack; /* Last given ack */
597 u16 igu_sb_id;
598 void __iomem *igu_addr;
599 u8 flags;
600 #define QED_SB_INFO_INIT 0x1
601 #define QED_SB_INFO_SETUP 0x2
602
603 struct qed_dev *cdev;
604 };
605
/* Hardware error classes reported via schedule_hw_err_handler(). */
606 enum qed_hw_err_type {
607 QED_HW_ERR_FAN_FAIL,
608 QED_HW_ERR_MFW_RESP_FAIL,
609 QED_HW_ERR_HW_ATTN,
610 QED_HW_ERR_DMAE_FAIL,
611 QED_HW_ERR_RAMROD_FAIL,
612 QED_HW_ERR_FW_ASSERT,
613 QED_HW_ERR_LAST,
614 };
615
/* Device family: BB (BigBear) or AH (Atlantic Hawk). */
616 enum qed_dev_type {
617 QED_DEV_TYPE_BB,
618 QED_DEV_TYPE_AH,
619 };
620
/* Static device capabilities/identity, filled by qed for protocol drivers. */
621 struct qed_dev_info {
622 unsigned long pci_mem_start;
623 unsigned long pci_mem_end;
624 unsigned int pci_irq;
625 u8 num_hwfns;
626
627 u8 hw_mac[ETH_ALEN];
628
629 /* FW version */
630 u16 fw_major;
631 u16 fw_minor;
632 u16 fw_rev;
633 u16 fw_eng;
634
635 /* MFW version */
636 u32 mfw_rev;
637 #define QED_MFW_VERSION_0_MASK 0x000000FF
638 #define QED_MFW_VERSION_0_OFFSET 0
639 #define QED_MFW_VERSION_1_MASK 0x0000FF00
640 #define QED_MFW_VERSION_1_OFFSET 8
641 #define QED_MFW_VERSION_2_MASK 0x00FF0000
642 #define QED_MFW_VERSION_2_OFFSET 16
643 #define QED_MFW_VERSION_3_MASK 0xFF000000
644 #define QED_MFW_VERSION_3_OFFSET 24
645
646 u32 flash_size;
647 bool b_arfs_capable;
648 bool b_inter_pf_switch;
649 bool tx_switching;
650 bool rdma_supported;
651 u16 mtu;
652
653 bool wol_support;
654 bool smart_an;
655 bool esl; /* enhanced system lockdown */
656
657 /* MBI version */
658 u32 mbi_version;
659 #define QED_MBI_VERSION_0_MASK 0x000000FF
660 #define QED_MBI_VERSION_0_OFFSET 0
661 #define QED_MBI_VERSION_1_MASK 0x0000FF00
662 #define QED_MBI_VERSION_1_OFFSET 8
663 #define QED_MBI_VERSION_2_MASK 0x00FF0000
664 #define QED_MBI_VERSION_2_OFFSET 16
665
666 enum qed_dev_type dev_type;
667
668 /* Output parameters for qede */
669 bool vxlan_enable;
670 bool gre_enable;
671 bool geneve_enable;
672
673 u8 abs_pf_id;
674 };
675
/* Purpose of a status block, passed to sb_init()/sb_release(). */
676 enum qed_sb_type {
677 QED_SB_TYPE_L2_QUEUE,
678 QED_SB_TYPE_CNQ,
679 QED_SB_TYPE_STORAGE,
680 };
681
/* Protocol personality requested at probe time. */
682 enum qed_protocol {
683 QED_PROTOCOL_ETH,
684 QED_PROTOCOL_ISCSI,
685 QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI, /* shares the iSCSI personality */
686 QED_PROTOCOL_FCOE,
687 };
688
/* Forward-error-correction modes; bitmask values so they can be OR-ed. */
689 enum qed_fec_mode {
690 QED_FEC_MODE_NONE = BIT(0),
691 QED_FEC_MODE_FIRECODE = BIT(1),
692 QED_FEC_MODE_RS = BIT(2),
693 QED_FEC_MODE_AUTO = BIT(3),
694 QED_FEC_MODE_UNSUPPORTED = BIT(4),
695 };
696
/* Requested link configuration for the set_link() op; override_flags
 * selects which of the fields below are applied.
 */
697 struct qed_link_params {
698 bool link_up;
699
700 u32 override_flags;
701 #define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
702 #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
703 #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
704 #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
705 #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
706 #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
707 #define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
708
709 bool autoneg;
710 __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
711 u32 forced_speed;
712
713 u32 pause_config;
714 #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
715 #define QED_LINK_PAUSE_RX_ENABLE BIT(1)
716 #define QED_LINK_PAUSE_TX_ENABLE BIT(2)
717
718 u32 loopback_mode;
719 #define QED_LINK_LOOPBACK_NONE BIT(0)
720 #define QED_LINK_LOOPBACK_INT_PHY BIT(1)
721 #define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
722 #define QED_LINK_LOOPBACK_EXT BIT(3)
723 #define QED_LINK_LOOPBACK_MAC BIT(4)
724 #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
725 #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
726 #define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
727 #define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
728 #define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
729
730 struct qed_link_eee_params eee;
731 u32 fec; /* qed_fec_mode bitmask */
732 };
733
/* Current link state, filled by the get_link() op. */
734 struct qed_link_output {
735 bool link_up;
736
737 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
738 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
739 __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
740
741 u32 speed; /* In Mb/s */
742 u8 duplex; /* In DUPLEX defs */
743 u8 port; /* In PORT defs */
744 bool autoneg;
745 u32 pause_config;
746
747 /* EEE - capability & param */
748 bool eee_supported;
749 bool eee_active;
750 u8 sup_caps;
751 struct qed_link_eee_params eee;
752
753 u32 sup_fec; /* qed_fec_mode bitmask */
754 u32 active_fec;
755 };
756
/* Parameters for the probe() op. */
757 struct qed_probe_params {
758 enum qed_protocol protocol;
759 u32 dp_module; /* debug print module mask (enum DP_MODULE) */
760 u8 dp_level; /* debug print level (enum DP_LEVEL) */
761 bool is_vf;
762 bool recov_in_prog; /* probing while a recovery is in progress */
763 };
764
/* Parameters for the slowpath_start() op; driver version is reported
 * to the management FW.
 */
765 #define QED_DRV_VER_STR_SIZE 12
766 struct qed_slowpath_params {
767 u32 int_mode; /* enum qed_int_mode */
768 u8 drv_major;
769 u8 drv_minor;
770 u8 drv_rev;
771 u8 drv_eng;
772 u8 name[QED_DRV_VER_STR_SIZE];
773 };
774
775 #define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
776
/* Fastpath interrupt vectors handed to the protocol driver. */
777 struct qed_int_info {
778 struct msix_entry *msix;
779 u8 msix_cnt;
780
781 /* This should be updated by the protocol driver */
782 u8 used_cnt;
783 };
784
/* Generic TLV data gathered from the protocol driver
 * (see the get_generic_tlv_data callback).
 */
785 struct qed_generic_tlvs {
786 #define QED_TLV_IP_CSUM BIT(0)
787 #define QED_TLV_LSO BIT(1)
788 u16 feat_flags; /* QED_TLV_* feature bitmask */
789 #define QED_TLV_MAC_COUNT 3
790 u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
791 };
792
/* SFP module I2C device addresses (see read_module_eeprom()). */
793 #define QED_I2C_DEV_ADDR_A0 0xA0
794 #define QED_I2C_DEV_ADDR_A2 0xA2
795
/* Magic signature expected at the head of an NVM flash file. */
796 #define QED_NVM_SIGNATURE 0x12435687
797
/* Command records inside an NVM flash file (see nvm_flash()). */
798 enum qed_nvm_flash_cmd {
799 QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
800 QED_NVM_FLASH_CMD_FILE_START = 0x3,
801 QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
802 QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
803 QED_NVM_FLASH_CMD_NVM_MAX,
804 };
805
/* Per-device devlink context and health reporter. */
806 struct qed_devlink {
807 struct qed_dev *cdev;
808 struct devlink_health_reporter *fw_reporter;
809 };
810
/* Status-block debug snapshot (see the get_sb_info op). */
811 struct qed_sb_info_dbg {
812 u32 igu_prod;
813 u32 igu_cons;
814 u16 pi[PIS_PER_SB];
815 };
816
/* Callbacks the protocol driver (e.g. qede) registers with qed; invoked
 * on async events and when qed needs data from the upper driver.
 */
817 struct qed_common_cb_ops {
818 void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
819 void (*link_update)(void *dev, struct qed_link_output *link);
820 void (*schedule_recovery_handler)(void *dev);
821 void (*schedule_hw_err_handler)(void *dev,
822 enum qed_hw_err_type err_type);
823 void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
824 void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
825 void (*get_protocol_tlv_data)(void *dev, void *data);
826 void (*bw_update)(void *dev);
827 };
828
/* Hardware self-test callbacks exposed to protocol drivers. */
829 struct qed_selftest_ops {
830 /**
831 * selftest_interrupt(): Perform interrupt test.
832 *
833 * @cdev: Qed dev pointer.
834 *
835 * Return: 0 on success, error otherwise.
836 */
837 int (*selftest_interrupt)(struct qed_dev *cdev);
838
839 /**
840 * selftest_memory(): Perform memory test.
841 *
842 * @cdev: Qed dev pointer.
843 *
844 * Return: 0 on success, error otherwise.
845 */
846 int (*selftest_memory)(struct qed_dev *cdev);
847
848 /**
849 * selftest_register(): Perform register test.
850 *
851 * @cdev: Qed dev pointer.
852 *
853 * Return: 0 on success, error otherwise.
854 */
855 int (*selftest_register)(struct qed_dev *cdev);
856
857 /**
858 * selftest_clock(): Perform clock test.
859 *
860 * @cdev: Qed dev pointer.
861 *
862 * Return: 0 on success, error otherwise.
863 */
864 int (*selftest_clock)(struct qed_dev *cdev);
865
866 /**
867 * selftest_nvram(): Perform nvram test.
868 *
869 * @cdev: Qed dev pointer.
870 *
871 * Return: 0 on success, error otherwise.
872 */
873 int (*selftest_nvram) (struct qed_dev *cdev);
874 };
875
/* Core API exported by qed to protocol drivers: device lifecycle,
 * interrupts, link management, NVM/debug access and recovery.
 */
876 struct qed_common_ops {
877 struct qed_selftest_ops *selftest;
878
/* Probe the PCI device and create the qed device context. */
879 struct qed_dev* (*probe)(struct pci_dev *dev,
880 struct qed_probe_params *params);
881
/* Tear down and free the qed device context. */
882 void (*remove)(struct qed_dev *cdev);
883
884 int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
885
/* Set the name used in debug prints (see DP_NAME()). */
886 void (*set_name) (struct qed_dev *cdev, char name[]);
887
888 /* Client drivers need to make this call before slowpath_start.
889 * PF params required for the call before slowpath_start is
890 * documented within the qed_pf_params structure definition.
891 */
892 void (*update_pf_params)(struct qed_dev *cdev,
893 struct qed_pf_params *params);
894
895 int (*slowpath_start)(struct qed_dev *cdev,
896 struct qed_slowpath_params *params);
897
898 int (*slowpath_stop)(struct qed_dev *cdev);
899
900 /* Requests to use `cnt' interrupts for fastpath.
901 * upon success, returns number of interrupts allocated for fastpath.
902 */
903 int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
904
905 /* Fills `info' with pointers required for utilizing interrupts */
906 int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
907
908 u32 (*sb_init)(struct qed_dev *cdev,
909 struct qed_sb_info *sb_info,
910 void *sb_virt_addr,
911 dma_addr_t sb_phy_addr,
912 u16 sb_id,
913 enum qed_sb_type type);
914
915 u32 (*sb_release)(struct qed_dev *cdev,
916 struct qed_sb_info *sb_info,
917 u16 sb_id,
918 enum qed_sb_type type);
919
920 void (*simd_handler_config)(struct qed_dev *cdev,
921 void *token,
922 int index,
923 void (*handler)(void *));
924
925 void (*simd_handler_clean)(struct qed_dev *cdev, int index);
926
/* Debug data collection: GRC dump and full debug dump + size queries. */
927 int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
928
929 int (*dbg_grc_size)(struct qed_dev *cdev);
930
931 int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
932
933 int (*dbg_all_data_size)(struct qed_dev *cdev);
934
935 int (*report_fatal_error)(struct devlink *devlink,
936 enum qed_hw_err_type err_type);
937
938 /**
939 * can_link_change(): can the instance change the link or not.
940 *
941 * @cdev: Qed dev pointer.
942 *
943 * Return: true if link-change is allowed, false otherwise.
944 */
945 bool (*can_link_change)(struct qed_dev *cdev);
946
947 /**
948 * set_link(): set links according to params.
949 *
950 * @cdev: Qed dev pointer.
951 * @params: values used to override the default link configuration.
952 *
953 * Return: 0 on success, error otherwise.
954 */
955 int (*set_link)(struct qed_dev *cdev,
956 struct qed_link_params *params);
957
958 /**
959 * get_link(): returns the current link state.
960 *
961 * @cdev: Qed dev pointer.
962 * @if_link: structure to be filled with current link configuration.
963 *
964 * Return: Void.
965 */
966 void (*get_link)(struct qed_dev *cdev,
967 struct qed_link_output *if_link);
968
969 /**
970 * drain(): drains chip in case Tx completions fail to arrive due to pause.
971 *
972 * @cdev: Qed dev pointer.
973 *
974 * Return: Int.
975 */
976 int (*drain)(struct qed_dev *cdev);
977
978 /**
979 * update_msglvl(): update module debug level.
980 *
981 * @cdev: Qed dev pointer.
982 * @dp_module: Debug module.
983 * @dp_level: Debug level.
984 *
985 * Return: Void.
986 */
987 void (*update_msglvl)(struct qed_dev *cdev,
988 u32 dp_module,
989 u8 dp_level);
990
991 int (*chain_alloc)(struct qed_dev *cdev,
992 struct qed_chain *chain,
993 struct qed_chain_init_params *params);
994
995 void (*chain_free)(struct qed_dev *cdev,
996 struct qed_chain *p_chain);
997
998 /**
999 * nvm_flash(): Flash nvm data.
1000 *
1001 * @cdev: Qed dev pointer.
1002 * @name: file containing the data.
1003 *
1004 * Return: 0 on success, error otherwise.
1005 */
1006 int (*nvm_flash)(struct qed_dev *cdev, const char *name);
1007
1008 /**
1009 * nvm_get_image(): reads an entire image from nvram.
1010 *
1011 * @cdev: Qed dev pointer.
1012 * @type: type of the request nvram image.
1013 * @buf: preallocated buffer to fill with the image.
1014 * @len: length of the allocated buffer.
1015 *
1016 * Return: 0 on success, error otherwise.
1017 */
1018 int (*nvm_get_image)(struct qed_dev *cdev,
1019 enum qed_nvm_images type, u8 *buf, u16 len);
1020
1021 /**
1022 * set_coalesce(): Configure Rx coalesce value in usec.
1023 *
1024 * @cdev: Qed dev pointer.
1025 * @rx_coal: Rx coalesce value in usec.
1026 * @tx_coal: Tx coalesce value in usec.
1027 * @handle: Handle.
1028 *
1029 * Return: 0 on success, error otherwise.
1030 */
1031 int (*set_coalesce)(struct qed_dev *cdev,
1032 u16 rx_coal, u16 tx_coal, void *handle);
1033
1034 /**
1035 * set_led() - Configure LED mode.
1036 *
1037 * @cdev: Qed dev pointer.
1038 * @mode: LED mode.
1039 *
1040 * Return: 0 on success, error otherwise.
1041 */
1042 int (*set_led)(struct qed_dev *cdev,
1043 enum qed_led_mode mode);
1044
1045 /**
1046 * attn_clr_enable(): Prevent attentions from being reasserted.
1047 *
1048 * @cdev: Qed dev pointer.
1049 * @clr_enable: Clear enable.
1050 *
1051 * Return: Void.
1052 */
1053 void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
1054
1055 /**
1056 * db_recovery_add(): add doorbell information to the doorbell
1057 * recovery mechanism.
1058 *
1059 * @cdev: Qed dev pointer.
1060 * @db_addr: Doorbell address.
1061 * @db_data: Address of where db_data is stored.
1062 * @db_width: Doorbell is 32b or 64b.
1063 * @db_space: Doorbell recovery addresses are user or kernel space.
1064 *
1065 * Return: Int.
1066 */
1067 int (*db_recovery_add)(struct qed_dev *cdev,
1068 void __iomem *db_addr,
1069 void *db_data,
1070 enum qed_db_rec_width db_width,
1071 enum qed_db_rec_space db_space);
1072
1073 /**
1074 * db_recovery_del(): remove doorbell information from the doorbell
1075 * recovery mechanism. db_data serves as key (db_addr is not unique).
1076 *
1077 * @cdev: Qed dev pointer.
1078 * @db_addr: Doorbell address.
1079 * @db_data: Address where db_data is stored. Serves as key for the
1080 * entry to delete.
1081 *
1082 * Return: Int.
1083 */
1084 int (*db_recovery_del)(struct qed_dev *cdev,
1085 void __iomem *db_addr, void *db_data);
1086
1087 /**
1088 * recovery_process(): Trigger a recovery process.
1089 *
1090 * @cdev: Qed dev pointer.
1091 *
1092 * Return: 0 on success, error otherwise.
1093 */
1094 int (*recovery_process)(struct qed_dev *cdev);
1095
1096 /**
1097 * recovery_prolog(): Execute the prolog operations of a recovery process.
1098 *
1099 * @cdev: Qed dev pointer.
1100 *
1101 * Return: 0 on success, error otherwise.
1102 */
1103 int (*recovery_prolog)(struct qed_dev *cdev);
1104
1105 /**
1106 * update_drv_state(): API to inform the change in the driver state.
1107 *
1108 * @cdev: Qed dev pointer.
1109 * @active: Active
1110 *
1111 * Return: Int.
1112 */
1113 int (*update_drv_state)(struct qed_dev *cdev, bool active);
1114
1115 /**
1116 * update_mac(): API to inform the change in the mac address.
1117 *
1118 * @cdev: Qed dev pointer.
1119 * @mac: MAC.
1120 *
1121 * Return: Int.
1122 */
1123 int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
1124
1125 /**
1126 * update_mtu(): API to inform the change in the mtu.
1127 *
1128 * @cdev: Qed dev pointer.
1129 * @mtu: MTU.
1130 *
1131 * Return: Int.
1132 */
1133 int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
1134
1135 /**
1136 * update_wol(): Update of changes in the WoL configuration.
1137 *
1138 * @cdev: Qed dev pointer.
1139 * @enabled: true iff WoL should be enabled.
1140 *
1141 * Return: Int.
1142 */
1143 int (*update_wol) (struct qed_dev *cdev, bool enabled);
1144
1145 /**
1146 * read_module_eeprom(): Read EEPROM.
1147 *
1148 * @cdev: Qed dev pointer.
1149 * @buf: buffer.
1150 * @dev_addr: PHY device memory region.
1151 * @offset: offset into eeprom contents to be read.
1152 * @len: buffer length, i.e., max bytes to be read.
1153 *
1154 * Return: Int.
1155 */
1156 int (*read_module_eeprom)(struct qed_dev *cdev,
1157 char *buf, u8 dev_addr, u32 offset, u32 len);
1158
1159 /**
1160 * get_affin_hwfn_idx(): Get affine HW function.
1161 *
1162 * @cdev: Qed dev pointer.
1163 *
1164 * Return: u8.
1165 */
1166 u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
1167
1168 /**
1169 * read_nvm_cfg(): Read NVM config attribute value.
1170 *
1171 * @cdev: Qed dev pointer.
1172 * @buf: Buffer.
1173 * @cmd: NVM CFG command id.
1174 * @entity_id: Entity id.
1175 *
1176 * Return: Int.
1177 */
1178 int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
1179 u32 entity_id);
1180 /**
1181 * read_nvm_cfg_len(): Read NVM config attribute value.
1182 *
1183 * @cdev: Qed dev pointer.
1184 * @cmd: NVM CFG command id.
1185 *
1186 * Return: config id length, 0 on error.
1187 */
1188 int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
1189
1190 /**
1191 * set_grc_config(): Configure value for grc config id.
1192 *
1193 * @cdev: Qed dev pointer.
1194 * @cfg_id: grc config id
1195 * @val: grc config value
1196 *
1197 * Return: Int.
1198 */
1199 int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
1200
/* Devlink instance registration/unregistration. */
1201 struct devlink* (*devlink_register)(struct qed_dev *cdev);
1202
1203 void (*devlink_unregister)(struct devlink *devlink);
1204
/* printf-style message forwarded to the management FW log. */
1205 __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
1206
/* Fill a debug snapshot of the status block for queue `qid'. */
1207 int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
1208 u16 qid, struct qed_sb_info_dbg *sb_dbg);
1209
/* Query enhanced-system-lockdown status. */
1210 int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
1211 };
1212
/* Bit-field helpers built on NAME_MASK/NAME_SHIFT (HSI style) and
 * FIELD_MASK/FIELD_OFFSET (MFW style) macro pairs.
 *
 * All macro arguments are parenthesized in the expansions so that
 * expression arguments (e.g. `a + b`) bind correctly: the original
 * SET_FIELD cast `(u64)flag` and the unparenthesized mask/shift
 * operands mis-associate for non-trivial arguments.
 */

/* In-place AND of _value with the field mask. */
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

/* Mask _value and move it into field position. */
#define FIELD_VALUE(_name, _value) \
	(((_value) & (_name ## _MASK)) << (_name ## _SHIFT))

/* Clear the field in `value', then write `flag' into it. */
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT)); \
		(value) |= (((u64)(flag)) << (name ## _SHIFT)); \
	} while (0)

/* Extract the field from `value'. */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

/* MFW style: masks are in-place (already shifted). */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
	} while (0)

/* Convert a doorbell offset to its PWM address form. */
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
1238
/* Debug print definitions. Every message is tagged with the calling
 * function, line number and device name (empty string if the device
 * has no name yet).
 */

/* Unconditional error print (always emitted, regardless of dp_level). */
#define DP_ERR(cdev, fmt, ...)					\
	do {							\
		pr_err("[%s:%d(%s)]" fmt,			\
		       __func__, __LINE__,			\
		       DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
		       ## __VA_ARGS__);				\
	} while (0)

/* Emitted only when the device's dp_level is at or below NOTICE. */
#define DP_NOTICE(cdev, fmt, ...)				      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
								      \
		}						      \
	} while (0)

/* Emitted only when dp_level is at or below INFO. Note it still goes
 * through pr_notice(), not pr_info(), so the kernel log level matches
 * DP_NOTICE output.
 */
#define DP_INFO(cdev, fmt, ...)					      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
		}						      \
	} while (0)
1268
/* Emitted only when dp_level allows VERBOSE output AND the @module bit
 * is set in the device's dp_module mask (see enum DP_MODULE).
 * Fix: the @module macro argument is now parenthesized so expression
 * arguments (e.g. "QED_MSG_SPQ | QED_MSG_SP") bind correctly against
 * the '&' in the expansion.
 */
#define DP_VERBOSE(cdev, module, fmt, ...)				\
	do {								\
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
			     ((cdev)->dp_module & (module)))) {		\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)
1279
/* Verbosity threshold for the DP_* macros above; lower value means
 * more verbose. A message is printed when the device's dp_level is at
 * or below the macro's level.
 */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

/* Layout of the combined 32-bit debug configuration word: the top two
 * bits select NOTICE/INFO, the low 30 bits carry the verbose module
 * mask (see enum DP_MODULE).
 */
#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)
1291
/* Per-module verbosity bits, matched against (cdev)->dp_module by
 * DP_VERBOSE(). Gaps in the sequence (low 16 bits, 0x400000) are
 * presumably bits of removed or externally-owned modules — verify
 * against the rest of the driver before reusing one. Bits 30-31 are
 * taken by the QED_LOG_*_MASK level bits above.
 */
enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_LL2	= 0x1000000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_RDMA	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* to be added... free bits remain below QED_LOG_VERBOSE_MASK */
};
1306
/* Multi-function mode the adapter runs in.
 * NOTE(review): OVLAN = outer-VLAN based partitioning and NPAR = NIC
 * partitioning are inferred from the names — confirm against the MFW
 * documentation.
 */
enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};
1312
/* L2/port statistics shared by all device flavors; extended per-ASIC by
 * qed_eth_stats_bb / qed_eth_stats_ah (see struct qed_eth_stats).
 */
struct qed_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;	/* sic: "crtl" typo kept for API compat */
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};
1378
/* Extra frame-size buckets and counters that exist only on BB devices. */
struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

/* AH devices collapse all over-1518-byte frames into a single bucket. */
struct qed_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};
1397
/* Aggregate L2 statistics: the common block plus the per-ASIC
 * extension. Only one union member is meaningful, depending on
 * whether the device is a BB or an AH part.
 */
struct qed_eth_stats {
	struct qed_eth_stats_common common;

	union {
		struct qed_eth_stats_bb bb;
		struct qed_eth_stats_ah ah;
	};
};
1406
/* Flag returned by qed_sb_update_sb_idx() when the status block's
 * producer index advanced.
 */
#define QED_SB_IDX              0x0002

/* Protocol-index slots inside a status block: RX uses slot 0, the TX
 * slot for traffic class @tc follows immediately after.
 * Fix: @tc is now parenthesized so expression arguments (e.g. a
 * ternary) evaluate correctly inside the expansion.
 */
#define RX_PI           0
#define TX_PI(tc)       (RX_PI + 1 + (tc))
1411
/* Status-block accounting for a PF and the VFs it parents. */
struct qed_sb_cnt_info {
	/* Original, current, and free SBs for PF */
	int orig;
	int cnt;
	int free_cnt;

	/* Original, current and free SBs for child VFs */
	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};
1423
qed_sb_update_sb_idx(struct qed_sb_info * sb_info)1424 static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
1425 {
1426 u32 prod = 0;
1427 u16 rc = 0;
1428
1429 prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
1430 STATUS_BLOCK_PROD_INDEX_MASK;
1431 if (sb_info->sb_ack != prod) {
1432 sb_info->sb_ack = prod;
1433 rc |= QED_SB_IDX;
1434 }
1435
1436 /* Let SB update */
1437 return rc;
1438 }
1439
1440 /**
1441 * qed_sb_ack(): This function creates an update command for interrupts
1442 * that is written to the IGU.
1443 *
1444 * @sb_info: This is the structure allocated and
1445 * initialized per status block. Assumption is
1446 * that it was initialized using qed_sb_init
1447 * @int_cmd: Enable/Disable/Nop
1448 * @upd_flg: Whether igu consumer should be updated.
1449 *
1450 * Return: inline void.
1451 */
qed_sb_ack(struct qed_sb_info * sb_info,enum igu_int_cmd int_cmd,u8 upd_flg)1452 static inline void qed_sb_ack(struct qed_sb_info *sb_info,
1453 enum igu_int_cmd int_cmd,
1454 u8 upd_flg)
1455 {
1456 u32 igu_ack;
1457
1458 igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1459 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1460 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1461 (IGU_SEG_ACCESS_REG <<
1462 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1463
1464 DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
1465
1466 /* Both segments (interrupts & acks) are written to same place address;
1467 * Need to guarantee all commands will be received (in-order) by HW.
1468 */
1469 barrier();
1470 }
1471
/* Copy @size bytes from @data into device internal RAM at @addr using
 * 32-bit register writes. @p_hwfn is unused in this implementation
 * (kept for interface symmetry). Only whole dwords are written; a
 * trailing remainder of @size not divisible by 4 is silently dropped.
 */
static inline void __internal_ram_wr(void *p_hwfn, void __iomem *addr,
				     int size, u32 *data)
{
	u32 __iomem *dst = (u32 __iomem *)addr;
	unsigned int dwords = size / sizeof(*data);
	unsigned int i;

	for (i = 0; i < dwords; i++)
		DIRECT_REG_WR(&dst[i], data[i]);
}
1483
/* Convenience wrapper around __internal_ram_wr() for callers without a
 * hwfn context.
 */
static inline void internal_ram_wr(void __iomem *addr, int size, u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}
1490
/* RSS hash capability flags (bitmask): L3-only hashing per IP version,
 * plus L3+L4 variants for TCP and UDP.
 */
enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};
1499
#define QED_RSS_IND_TABLE_SIZE 128	/* entries in RSS indirection table */
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif /* _QED_IF_H */
1503