xref: /linux/drivers/net/ethernet/qlogic/qed/qed.h (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.7.1.20"

#define MAX_HWFNS_PER_DEVICE    (4)
#define NAME_SIZE 16
#define VER_SIZE 16

#define QED_WFQ_UNIT	100

/* CAU states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;

/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
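
/* Usage sketch (illustrative only): a caller typically derives a doorbell
 * offset for a connection and rings it through the DOORBELL() macro defined
 * further below, roughly:
 *
 *	u32 db_offset = qed_db_addr(cid, DQ_DEMS_LEGACY);
 *
 *	DOORBELL(p_hwfn->cdev, db_offset, db_value);
 *
 * DQ_DEMS_LEGACY and db_value stand in for whatever DEMS selector and
 * doorbell data a specific flow uses; they are placeholders here, not a
 * statement of this driver's exact call sites.
 */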

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 :		      \
	 (val == (cond2) ? true2 : def))
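
/* Worked example (assuming cache_shift == 6, i.e. 64-byte cache lines):
 * ALIGNED_TYPE_SIZE() rounds sizeof(type_name) up to the next cache-line
 * multiple, so a 40-byte structure yields 64 and a 100-byte structure
 * yields 128.  for_each_hwfn() simply walks the cdev->hwfns[] array:
 *
 *	int i;
 *
 *	for_each_hwfn(cdev, i) {
 *		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 *		...
 *	}
 */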

/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32	*init_val;
	bool	*b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

enum QED_RESOURCES {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_ILT,
	QED_MAX_RESC,
};

enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality	personality;

	/* Resource Allocation scheme results */
	u32				resc_start[QED_MAX_RESC];
	u32				resc_num[QED_MAX_RESC];
	u32				feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
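
/* Accessor sketch (illustrative): a PF's share of a global resource is the
 * half-open range [RESC_START(), RESC_START() + RESC_NUM()), e.g.:
 *
 *	u32 first_sb = RESC_START(p_hwfn, QED_SB);
 *	u32 num_sbs  = RESC_NUM(p_hwfn, QED_SB);
 *
 * FEAT_NUM() is the analogous per-feature count indexed by enum QED_FEATURE.
 */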

	u8				num_tc;
	u8				offload_tc;
	u8				non_offload_tc;

	u32				concrete_fid;
	u16				opaque_fid;
	u16				ovlan;
	u32				part_num[4];

	unsigned char			hw_mac_addr[ETH_ALEN];

	struct qed_igu_info		*p_igu_info;

	u32				port_mode;
	u32				hw_mode;
	unsigned long			device_capabilities;
};

struct qed_hw_cid_data {
	u32	cid;
	bool	b_cid_allocated;

	/* Additional identifiers */
	u16	opaque_fid;
	u8	vport_id;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex	mutex;

	u8		channel;

	dma_addr_t	completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32		*p_completion_word;

	dma_addr_t	intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32		*p_intermediate_buffer;

	dma_addr_t	dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32	min_speed;
	bool	configured;
};

struct qed_qm_info {
	struct init_qm_pq_params	*qm_pq_params;
	struct init_qm_vport_params	*qm_vport_params;
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
	u8				pure_lb_pq;
	u8				offload_pq;
	u8				pure_ack_pq;
	u8				vf_queues_offset;
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
	bool				pf_rl_en;
	bool				pf_wfq_en;
	bool				vport_rl_en;
	bool				vport_wfq_en;
	u8				pf_wfq;
	u32				pf_rl;
	struct qed_wfq_data		*wfq_data;
};

struct storm_stats {
	u32     address;
	u32     len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info	*fw_ver_info;
	const u8		*modes_tree_buf;
	union init_op		*init_ops;
	const u32		*arr_data;
	u32			init_ops_size;
};

struct qed_simd_fp_handler {
	void	*token;
	void	(*func)(void *);
};

struct qed_hwfn {
	struct qed_dev			*cdev;
	u8				my_id;          /* ID inside the PF */
#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
	u8				rel_pf_id;      /* Relative to engine */
	u8				abs_pf_id;
#define QED_PATH_ID(_p_hwfn)		((_p_hwfn)->abs_pf_id & 1)
	u8				port_id;
	bool				b_active;

	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	bool				first_on_engine;
	bool				hw_init_done;

	u8				num_funcs_on_engine;

	/* BAR access */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PTT pool */
	struct qed_ptt_pool		*p_ptt_pool;

	/* HW info */
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data		rt_data;

	/* SPQ */
	struct qed_spq			*p_spq;

	/* EQ */
	struct qed_eq			*p_eq;

	/* Consolidate Q */
	struct qed_consq		*p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct		*sp_dpc;
	bool				b_sp_dpc_enabled;

	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

	/* Protocol related */
	struct qed_pf_params		pf_params;

	/* Array of sb_info of all status blocks */
	struct qed_sb_info		*sbs_info[MAX_SB_PER_PF_MIMD];
	u16				num_sbs;

	struct qed_cxt_mngr		*p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool				b_int_enabled;
	bool				b_int_requested;
	/* True if the driver requested the link */
	bool				b_drv_link_init;

	struct qed_vf_iov		*vf_iov_info;
	struct qed_pf_iov		*pf_iov_info;
	struct qed_mcp_info		*mcp_info;

	struct qed_dcbx_info		*p_dcbx_info;

	struct qed_hw_cid_data		*p_tx_cids;
	struct qed_hw_cid_data		*p_rx_cids;

	struct qed_dmae_info		dmae_info;

	/* QM init */
	struct qed_qm_info		qm_info;
	struct qed_storm_stats		storm_stats;

	/* Buffer for unzipping firmware data */
	void				*unzip_buf;

	struct qed_simd_fp_handler	simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif

	struct z_stream_s		*stream;
};

struct pci_params {
	int		pm_cap;

	unsigned long	mem_start;
	unsigned long	mem_end;
	unsigned int	irq;
	u8		pf_num;
};

struct qed_int_param {
	u32	int_mode;
	u8	num_vectors;
	u8	min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param	in;
	struct qed_int_param	out;
	struct msix_entry	*msix_table;
	bool			fp_initialized;
	u8			fp_msix_base;
	u8			fp_msix_cnt;
};

struct qed_dev {
	u32	dp_module;
	u8	dp_level;
	char	name[NAME_SIZE];

	u8	type;
#define QED_DEV_TYPE_BB (0 << 0)
#define QED_DEV_TYPE_AH BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))

#define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16	vendor_id;
	u16	device_id;

	u16	chip_num;
#define CHIP_NUM_MASK                   0xffff
#define CHIP_NUM_SHIFT                  16

	u16	chip_rev;
#define CHIP_REV_MASK                   0xf
#define CHIP_REV_SHIFT                  12
#define CHIP_REV_IS_A0(_cdev)   (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)

	u16				chip_metal;
#define CHIP_METAL_MASK                 0xff
#define CHIP_METAL_SHIFT                4

	u16				chip_bond_id;
#define CHIP_BOND_ID_MASK               0xf
#define CHIP_BOND_ID_SHIFT              0
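
/* Illustrative decode (an assumption based on the mask/shift pairs above,
 * not a documented register layout): a raw 32-bit chip-identification word
 * would split as num in bits 31:16, rev in 15:12, metal in 11:4 and
 * bond_id in 3:0, e.g.:
 *
 *	chip_num = (raw >> CHIP_NUM_SHIFT) & CHIP_NUM_MASK;
 *	chip_rev = (raw >> CHIP_REV_SHIFT) & CHIP_REV_MASK;
 */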

	u8				num_engines;
	u8				num_ports_in_engines;
	u8				num_funcs_in_port;

	u8				path_id;
	enum qed_mf_mode		mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int				pcie_width;
	int				pcie_speed;
	u8				ver_str[VER_SIZE];

	/* Add MF related configuration */
	u8				mcp_rev;
	u8				boot_mode;

	u8				wol;

	u32				int_mode;
	enum qed_coalescing_mode	int_coalescing_mode;
	u8				rx_coalesce_usecs;
	u8				tx_coalesce_usecs;

	/* Start BAR offset of first hwfn */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PCI */
	u8				cache_shift;

	/* Init */
	const struct iro		*iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)

	unsigned long			tunn_mode;

	bool				b_is_vf;
	u32				drv_type;

	struct qed_eth_stats		*reset_stats;
	struct qed_fw_data		*fw_data;

	u32				mcp_nvm_resp;

	/* Linux specific here */
	struct  qede_dev		*edev;
	struct  pci_dev			*pdev;
	int				msg_enable;

	struct pci_params		pci_params;

	struct qed_int_params		int_params;

	u8				protocol;
#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops	*common;
		struct qed_eth_cb_ops		*eth;
	} protocol_ops;
	void				*ops_cookie;

	const struct firmware		*firmware;
};

#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param cdev
 * @param concrete_fid
 *
 * @return u8 - the sw function id.
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);

	return pfid;
}
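
/* Usage sketch (illustrative): the helper is handy when only the concrete
 * FID is at hand, e.g. mapping hw_info.concrete_fid back to a PF index:
 *
 *	u8 sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
 *					   p_hwfn->hw_info.concrete_fid);
 *
 * This is one plausible call pattern, not a claim about the driver's actual
 * call sites.
 */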

#define PURE_LB_TC 8

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
						(cdev->regview) + \
							 (offset))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))
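
/* Usage sketch (illustrative): the accessors above wrap readl()/writel()
 * against the regview/doorbell BARs, so a register read-modify-write would
 * look roughly like:
 *
 *	u32 val = REG_RD(cdev, reg_offset);
 *
 *	REG_WR(cdev, reg_offset, val | some_bit);
 *
 * reg_offset and some_bit are placeholders, not real register definitions.
 */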

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);

int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#endif /* _QED_H */