/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)             ((cdev)->b_is_vf)
#define IS_PF(cdev)             (!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)             (0)
#define IS_PF(cdev)             (1)
#define IS_PF_SRIOV(p_hwfn)     (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
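
/* Usage sketch (illustrative only, not part of this header's API): PF-only
 * SR-IOV paths are typically gated on these predicates, e.g.
 *
 *	if (IS_PF(cdev) && IS_PF_SRIOV(p_hwfn) && IS_PF_SRIOV_ALLOC(p_hwfn))
 *		qed_iov_setup(p_hwfn);
 *
 * where cdev/p_hwfn stand for whatever device/function context the caller
 * already holds.
 */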

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently a different number of
	 * Rx/Tx queues is not supported.
	 */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
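
/* Illustrative sketch (not part of this header's API) of how a PF-side
 * caller might populate the init parameters before bringing up a VF;
 * vf_idx and base_qzone are hypothetical locals used only for the example:
 *
 *	struct qed_iov_vf_init_params params = { 0 };
 *	int i;
 *
 *	params.rel_vf_id = vf_idx;
 *	params.num_queues = 4;
 *	for (i = 0; i < params.num_queues; i++) {
 *		params.req_rx_queue[i] = base_qzone + i;
 *		params.req_tx_queue[i] = base_qzone + i;
 *	}
 */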

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in the PCIe config
 * space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)
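
/* For example, on path/engine 1 of a dual-path (BB) device, where
 * QED_PATH_ID() is non-zero, a VF whose abs_vf_id is 5 is addressed as
 * 5 + MAX_NUM_VFS_BB; on path 0 the stored abs_vf_id is used as-is.
 */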

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and is used only for PFs that have
 * the SR-IOV capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address contiguously and split to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
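
/* Usage sketch (illustrative only): a deferred IOV task is requested by
 * passing one of these flags to qed_schedule_iov(), declared below, e.g.
 *
 *	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
 */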

extern const struct qed_iov_hv_ops qed_iov_ops_pass;

#ifdef CONFIG_QED_SRIOV
/**
 * qed_iov_is_valid_vfid(): Check if the given VF ID @rel_vf_id is valid
 *                          with respect to @b_enabled_only: if
 *                          @b_enabled_only is true, only an enabled VF ID
 *                          is valid; otherwise any VF ID below max_vfs is
 *                          valid.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID.
 * @b_enabled_only: Consider only enabled VFs.
 * @b_non_malicious: True iff the VF should also be validated as not malicious.
 *
 * Return: bool - true for a valid VF ID.
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);
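
/* Example (illustrative only): validating a VF ID taken from a management
 * request before touching the VF's state:
 *
 *	if (!qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, true))
 *		return -EINVAL;
 */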

/**
 * qed_iov_get_next_active_vf(): Given a VF index, return the index of the
 *                               next active VF (including @rel_vf_id itself).
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: VF ID.
 *
 * Return: MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * qed_iov_hw_info(): Read SR-IOV related information and allocate resources;
 *                    reads from configuration space, shmem, etc.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * qed_add_tlv(): Place a given TLV on the TLV buffer at the next offset.
 *
 * @p_hwfn: HW device data.
 * @offset: Offset.
 * @type: Type.
 * @length: Length.
 *
 * Return: Pointer to the newly placed TLV.
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
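
/* Usage sketch (illustrative only; mbx, resp, type and length stand for
 * whatever the caller holds): TLVs are placed back to back on a buffer and
 * the chain is closed with a list-end TLV, e.g. when preparing a PF reply:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	resp = qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */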

/**
 * qed_dp_tlv_list(): List the types and lengths of the TLVs on the buffer.
 *
 * @p_hwfn: HW device data.
 * @tlvs_list: TLVs list buffer.
 *
 * Return: Void.
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
 *
 * @p_hwfn: HW device data.
 * @p_data: Pointer to data.
 *
 * Return: Void.
 */
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
			      struct fw_err_data *p_data);

/**
 * qed_sriov_eqe_event(): Callback for SRIOV events.
 *
 * @p_hwfn: HW device data.
 * @opcode: Opcode.
 * @echo: Echo.
 * @data: Data.
 * @fw_return_code: FW return code.
 *
 * Return: Int.
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			union event_ring_data *data, u8 fw_return_code);

/**
 * qed_iov_alloc(): Allocate SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_setup(): Setup SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free(): Free SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free_hw_info(): Free SR-IOV related memory that was
 *                         allocated during hw_prepare.
 *
 * @cdev: Qed dev pointer.
 *
 * Return: Void.
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * qed_iov_mark_vf_flr(): Mark structs of VFs that have been FLR-ed.
 *
 * @p_hwfn: HW device data.
 * @disabled_vfs: Bitmask of all VFs on path that were FLRed.
 *
 * Return: true iff one of the PF's VFs got FLRed. false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
 *
 * @p_hwfn: HW device data.
 * @p_tlvs_list: Pointer to TLVs list.
 * @req_type: Type of TLV.
 *
 * Return: Pointer to TLV type if found, otherwise NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}

static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
					    struct fw_err_data *p_data)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
				      __le16 echo, union event_ring_data *data,
				      u8 fw_return_code)
{
	return 0;
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
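
/* Usage sketch (illustrative only): iterating over all active VFs of a PF;
 * resolving the index through pf_iov_info->vfs_array is just one way a
 * caller might use it:
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *		// ... operate on vf ...
 *	}
 */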

#endif