xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_iov_api.h (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #ifndef __ECORE_SRIOV_API_H__
30 #define __ECORE_SRIOV_API_H__
31 
32 #include "common_hsi.h"
33 #include "ecore_status.h"
34 
35 #define ECORE_ETH_VF_NUM_MAC_FILTERS 1
36 #define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
37 #define ECORE_VF_ARRAY_LENGTH (3)
38 
39 #define IS_VF(p_dev)		((p_dev)->b_is_vf)
40 #define IS_PF(p_dev)		(!((p_dev)->b_is_vf))
41 #ifdef CONFIG_ECORE_SRIOV
42 #define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->p_dev->p_iov_info))
43 #else
44 #define IS_PF_SRIOV(p_hwfn)	(0)
45 #endif
46 #define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))
47 #define IS_PF_PDA(p_hwfn)	0 /* @@TBD Michalk */
48 
49 /* @@@ TBD MichalK - what should this number be */
50 #define ECORE_MAX_VF_CHAINS_PER_PF 16
51 
52 /* vport update extended feature tlvs flags */
53 enum ecore_iov_vport_update_flag {
54 	ECORE_IOV_VP_UPDATE_ACTIVATE		= 0,
55 	ECORE_IOV_VP_UPDATE_VLAN_STRIP		= 1,
56 	ECORE_IOV_VP_UPDATE_TX_SWITCH		= 2,
57 	ECORE_IOV_VP_UPDATE_MCAST		= 3,
58 	ECORE_IOV_VP_UPDATE_ACCEPT_PARAM	= 4,
59 	ECORE_IOV_VP_UPDATE_RSS			= 5,
60 	ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN	= 6,
61 	ECORE_IOV_VP_UPDATE_SGE_TPA		= 7,
62 	ECORE_IOV_VP_UPDATE_MAX			= 8,
63 };
64 
65 /* PF to VF STATUS is part of vfpf-channel API
66  * and must be forward compatible */
67 enum ecore_iov_pf_to_vf_status {
68 	PFVF_STATUS_WAITING = 0,
69 	PFVF_STATUS_SUCCESS,
70 	PFVF_STATUS_FAILURE,
71 	PFVF_STATUS_NOT_SUPPORTED,
72 	PFVF_STATUS_NO_RESOURCE,
73 	PFVF_STATUS_FORCED,
74 	PFVF_STATUS_MALICIOUS,
75 };
76 
77 struct ecore_mcp_link_params;
78 struct ecore_mcp_link_state;
79 struct ecore_mcp_link_capabilities;
80 
81 /* These defines are used by the hw-channel; should never change order */
82 #define VFPF_ACQUIRE_OS_LINUX (0)
83 #define VFPF_ACQUIRE_OS_WINDOWS (1)
84 #define VFPF_ACQUIRE_OS_ESX (2)
85 #define VFPF_ACQUIRE_OS_SOLARIS (3)
86 #define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
87 #define VFPF_ACQUIRE_OS_FREEBSD (5)
88 
89 struct ecore_vf_acquire_sw_info {
90 	u32 driver_version;
91 	u8 os_type;
92 };
93 
94 struct ecore_public_vf_info {
95 	/* These copies will later be reflected in the bulletin board,
96 	 * but this copy should be newer.
97 	 */
98 	u8 forced_mac[ETH_ALEN];
99 	u16 forced_vlan;
100 };
101 
102 struct ecore_iov_vf_init_params {
103 	u16 rel_vf_id;
104 
105 	/* Number of requested queues; different numbers of Rx and Tx
106 	 * queues are not currently supported.
107 	 */
108 	/* TODO - remove this limitation */
109 	u16 num_queues;
110 
111 	/* Allow the client to choose which qzones to use for Rx/Tx,
112 	 * and which queue_base to use for Tx queues on a per-queue basis.
113 	 * Note that the values are relative to the PF's resources.
114 	 */
115 	u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
116 	u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
117 
118 	u8 vport_id;
119 
120 	/* Should be set if RSS is going to be used by the VF */
121 	u8 rss_eng_id;
122 };
123 
124 #ifdef CONFIG_ECORE_SW_CHANNEL
125 /* This is SW channel related only... */
126 enum mbx_state {
127 	VF_PF_UNKNOWN_STATE			= 0,
128 	VF_PF_WAIT_FOR_START_REQUEST		= 1,
129 	VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST	= 2,
130 	VF_PF_REQUEST_IN_PROCESSING		= 3,
131 	VF_PF_RESPONSE_READY			= 4,
132 };
133 
134 struct ecore_iov_sw_mbx {
135 	enum mbx_state		mbx_state;
136 
137 	u32			request_size;
138 	u32			request_offset;
139 
140 	u32			response_size;
141 	u32			response_offset;
142 };
143 
144 /**
145  * @brief Get the VF's SW-channel mailbox
146  *
147  * @param p_hwfn
148  * @param rel_vf_id
149  *
150  * @return struct ecore_iov_sw_mbx*
151  */
152 struct ecore_iov_sw_mbx*
153 ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
154 			u16 rel_vf_id);
155 #endif
156 
157 /* This struct is part of ecore_dev and contains data relevant to all hwfns;
158  * initialized only if the SR-IOV capability is exposed in the PCIe config space.
159  */
160 struct ecore_hw_sriov_info {
161 	/* standard SRIOV capability fields, mostly for debugging */
162 	int	pos;		/* capability position */
163 	int	nres;		/* number of resources */
164 	u32	cap;		/* SR-IOV Capabilities */
165 	u16	ctrl;		/* SR-IOV Control */
166 	u16	total_vfs;	/* total VFs associated with the PF */
167 	u16	num_vfs;        /* number of vfs that have been started */
168 	u16	initial_vfs;    /* initial VFs associated with the PF */
169 	u16	nr_virtfn;	/* number of VFs available */
170 	u16	offset;		/* first VF Routing ID offset */
171 	u16	stride;		/* following VF stride */
172 	u16	vf_device_id;	/* VF device id */
173 	u32	pgsz;		/* page size for BAR alignment */
174 	u8	link;		/* Function Dependency Link */
175 
176 	u32	first_vf_in_pf;
177 };
178 
179 #ifdef CONFIG_ECORE_SRIOV
180 #ifndef LINUX_REMOVE
181 /**
182  * @brief Mark/clear all VFs before/after an incoming PCIe SR-IOV
183  *        disable.
184  *
185  * @param p_dev
186  * @param to_disable
187  */
188 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
189 				  u8 to_disable);
190 
191 /**
192  * @brief Mark/clear a chosen VF before/after an incoming PCIe
193  *        SR-IOV disable.
194  *
195  * @param p_dev
196  * @param rel_vf_id
197  * @param to_disable
198  */
199 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
200 				 u16 rel_vf_id,
201 				 u8 to_disable);
202 
203 /**
204  * @brief ecore_iov_init_hw_for_vf - initialize the HW to enable
205  *        access for a VF, including preparing the IGU for VF
206  *        access. Needs to be called AFTER the HW is initialized
207  *        and BEFORE the VF is loaded inside the VM.
208  *
209  * @param p_hwfn
210  * @param p_ptt
211  * @param p_params
212  *
213  * @return enum _ecore_status_t
214  */
215 enum _ecore_status_t
216 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
217 			 struct ecore_ptt *p_ptt,
218 			 struct ecore_iov_vf_init_params *p_params);
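/* Example (illustrative sketch, not part of the API): a PF-side flow that
 * prepares HW access for VF 0 with two queue-pairs might look roughly like
 * the following. The qzone indices (4 and 5), vport id and RSS engine id
 * are made-up values; real callers derive them from the PF's resource
 * allocation. The same queue count is used for both Rx and Tx.
 *
 *	struct ecore_iov_vf_init_params vf_params;
 *	enum _ecore_status_t rc;
 *
 *	OSAL_MEMSET(&vf_params, 0, sizeof(vf_params));
 *	vf_params.rel_vf_id = 0;
 *	vf_params.num_queues = 2;
 *	vf_params.req_rx_queue[0] = 4;
 *	vf_params.req_rx_queue[1] = 5;
 *	vf_params.req_tx_queue[0] = 4;
 *	vf_params.req_tx_queue[1] = 5;
 *	vf_params.vport_id = 1;
 *	vf_params.rss_eng_id = 1;
 *
 *	rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &vf_params);
 *	if (rc != ECORE_SUCCESS)
 *		goto err;
 */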
219 
220 /**
221  * @brief ecore_iov_process_mbx_req - process a request received
222  *        from the VF
223  *
224  * @param p_hwfn
225  * @param p_ptt
226  * @param vfid
227  */
228 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
229 			       struct ecore_ptt *p_ptt,
230 			       int vfid);
231 
232 /**
233  * @brief ecore_iov_release_hw_for_vf - called once the upper layer
234  *        knows the VF is done; any resources allocated for the VF
235  *        can be released at this point. Must only be done once the
236  *        VF is no longer loaded in the VM.
237  *
238  * @param p_hwfn
239  * @param p_ptt
240  * @param rel_vf_id
241  *
242  * @return enum _ecore_status_t
243  */
244 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
245 						 struct ecore_ptt *p_ptt,
246 						 u16 rel_vf_id);
247 
248 /**
249  * @brief ecore_iov_set_vf_ctx - set a context for a given VF
250  *
251  * @param p_hwfn
252  * @param vf_id
253  * @param ctx
254  *
255  * @return enum _ecore_status_t
256  */
257 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
258 					  u16 vf_id,
259 					  void *ctx);
260 
261 /**
262  * @brief FLR cleanup for all VFs
263  *
264  * @param p_hwfn
265  * @param p_ptt
266  *
267  * @return enum _ecore_status_t
268  */
269 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
270 					      struct ecore_ptt *p_ptt);
271 
272 /**
273  * @brief FLR cleanup for single VF
274  *
275  * @param p_hwfn
276  * @param p_ptt
277  * @param rel_vf_id
278  *
279  * @return enum _ecore_status_t
280  */
281 enum _ecore_status_t
282 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
283 				struct ecore_ptt *p_ptt,
284 				u16 rel_vf_id);
285 
286 /**
287  * @brief Update the bulletin with link information. Notice this does NOT
288  *        send a bulletin update, only updates the PF's bulletin.
289  *
290  * @param p_hwfn
291  * @param vfid
292  * @param params - the link params to use for the VF link configuration
293  * @param link - the link output to use for the VF link configuration
294  * @param p_caps - the link default capabilities.
295  */
296 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
297 			u16 vfid,
298 			struct ecore_mcp_link_params *params,
299 			struct ecore_mcp_link_state *link,
300 			struct ecore_mcp_link_capabilities *p_caps);
301 
302 /**
303  * @brief Returns link information as perceived by VF.
304  *
305  * @param p_hwfn
306  * @param vfid
307  * @param params - the link params visible to the VF.
308  * @param link - the link state visible to the VF.
309  * @param p_caps - the default link capabilities visible to the VF.
310  */
311 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
312 			u16 vfid,
313 			struct ecore_mcp_link_params *params,
314 			struct ecore_mcp_link_state *link,
315 			struct ecore_mcp_link_capabilities *p_caps);
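/* Example (illustrative sketch): before posting its bulletin, the PF
 * typically mirrors its own link configuration to the VF. The p_params,
 * p_link and p_caps pointers below are assumed to already hold the PF's
 * current link configuration (e.g. as obtained from the MCP layer); they
 * are not provided by this API.
 *
 *	ecore_iov_set_link(p_hwfn, vfid, p_params, p_link, p_caps);
 *	rc = ecore_iov_post_vf_bulletin(p_hwfn, vfid, p_ptt);
 */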
316 
317 /**
318  * @brief Return whether the VF has a pending FLR
319  *
320  * @param p_hwfn
321  * @param rel_vf_id
322  *
323  * @return bool
324  */
325 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
326 				 u16 rel_vf_id);
327 #endif
328 
329 /**
330  * @brief Check if the given VF ID @rel_vf_id is valid
331  *        w.r.t. the @b_enabled_only value:
332  *        if b_enabled_only = true, only an enabled VF ID is valid;
333  *        otherwise any VF ID less than max_vfs is valid.
334  *
335  * @param p_hwfn
336  * @param rel_vf_id - Relative VF ID
337  * @param b_enabled_only - consider only enabled VF
338  * @param b_non_malicious - true iff we want to validate vf isn't malicious.
339  *
340  * @return bool - true for valid VF ID
341  */
342 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
343 			     int rel_vf_id,
344 			     bool b_enabled_only, bool b_non_malicious);
345 
346 #ifndef LINUX_REMOVE
347 /**
348  * @brief Get VF's public info structure
349  *
350  * @param p_hwfn
351  * @param vfid - Relative VF ID
352  * @param b_enabled_only - false to access the info even if the VF is disabled
353  *
354  * @return struct ecore_public_vf_info *
355  */
356 struct ecore_public_vf_info*
357 ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
358 			     u16 vfid, bool b_enabled_only);
359 
360 /**
361  * @brief fills a bitmask of all VFs which have pending unhandled
362  *        messages.
363  *
364  * @param p_hwfn
365  */
366 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
367 				     u64 *events);
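/* Example (illustrative sketch): a PF servicing loop that drains pending
 * VF->PF messages could be structured as below. It only uses calls declared
 * in this header; how the PF learns that events are pending (EQ/slowpath
 * handling) is outside the scope of this file, and the bit layout of the
 * events bitmask is assumed to be one bit per relative VF ID.
 *
 *	u64 events[ECORE_VF_ARRAY_LENGTH];
 *	u16 vf;
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	ecore_for_each_vf(p_hwfn, vf) {
 *		if (!(events[vf / 64] & (1ULL << (vf % 64))))
 *			continue;
 *		ecore_iov_process_mbx_req(p_hwfn, p_ptt, vf);
 *	}
 */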
368 
369 /**
370  * @brief Copy VF's message to PF's buffer
371  *
372  * @param p_hwfn
373  * @param ptt
374  * @param vfid
375  *
376  * @return enum _ecore_status_t
377  */
378 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
379 					   struct ecore_ptt *ptt,
380 					   int vfid);
381 /**
382  * @brief Set the forced MAC address in the PF's copy of the bulletin board
383  *        and configure FW/HW to support the configuration.
384  *
385  * @param p_hwfn
386  * @param mac
387  * @param vfid
388  */
389 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
390 				       u8 *mac, int vfid);
391 
392 /**
393  * @brief Set the MAC address in the PF's copy of the bulletin board without
394  *        configuring FW/HW.
395  *
396  * @param p_hwfn
397  * @param mac
398  * @param vfid
399  */
400 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
401 						u8 *mac, int vfid);
402 
403 /**
404  * @brief Set the VF's default behaviour when no VLANs are configured for it:
405  *        whether to accept only untagged traffic or all traffic.
406  *        Must be called prior to the VF's vport-start.
407  *
408  * @param p_hwfn
409  * @param b_untagged_only
410  * @param vfid
411  *
412  * @return ECORE_SUCCESS if configuration would stick.
413  */
414 enum _ecore_status_t
415 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
416 					       bool b_untagged_only,
417 					       int vfid);
418 
419 /**
420  * @brief Get the VF's opaque FID.
421  *
422  * @param p_hwfn
423  * @param vfid
424  * @param opaque_fid
425  */
426 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
427 				  u16 *opaque_fid);
428 
429 /**
430  * @brief Set the forced VLAN [pvid] in the PF's copy of the bulletin board
431  *        and configure FW/HW to support the configuration.
432  *        Setting a pvid of 0 clears the feature.
433  * @param p_hwfn
434  * @param pvid
435  * @param vfid
436  */
437 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
438 					u16 pvid, int vfid);
439 
440 /**
441  * @brief Check if VF has VPORT instance. This can be used
442  *	  to check if VPORT is active.
443  *
444  * @param p_hwfn
445  */
446 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
447 
448 /**
449  * @brief PF posts the bulletin to the VF
450  *
451  * @param p_hwfn
452  * @param vfid
453  * @param p_ptt
454  *
455  * @return enum _ecore_status_t
456  */
457 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
458 						int vfid,
459 						struct ecore_ptt *p_ptt);
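/* Example (illustrative sketch): enforcing a MAC address and a pvid on a VF
 * and then publishing the change. The MAC bytes and VLAN 100 are arbitrary
 * placeholder values.
 *
 *	u8 mac[ETH_ALEN] = {0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01};
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, vfid);
 *	rc = ecore_iov_post_vf_bulletin(p_hwfn, vfid, p_ptt);
 */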
460 
461 /**
462  * @brief Check if given VF (@vfid) is marked as stopped
463  *
464  * @param p_hwfn
465  * @param vfid
466  *
467  * @return bool : true if stopped
468  */
469 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
470 
471 /**
472  * @brief Configure VF anti-spoofing
473  *
474  * @param p_hwfn
475  * @param vfid
476  * @param val - spoofchk value - true/false
477  *
478  * @return enum _ecore_status_t
479  */
480 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
481 					    int vfid, bool val);
482 
483 /**
484  * @brief Get VF's configured spoof value.
485  *
486  * @param p_hwfn
487  * @param vfid
488  *
489  * @return bool - spoofchk value - true/false
490  */
491 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
492 
493 /**
494  * @brief Check for SRIOV sanity by PF.
495  *
496  * @param p_hwfn
497  * @param vfid
498  *
499  * @return bool - true if the sanity checks pass, else false
500  */
501 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
502 
503 /**
504  * @brief Get the number of VF chains.
505  *
506  * @param p_hwfn
507  *
508  * @return u8
509  */
510 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
511 
512 /**
513  * @brief Get the VF's request mailbox params
514  *
515  * @param p_hwfn
516  * @param rel_vf_id
517  * @param pp_req_virt_addr
518  * @param p_req_virt_size
519  */
520 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
521 					  u16 rel_vf_id,
522 					  void **pp_req_virt_addr,
523 					  u16 *p_req_virt_size);
524 
525 /**
526  * @brief Get the VF's reply mailbox params
527  *
528  * @param p_hwfn
529  * @param rel_vf_id
530  * @param pp_reply_virt_addr
531  * @param p_reply_virt_size
532  */
533 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
534 					    u16	rel_vf_id,
535 					    void **pp_reply_virt_addr,
536 					    u16	*p_reply_virt_size);
537 
538 /**
539  * @brief Validate if the given length is a valid vfpf message
540  *        length
541  *
542  * @param length
543  *
544  * @return bool
545  */
546 bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
547 
548 /**
549  * @brief Return the max pfvf message length
550  *
551  * @return u32
552  */
553 u32 ecore_iov_pfvf_msg_length(void);
554 
555 /**
556  * @brief Returns forced MAC address if one is configured
557  *
558  * @param p_hwfn
559  * @param rel_vf_id
560  *
561  * @return OSAL_NULL if no MAC is forced; otherwise, the forced MAC.
562  */
563 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
564 				      u16 rel_vf_id);
565 
566 /**
567  * @brief Returns pvid if one is configured
568  *
569  * @param p_hwfn
570  * @param rel_vf_id
571  *
572  * @return 0 if no pvid is configured, otherwise the pvid.
573  */
574 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
575 				       u16 rel_vf_id);
576 /**
577  * @brief Configure the VF's Tx rate
578  *
579  * @param p_hwfn
580  * @param p_ptt
581  * @param vfid
582  * @param val - tx rate value in Mb/sec.
583  *
584  * @return enum _ecore_status_t
585  */
586 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
587 						 struct ecore_ptt *p_ptt,
588 						 int vfid, int val);
589 
590 /**
591  * @brief - Retrieves the statistics associated with a VF
592  *
593  * @param p_hwfn
594  * @param p_ptt
595  * @param vfid
596  * @param p_stats - this will be filled with the VF statistics
597  *
598  * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
599  */
600 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
601 					    struct ecore_ptt *p_ptt,
602 					    int vfid,
603 					    struct ecore_eth_stats *p_stats);
604 
605 /**
606  * @brief - Retrieves the number of Rx queue chains
607  *
608  * @param p_hwfn
609  * @param rel_vf_id
610  *
611  * @return the number of Rx queue chains.
612  */
613 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
614 			     u16 rel_vf_id);
615 
616 /**
617  * @brief - Retrieves the number of active Rx queue chains
618  *
619  * @param p_hwfn
620  * @param rel_vf_id
621  *
622  * @return
623  */
624 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
625 				    u16 rel_vf_id);
626 
627 /**
628  * @brief - Retrieves ctx pointer
629  *
630  * @param p_hwfn
631  * @param rel_vf_id
632  *
633  * @return
634  */
635 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
636 			   u16 rel_vf_id);
637 
638 /**
639  * @brief - Retrieves the VF's number of SBs
640  *
641  * @param p_hwfn
642  * @param rel_vf_id
643  *
644  * @return
645  */
646 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
647 			    u16 rel_vf_id);
648 
649 /**
650  * @brief - Return true if the VF is waiting for acquire
651  *
652  * @param p_hwfn
653  * @param rel_vf_id
654  *
655  * @return
656  */
657 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
658 				      u16 rel_vf_id);
659 
660 /**
661  * @brief - Return true if the VF is acquired but not initialized
662  *
663  * @param p_hwfn
664  * @param rel_vf_id
665  *
666  * @return
667  */
668 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
669 					      u16 rel_vf_id);
670 
671 /**
672  * @brief - Return true if the VF is acquired and initialized
673  *
674  * @param p_hwfn
675  * @param rel_vf_id
676  *
677  * @return
678  */
679 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
680 				 u16 rel_vf_id);
681 
682 /**
683  * @brief - Return true if the VF has started in FW
684  *
685  * @param p_hwfn
686  * @param rel_vf_id
687  *
688  * @return
689  */
690 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
691 			     u16 rel_vf_id);
692 
693 /**
694  * @brief - Get the VF's configured vport min rate.
695  * @param p_hwfn
696  * @param vfid
697  *
698  * @return - rate in Mbps
699  */
700 int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
701 
702 /**
703  * @brief - Configure the min rate for the VF's vport.
704  * @param p_dev
705  * @param vfid
706  * @param rate - rate in Mbps
707  *
708  * @return
709  */
710 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
711 						     int vfid, u32 rate);
712 
713 #endif
714 
715 /**
716  * @brief ecore_iov_pf_configure_vf_queue_coalesce - PF configures coalescing
717  *    parameters of a VF's Rx and Tx queues.
718  *    While the API allows setting coalescing per-qid, all queues sharing an SB
719  *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
720  *    otherwise the configuration would break.
721  *
722  * @param p_hwfn
723  * @param rx_coal - Rx coalesce value in microseconds.
724  * @param tx_coal - Tx coalesce value in microseconds.
725  * @param vf_id
726  * @param qid
727  *
728  * @return enum _ecore_status_t
729  **/
730 enum _ecore_status_t
731 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
732 					 u16 rx_coal, u16 tx_coal,
733 					 u16 vf_id, u16 qid);
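/* Example (illustrative sketch): setting a 64us Rx / 128us Tx coalescing
 * value on the first two queues of VF 0. As noted above, qids sharing a
 * status block must fall in the same range for the configuration to hold.
 *
 *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128, 0, 0);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128,
 *							      0, 1);
 */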
734 
735 /**
736  * @brief - Given a VF index, return the index of the next active VF (including that index).
737  *
738  * @param p_hwfn
739  * @param rel_vf_id
740  *
741  * @return MAX_NUM_VFS_E4 if there are no further active VFs, otherwise the index.
742  */
743 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
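/**
 * @brief Update the PF's copy of the VF bulletin with the UDP ports used
 *        for VXLAN and GENEVE tunnel offload.
 *
 * @param p_hwfn
 * @param vfid
 * @param vxlan_port
 * @param geneve_port
 */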
744 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
745 				      u16 vxlan_port, u16 geneve_port);
746 
747 #ifdef CONFIG_ECORE_SW_CHANNEL
748 /**
749  * @brief Set whether the PF should communicate with the VF using the SW or HW channel.
750  *        Needs to be called for an enabled VF before acquire is over
751  *        [the latest good point for doing that is OSAL_IOV_VF_ACQUIRE()].
752  *
753  * @param p_hwfn
754  * @param vfid - relative vf index
755  * @param b_is_hw - true iff PF is to use HW channel for communication
756  */
757 void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
758 				 bool b_is_hw);
759 #endif
760 #else
761 #ifndef LINUX_REMOVE
762 static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev OSAL_UNUSED *p_dev, u8 OSAL_UNUSED to_disable) {}
763 static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_dev OSAL_UNUSED *p_dev, u16 OSAL_UNUSED rel_vf_id, u8 OSAL_UNUSED to_disable) {}
764 static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, struct ecore_iov_vf_init_params OSAL_UNUSED *p_params) {return ECORE_INVAL;}
765 static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid) {}
766 static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, u16 OSAL_UNUSED rel_vf_id) {return ECORE_SUCCESS;}
767 static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vf_id, OSAL_UNUSED void *ctx) {return ECORE_INVAL;}
768 static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt) {return ECORE_INVAL;}
769 static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, u16 OSAL_UNUSED rel_vf_id) {return ECORE_INVAL;}
770 static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
771 static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
772 static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
773 #endif
774 static OSAL_INLINE bool
775 ecore_iov_is_valid_vfid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED rel_vf_id,
776 			bool OSAL_UNUSED b_enabled_only,
777 			bool OSAL_UNUSED b_non_malicious)
778 {
779 	return false;
780 }
781 #ifndef LINUX_REMOVE
782 static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}
783 static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vfid) {}
784 static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u64 OSAL_UNUSED *events) {}
785 static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *ptt, int OSAL_UNUSED vfid) {return ECORE_INVAL;}
786 static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *mac, int OSAL_UNUSED vfid) {}
787 static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *mac, OSAL_UNUSED int vfid) {return ECORE_INVAL;}
788 static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn OSAL_UNUSED *p_hwfn, bool OSAL_UNUSED b_untagged_only, int OSAL_UNUSED vfid) {return ECORE_INVAL;}
789 static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED *opaque_fid) {}
790 static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED pvid, int OSAL_UNUSED vfid) {}
791 static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
792 static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, struct ecore_ptt OSAL_UNUSED *p_ptt) {return ECORE_INVAL;}
793 static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
794 static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, bool OSAL_UNUSED val) {return ECORE_INVAL;}
795 static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
796 static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
797 static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return 0;}
798 static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id, void OSAL_UNUSED **pp_req_virt_addr, u16 OSAL_UNUSED *p_req_virt_size) {}
799 static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id, void OSAL_UNUSED **pp_reply_virt_addr, u16 OSAL_UNUSED *p_reply_virt_size) {}
800 static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 OSAL_UNUSED length) {return false;}
801 static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void) {return 0;}
802 static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return OSAL_NULL;}
803 static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
804 static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, int OSAL_UNUSED val) { return ECORE_INVAL; }
805 static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, struct ecore_eth_stats OSAL_UNUSED *p_stats) { return ECORE_INVAL; }
806 static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
807 static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
808 static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return OSAL_NULL;}
809 static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
810 static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
811 static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
812 static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
813 static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) { return 0; }
814 static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev OSAL_UNUSED *p_dev, int OSAL_UNUSED vfid, OSAL_UNUSED u32 rate) { return ECORE_INVAL; }
815 #endif
816 static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED vxlan_port, u16 OSAL_UNUSED geneve_port) { return; }
817 static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) { return MAX_NUM_VFS_E4; }
818 
819 #ifdef CONFIG_ECORE_SW_CHANNEL
820 static OSAL_INLINE void
821 ecore_iov_set_vf_hw_channel(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
822 			    int OSAL_UNUSED vfid, bool OSAL_UNUSED b_is_hw) {}
823 #endif
824 #endif
825 
826 #define ecore_for_each_vf(_p_hwfn, _i)					\
827 	for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0);		\
828 	     _i < MAX_NUM_VFS_E4;					\
829 	     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
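/* Example (illustrative sketch): iterating over all active VFs, e.g. to act
 * only on the ones that are fully initialized.
 *
 *	u16 vf;
 *
 *	ecore_for_each_vf(p_hwfn, vf) {
 *		if (!ecore_iov_is_vf_initialized(p_hwfn, vf))
 *			continue;
 *		... per-VF work ...
 *	}
 */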
830 
831 #endif
832