xref: /freebsd/sys/dev/ice/ice_switch.c (revision ebacd8013fe5f7fdf9f6a5b286f6680dd2891036)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2022, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 #include "ice_common.h"
34 #include "ice_switch.h"
35 #include "ice_flex_type.h"
36 #include "ice_flow.h"
37 
38 #define ICE_ETH_DA_OFFSET		0
39 #define ICE_ETH_ETHTYPE_OFFSET		12
40 #define ICE_ETH_VLAN_TCI_OFFSET		14
41 #define ICE_MAX_VLAN_ID			0xFFF
42 #define ICE_IPV6_ETHER_ID		0x86DD
43 #define ICE_PPP_IPV6_PROTO_ID		0x0057
44 #define ICE_ETH_P_8021Q			0x8100
45 
46 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
47  * struct to configure any switch filter rules.
48  * {DA (6 bytes), SA(6 bytes),
49  * Ether type (2 bytes for header without VLAN tag) OR
50  * VLAN tag (4 bytes for header with VLAN tag) }
51  *
52  * Word on Hardcoded values
53  * byte 0 = 0x2: to identify it as locally administered DA MAC
54  * byte 6 = 0x2: to identify it as locally administered SA MAC
55  * byte 12 = 0x81 & byte 13 = 0x00:
56  *	In case of VLAN filter first two bytes defines ether type (0x8100)
57  *	and remaining two bytes are placeholder for programming a given VLAN ID
58  *	In case of Ether type filter it is treated as header without VLAN tag
59  *	and byte 12 and 13 is used to program a given Ether type instead
60  */
61 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
62 							0x2, 0, 0, 0, 0, 0,
63 							0x81, 0, 0, 0};
64 
65 static bool
66 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
67 
68 /**
69  * ice_init_def_sw_recp - initialize the recipe book keeping tables
70  * @hw: pointer to the HW struct
71  * @recp_list: pointer to sw recipe list
72  *
73  * Allocate memory for the entire recipe table and initialize the structures/
74  * entries corresponding to basic recipes.
75  */
76 enum ice_status
77 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
78 {
79 	struct ice_sw_recipe *recps;
80 	u8 i;
81 
82 	recps = (struct ice_sw_recipe *)
83 		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
84 	if (!recps)
85 		return ICE_ERR_NO_MEMORY;
86 
87 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
88 		recps[i].root_rid = i;
89 		INIT_LIST_HEAD(&recps[i].filt_rules);
90 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
91 		INIT_LIST_HEAD(&recps[i].rg_list);
92 		ice_init_lock(&recps[i].filt_rule_lock);
93 	}
94 
95 	*recp_list = recps;
96 
97 	return ICE_SUCCESS;
98 }
99 
100 /**
101  * ice_aq_get_sw_cfg - get switch configuration
102  * @hw: pointer to the hardware structure
103  * @buf: pointer to the result buffer
104  * @buf_size: length of the buffer available for response
105  * @req_desc: pointer to requested descriptor
106  * @num_elems: pointer to number of elements
107  * @cd: pointer to command details structure or NULL
108  *
109  * Get switch configuration (0x0200) to be placed in buf.
110  * This admin command returns information such as initial VSI/port number
111  * and switch ID it belongs to.
112  *
113  * NOTE: *req_desc is both an input/output parameter.
114  * The caller of this function first calls this function with *request_desc set
115  * to 0. If the response from f/w has *req_desc set to 0, all the switch
116  * configuration information has been returned; if non-zero (meaning not all
117  * the information was returned), the caller should call this function again
118  * with *req_desc set to the previous value returned by f/w to get the
119  * next block of switch configuration information.
120  *
121  * *num_elems is output only parameter. This reflects the number of elements
122  * in response buffer. The caller of this function to use *num_elems while
123  * parsing the response buffer.
124  */
125 static enum ice_status
126 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
127 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
128 		  struct ice_sq_cd *cd)
129 {
130 	struct ice_aqc_get_sw_cfg *cmd;
131 	struct ice_aq_desc desc;
132 	enum ice_status status;
133 
134 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
135 	cmd = &desc.params.get_sw_conf;
136 	cmd->element = CPU_TO_LE16(*req_desc);
137 
138 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
139 	if (!status) {
140 		*req_desc = LE16_TO_CPU(cmd->element);
141 		*num_elems = LE16_TO_CPU(cmd->num_elems);
142 	}
143 
144 	return status;
145 }
146 
147 /**
148  * ice_alloc_rss_global_lut - allocate a RSS global LUT
149  * @hw: pointer to the HW struct
150  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
151  * @global_lut_id: output parameter for the RSS global LUT's ID
152  */
153 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
154 {
155 	struct ice_aqc_alloc_free_res_elem *sw_buf;
156 	enum ice_status status;
157 	u16 buf_len;
158 
159 	buf_len = ice_struct_size(sw_buf, elem, 1);
160 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
161 	if (!sw_buf)
162 		return ICE_ERR_NO_MEMORY;
163 
164 	sw_buf->num_elems = CPU_TO_LE16(1);
165 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
166 				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
167 				       ICE_AQC_RES_TYPE_FLAG_DEDICATED));
168 
169 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
170 	if (status) {
171 		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
172 			  shared_res ? "shared" : "dedicated", status);
173 		goto ice_alloc_global_lut_exit;
174 	}
175 
176 	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
177 
178 ice_alloc_global_lut_exit:
179 	ice_free(hw, sw_buf);
180 	return status;
181 }
182 
183 /**
184  * ice_free_rss_global_lut - free a RSS global LUT
185  * @hw: pointer to the HW struct
186  * @global_lut_id: ID of the RSS global LUT to free
187  */
188 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
189 {
190 	struct ice_aqc_alloc_free_res_elem *sw_buf;
191 	u16 buf_len, num_elems = 1;
192 	enum ice_status status;
193 
194 	buf_len = ice_struct_size(sw_buf, elem, num_elems);
195 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
196 	if (!sw_buf)
197 		return ICE_ERR_NO_MEMORY;
198 
199 	sw_buf->num_elems = CPU_TO_LE16(num_elems);
200 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
201 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
202 
203 	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
204 	if (status)
205 		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
206 			  global_lut_id, status);
207 
208 	ice_free(hw, sw_buf);
209 	return status;
210 }
211 
212 /**
213  * ice_alloc_sw - allocate resources specific to switch
214  * @hw: pointer to the HW struct
215  * @ena_stats: true to turn on VEB stats
216  * @shared_res: true for shared resource, false for dedicated resource
217  * @sw_id: switch ID returned
218  * @counter_id: VEB counter ID returned
219  *
220  * allocates switch resources (SWID and VEB counter) (0x0208)
221  */
222 enum ice_status
223 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
224 	     u16 *counter_id)
225 {
226 	struct ice_aqc_alloc_free_res_elem *sw_buf;
227 	struct ice_aqc_res_elem *sw_ele;
228 	enum ice_status status;
229 	u16 buf_len;
230 
231 	buf_len = ice_struct_size(sw_buf, elem, 1);
232 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
233 	if (!sw_buf)
234 		return ICE_ERR_NO_MEMORY;
235 
236 	/* Prepare buffer for switch ID.
237 	 * The number of resource entries in buffer is passed as 1 since only a
238 	 * single switch/VEB instance is allocated, and hence a single sw_id
239 	 * is requested.
240 	 */
241 	sw_buf->num_elems = CPU_TO_LE16(1);
242 	sw_buf->res_type =
243 		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
244 			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
245 			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));
246 
247 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
248 				       ice_aqc_opc_alloc_res, NULL);
249 
250 	if (status)
251 		goto ice_alloc_sw_exit;
252 
253 	sw_ele = &sw_buf->elem[0];
254 	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
255 
256 	if (ena_stats) {
257 		/* Prepare buffer for VEB Counter */
258 		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
259 		struct ice_aqc_alloc_free_res_elem *counter_buf;
260 		struct ice_aqc_res_elem *counter_ele;
261 
262 		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
263 				ice_malloc(hw, buf_len);
264 		if (!counter_buf) {
265 			status = ICE_ERR_NO_MEMORY;
266 			goto ice_alloc_sw_exit;
267 		}
268 
269 		/* The number of resource entries in buffer is passed as 1 since
270 		 * only a single switch/VEB instance is allocated, and hence a
271 		 * single VEB counter is requested.
272 		 */
273 		counter_buf->num_elems = CPU_TO_LE16(1);
274 		counter_buf->res_type =
275 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
276 				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
277 		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
278 					       opc, NULL);
279 
280 		if (status) {
281 			ice_free(hw, counter_buf);
282 			goto ice_alloc_sw_exit;
283 		}
284 		counter_ele = &counter_buf->elem[0];
285 		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
286 		ice_free(hw, counter_buf);
287 	}
288 
289 ice_alloc_sw_exit:
290 	ice_free(hw, sw_buf);
291 	return status;
292 }
293 
294 /**
295  * ice_free_sw - free resources specific to switch
296  * @hw: pointer to the HW struct
297  * @sw_id: switch ID returned
298  * @counter_id: VEB counter ID returned
299  *
300  * free switch resources (SWID and VEB counter) (0x0209)
301  *
302  * NOTE: This function frees multiple resources. It continues
303  * releasing other resources even after it encounters error.
304  * The error code returned is the last error it encountered.
305  */
306 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
307 {
308 	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
309 	enum ice_status status, ret_status;
310 	u16 buf_len;
311 
312 	buf_len = ice_struct_size(sw_buf, elem, 1);
313 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
314 	if (!sw_buf)
315 		return ICE_ERR_NO_MEMORY;
316 
317 	/* Prepare buffer to free for switch ID res.
318 	 * The number of resource entries in buffer is passed as 1 since only a
319 	 * single switch/VEB instance is freed, and hence a single sw_id
320 	 * is released.
321 	 */
322 	sw_buf->num_elems = CPU_TO_LE16(1);
323 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
324 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
325 
326 	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
327 					   ice_aqc_opc_free_res, NULL);
328 
329 	if (ret_status)
330 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
331 
332 	/* Prepare buffer to free for VEB Counter resource */
333 	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
334 			ice_malloc(hw, buf_len);
335 	if (!counter_buf) {
336 		ice_free(hw, sw_buf);
337 		return ICE_ERR_NO_MEMORY;
338 	}
339 
340 	/* The number of resource entries in buffer is passed as 1 since only a
341 	 * single switch/VEB instance is freed, and hence a single VEB counter
342 	 * is released
343 	 */
344 	counter_buf->num_elems = CPU_TO_LE16(1);
345 	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
346 	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
347 
348 	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
349 				       ice_aqc_opc_free_res, NULL);
350 	if (status) {
351 		ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
352 		ret_status = status;
353 	}
354 
355 	ice_free(hw, counter_buf);
356 	ice_free(hw, sw_buf);
357 	return ret_status;
358 }
359 
360 /**
361  * ice_aq_add_vsi
362  * @hw: pointer to the HW struct
363  * @vsi_ctx: pointer to a VSI context struct
364  * @cd: pointer to command details structure or NULL
365  *
366  * Add a VSI context to the hardware (0x0210)
367  */
368 enum ice_status
369 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
370 	       struct ice_sq_cd *cd)
371 {
372 	struct ice_aqc_add_update_free_vsi_resp *res;
373 	struct ice_aqc_add_get_update_free_vsi *cmd;
374 	struct ice_aq_desc desc;
375 	enum ice_status status;
376 
377 	cmd = &desc.params.vsi_cmd;
378 	res = &desc.params.add_update_free_vsi_res;
379 
380 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
381 
382 	if (!vsi_ctx->alloc_from_pool)
383 		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
384 					   ICE_AQ_VSI_IS_VALID);
385 	cmd->vf_id = vsi_ctx->vf_num;
386 
387 	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
388 
389 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
390 
391 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
392 				 sizeof(vsi_ctx->info), cd);
393 
394 	if (!status) {
395 		vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
396 		vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
397 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
398 	}
399 
400 	return status;
401 }
402 
403 /**
404  * ice_aq_free_vsi
405  * @hw: pointer to the HW struct
406  * @vsi_ctx: pointer to a VSI context struct
407  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
408  * @cd: pointer to command details structure or NULL
409  *
410  * Free VSI context info from hardware (0x0213)
411  */
412 enum ice_status
413 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
414 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
415 {
416 	struct ice_aqc_add_update_free_vsi_resp *resp;
417 	struct ice_aqc_add_get_update_free_vsi *cmd;
418 	struct ice_aq_desc desc;
419 	enum ice_status status;
420 
421 	cmd = &desc.params.vsi_cmd;
422 	resp = &desc.params.add_update_free_vsi_res;
423 
424 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
425 
426 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
427 	if (keep_vsi_alloc)
428 		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
429 
430 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
431 	if (!status) {
432 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
433 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
434 	}
435 
436 	return status;
437 }
438 
439 /**
440  * ice_aq_update_vsi
441  * @hw: pointer to the HW struct
442  * @vsi_ctx: pointer to a VSI context struct
443  * @cd: pointer to command details structure or NULL
444  *
445  * Update VSI context in the hardware (0x0211)
446  */
447 enum ice_status
448 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
449 		  struct ice_sq_cd *cd)
450 {
451 	struct ice_aqc_add_update_free_vsi_resp *resp;
452 	struct ice_aqc_add_get_update_free_vsi *cmd;
453 	struct ice_aq_desc desc;
454 	enum ice_status status;
455 
456 	cmd = &desc.params.vsi_cmd;
457 	resp = &desc.params.add_update_free_vsi_res;
458 
459 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
460 
461 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
462 
463 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
464 
465 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
466 				 sizeof(vsi_ctx->info), cd);
467 
468 	if (!status) {
469 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
470 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
471 	}
472 
473 	return status;
474 }
475 
476 /**
477  * ice_is_vsi_valid - check whether the VSI is valid or not
478  * @hw: pointer to the HW struct
479  * @vsi_handle: VSI handle
480  *
481  * check whether the VSI is valid or not
482  */
483 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
484 {
485 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
486 }
487 
488 /**
489  * ice_get_hw_vsi_num - return the HW VSI number
490  * @hw: pointer to the HW struct
491  * @vsi_handle: VSI handle
492  *
493  * return the HW VSI number
494  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
495  */
496 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
497 {
498 	return hw->vsi_ctx[vsi_handle]->vsi_num;
499 }
500 
501 /**
502  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
503  * @hw: pointer to the HW struct
504  * @vsi_handle: VSI handle
505  *
506  * return the VSI context entry for a given VSI handle
507  */
508 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
509 {
510 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
511 }
512 
513 /**
514  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
515  * @hw: pointer to the HW struct
516  * @vsi_handle: VSI handle
517  * @vsi: VSI context pointer
518  *
519  * save the VSI context entry for a given VSI handle
520  */
521 static void
522 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
523 {
524 	hw->vsi_ctx[vsi_handle] = vsi;
525 }
526 
527 /**
528  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
529  * @hw: pointer to the HW struct
530  * @vsi_handle: VSI handle
531  */
532 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
533 {
534 	struct ice_vsi_ctx *vsi;
535 	u8 i;
536 
537 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
538 	if (!vsi)
539 		return;
540 	ice_for_each_traffic_class(i) {
541 		if (vsi->lan_q_ctx[i]) {
542 			ice_free(hw, vsi->lan_q_ctx[i]);
543 			vsi->lan_q_ctx[i] = NULL;
544 		}
545 		if (vsi->rdma_q_ctx[i]) {
546 			ice_free(hw, vsi->rdma_q_ctx[i]);
547 			vsi->rdma_q_ctx[i] = NULL;
548 		}
549 	}
550 }
551 
552 /**
553  * ice_clear_vsi_ctx - clear the VSI context entry
554  * @hw: pointer to the HW struct
555  * @vsi_handle: VSI handle
556  *
557  * clear the VSI context entry
558  */
559 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
560 {
561 	struct ice_vsi_ctx *vsi;
562 
563 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
564 	if (vsi) {
565 		ice_clear_vsi_q_ctx(hw, vsi_handle);
566 		ice_free(hw, vsi);
567 		hw->vsi_ctx[vsi_handle] = NULL;
568 	}
569 }
570 
571 /**
572  * ice_clear_all_vsi_ctx - clear all the VSI context entries
573  * @hw: pointer to the HW struct
574  */
575 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
576 {
577 	u16 i;
578 
579 	for (i = 0; i < ICE_MAX_VSI; i++)
580 		ice_clear_vsi_ctx(hw, i);
581 }
582 
583 /**
584  * ice_add_vsi - add VSI context to the hardware and VSI handle list
585  * @hw: pointer to the HW struct
586  * @vsi_handle: unique VSI handle provided by drivers
587  * @vsi_ctx: pointer to a VSI context struct
588  * @cd: pointer to command details structure or NULL
589  *
590  * Add a VSI context to the hardware also add it into the VSI handle list.
591  * If this function gets called after reset for existing VSIs then update
592  * with the new HW VSI number in the corresponding VSI handle list entry.
593  */
594 enum ice_status
595 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
596 	    struct ice_sq_cd *cd)
597 {
598 	struct ice_vsi_ctx *tmp_vsi_ctx;
599 	enum ice_status status;
600 
601 	if (vsi_handle >= ICE_MAX_VSI)
602 		return ICE_ERR_PARAM;
603 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
604 	if (status)
605 		return status;
606 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
607 	if (!tmp_vsi_ctx) {
608 		/* Create a new VSI context */
609 		tmp_vsi_ctx = (struct ice_vsi_ctx *)
610 			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
611 		if (!tmp_vsi_ctx) {
612 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
613 			return ICE_ERR_NO_MEMORY;
614 		}
615 		*tmp_vsi_ctx = *vsi_ctx;
616 
617 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
618 	} else {
619 		/* update with new HW VSI num */
620 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
621 	}
622 
623 	return ICE_SUCCESS;
624 }
625 
626 /**
627  * ice_free_vsi- free VSI context from hardware and VSI handle list
628  * @hw: pointer to the HW struct
629  * @vsi_handle: unique VSI handle
630  * @vsi_ctx: pointer to a VSI context struct
631  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
632  * @cd: pointer to command details structure or NULL
633  *
634  * Free VSI context info from hardware as well as from VSI handle list
635  */
636 enum ice_status
637 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
638 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
639 {
640 	enum ice_status status;
641 
642 	if (!ice_is_vsi_valid(hw, vsi_handle))
643 		return ICE_ERR_PARAM;
644 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
645 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
646 	if (!status)
647 		ice_clear_vsi_ctx(hw, vsi_handle);
648 	return status;
649 }
650 
651 /**
652  * ice_update_vsi
653  * @hw: pointer to the HW struct
654  * @vsi_handle: unique VSI handle
655  * @vsi_ctx: pointer to a VSI context struct
656  * @cd: pointer to command details structure or NULL
657  *
658  * Update VSI context in the hardware
659  */
660 enum ice_status
661 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
662 	       struct ice_sq_cd *cd)
663 {
664 	if (!ice_is_vsi_valid(hw, vsi_handle))
665 		return ICE_ERR_PARAM;
666 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
667 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
668 }
669 
670 /**
671  * ice_cfg_iwarp_fltr - enable/disable iWARP filtering on VSI
672  * @hw: pointer to HW struct
673  * @vsi_handle: VSI SW index
674  * @enable: boolean for enable/disable
675  */
676 enum ice_status
677 ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
678 {
679 	struct ice_vsi_ctx *ctx, *cached_ctx;
680 	enum ice_status status;
681 
682 	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
683 	if (!cached_ctx)
684 		return ICE_ERR_DOES_NOT_EXIST;
685 
686 	ctx = (struct ice_vsi_ctx *)ice_calloc(hw, 1, sizeof(*ctx));
687 	if (!ctx)
688 		return ICE_ERR_NO_MEMORY;
689 
690 	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
691 	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
692 	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
693 
694 	ctx->info.valid_sections = CPU_TO_LE16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
695 
696 	if (enable)
697 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
698 	else
699 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
700 
701 	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
702 	if (!status) {
703 		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
704 		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
705 	}
706 
707 	ice_free(hw, ctx);
708 	return status;
709 }
710 
711 /**
712  * ice_aq_get_vsi_params
713  * @hw: pointer to the HW struct
714  * @vsi_ctx: pointer to a VSI context struct
715  * @cd: pointer to command details structure or NULL
716  *
717  * Get VSI context info from hardware (0x0212)
718  */
719 enum ice_status
720 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
721 		      struct ice_sq_cd *cd)
722 {
723 	struct ice_aqc_add_get_update_free_vsi *cmd;
724 	struct ice_aqc_get_vsi_resp *resp;
725 	struct ice_aq_desc desc;
726 	enum ice_status status;
727 
728 	cmd = &desc.params.vsi_cmd;
729 	resp = &desc.params.get_vsi_resp;
730 
731 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
732 
733 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
734 
735 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
736 				 sizeof(vsi_ctx->info), cd);
737 	if (!status) {
738 		vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
739 					ICE_AQ_VSI_NUM_M;
740 		vsi_ctx->vf_num = resp->vf_id;
741 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
742 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
743 	}
744 
745 	return status;
746 }
747 
748 /**
749  * ice_aq_add_update_mir_rule - add/update a mirror rule
750  * @hw: pointer to the HW struct
751  * @rule_type: Rule Type
752  * @dest_vsi: VSI number to which packets will be mirrored
753  * @count: length of the list
754  * @mr_buf: buffer for list of mirrored VSI numbers
755  * @cd: pointer to command details structure or NULL
756  * @rule_id: Rule ID
757  *
758  * Add/Update Mirror Rule (0x260).
759  */
760 enum ice_status
761 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
762 			   u16 count, struct ice_mir_rule_buf *mr_buf,
763 			   struct ice_sq_cd *cd, u16 *rule_id)
764 {
765 	struct ice_aqc_add_update_mir_rule *cmd;
766 	struct ice_aq_desc desc;
767 	enum ice_status status;
768 	__le16 *mr_list = NULL;
769 	u16 buf_size = 0;
770 
771 	switch (rule_type) {
772 	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
773 	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
774 		/* Make sure count and mr_buf are set for these rule_types */
775 		if (!(count && mr_buf))
776 			return ICE_ERR_PARAM;
777 
778 		buf_size = count * sizeof(__le16);
779 		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
780 		if (!mr_list)
781 			return ICE_ERR_NO_MEMORY;
782 		break;
783 	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
784 	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
785 		/* Make sure count and mr_buf are not set for these
786 		 * rule_types
787 		 */
788 		if (count || mr_buf)
789 			return ICE_ERR_PARAM;
790 		break;
791 	default:
792 		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
793 		return ICE_ERR_OUT_OF_RANGE;
794 	}
795 
796 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
797 
798 	/* Pre-process 'mr_buf' items for add/update of virtual port
799 	 * ingress/egress mirroring (but not physical port ingress/egress
800 	 * mirroring)
801 	 */
802 	if (mr_buf) {
803 		int i;
804 
805 		for (i = 0; i < count; i++) {
806 			u16 id;
807 
808 			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
809 
810 			/* Validate specified VSI number, make sure it is less
811 			 * than ICE_MAX_VSI, if not return with error.
812 			 */
813 			if (id >= ICE_MAX_VSI) {
814 				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
815 					  id);
816 				ice_free(hw, mr_list);
817 				return ICE_ERR_OUT_OF_RANGE;
818 			}
819 
820 			/* add VSI to mirror rule */
821 			if (mr_buf[i].add)
822 				mr_list[i] =
823 					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
824 			else /* remove VSI from mirror rule */
825 				mr_list[i] = CPU_TO_LE16(id);
826 		}
827 
828 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
829 	}
830 
831 	cmd = &desc.params.add_update_rule;
832 	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
833 		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
834 					   ICE_AQC_RULE_ID_VALID_M);
835 	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
836 	cmd->num_entries = CPU_TO_LE16(count);
837 	cmd->dest = CPU_TO_LE16(dest_vsi);
838 
839 	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
840 	if (!status)
841 		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
842 
843 	ice_free(hw, mr_list);
844 
845 	return status;
846 }
847 
848 /**
849  * ice_aq_delete_mir_rule - delete a mirror rule
850  * @hw: pointer to the HW struct
851  * @rule_id: Mirror rule ID (to be deleted)
852  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
853  *		 otherwise it is returned to the shared pool
854  * @cd: pointer to command details structure or NULL
855  *
856  * Delete Mirror Rule (0x261).
857  */
858 enum ice_status
859 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
860 		       struct ice_sq_cd *cd)
861 {
862 	struct ice_aqc_delete_mir_rule *cmd;
863 	struct ice_aq_desc desc;
864 
865 	/* rule_id should be in the range 0...63 */
866 	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
867 		return ICE_ERR_OUT_OF_RANGE;
868 
869 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
870 
871 	cmd = &desc.params.del_rule;
872 	rule_id |= ICE_AQC_RULE_ID_VALID_M;
873 	cmd->rule_id = CPU_TO_LE16(rule_id);
874 
875 	if (keep_allocd)
876 		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
877 
878 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
879 }
880 
881 /**
882  * ice_aq_alloc_free_vsi_list
883  * @hw: pointer to the HW struct
884  * @vsi_list_id: VSI list ID returned or used for lookup
885  * @lkup_type: switch rule filter lookup type
886  * @opc: switch rules population command type - pass in the command opcode
887  *
888  * allocates or free a VSI list resource
889  */
890 static enum ice_status
891 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
892 			   enum ice_sw_lkup_type lkup_type,
893 			   enum ice_adminq_opc opc)
894 {
895 	struct ice_aqc_alloc_free_res_elem *sw_buf;
896 	struct ice_aqc_res_elem *vsi_ele;
897 	enum ice_status status;
898 	u16 buf_len;
899 
900 	buf_len = ice_struct_size(sw_buf, elem, 1);
901 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
902 	if (!sw_buf)
903 		return ICE_ERR_NO_MEMORY;
904 	sw_buf->num_elems = CPU_TO_LE16(1);
905 
906 	if (lkup_type == ICE_SW_LKUP_MAC ||
907 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
908 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
909 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
910 	    lkup_type == ICE_SW_LKUP_PROMISC ||
911 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
912 	    lkup_type == ICE_SW_LKUP_DFLT ||
913 	    lkup_type == ICE_SW_LKUP_LAST) {
914 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
915 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
916 		sw_buf->res_type =
917 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
918 	} else {
919 		status = ICE_ERR_PARAM;
920 		goto ice_aq_alloc_free_vsi_list_exit;
921 	}
922 
923 	if (opc == ice_aqc_opc_free_res)
924 		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
925 
926 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
927 	if (status)
928 		goto ice_aq_alloc_free_vsi_list_exit;
929 
930 	if (opc == ice_aqc_opc_alloc_res) {
931 		vsi_ele = &sw_buf->elem[0];
932 		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
933 	}
934 
935 ice_aq_alloc_free_vsi_list_exit:
936 	ice_free(hw, sw_buf);
937 	return status;
938 }
939 
940 /**
941  * ice_aq_set_storm_ctrl - Sets storm control configuration
942  * @hw: pointer to the HW struct
943  * @bcast_thresh: represents the upper threshold for broadcast storm control
944  * @mcast_thresh: represents the upper threshold for multicast storm control
945  * @ctl_bitmask: storm control knobs
946  *
947  * Sets the storm control configuration (0x0280)
948  */
949 enum ice_status
950 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
951 		      u32 ctl_bitmask)
952 {
953 	struct ice_aqc_storm_cfg *cmd;
954 	struct ice_aq_desc desc;
955 
956 	cmd = &desc.params.storm_conf;
957 
958 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
959 
960 	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
961 	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
962 	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
963 
964 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
965 }
966 
967 /**
968  * ice_aq_get_storm_ctrl - gets storm control configuration
969  * @hw: pointer to the HW struct
970  * @bcast_thresh: represents the upper threshold for broadcast storm control
971  * @mcast_thresh: represents the upper threshold for multicast storm control
972  * @ctl_bitmask: storm control knobs
973  *
974  * Gets the storm control configuration (0x0281)
975  */
976 enum ice_status
977 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
978 		      u32 *ctl_bitmask)
979 {
980 	enum ice_status status;
981 	struct ice_aq_desc desc;
982 
983 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
984 
985 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
986 	if (!status) {
987 		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
988 
989 		if (bcast_thresh)
990 			*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
991 				ICE_AQ_THRESHOLD_M;
992 		if (mcast_thresh)
993 			*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
994 				ICE_AQ_THRESHOLD_M;
995 		if (ctl_bitmask)
996 			*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
997 	}
998 
999 	return status;
1000 }
1001 
1002 /**
1003  * ice_aq_sw_rules - add/update/remove switch rules
1004  * @hw: pointer to the HW struct
1005  * @rule_list: pointer to switch rule population list
1006  * @rule_list_sz: total size of the rule list in bytes
1007  * @num_rules: number of switch rules in the rule_list
1008  * @opc: switch rules population command type - pass in the command opcode
1009  * @cd: pointer to command details structure or NULL
1010  *
1011  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1012  */
1013 enum ice_status
1014 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1015 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1016 {
1017 	struct ice_aq_desc desc;
1018 	enum ice_status status;
1019 
1020 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1021 
1022 	if (opc != ice_aqc_opc_add_sw_rules &&
1023 	    opc != ice_aqc_opc_update_sw_rules &&
1024 	    opc != ice_aqc_opc_remove_sw_rules)
1025 		return ICE_ERR_PARAM;
1026 
1027 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1028 
1029 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1030 	desc.params.sw_rules.num_rules_fltr_entry_index =
1031 		CPU_TO_LE16(num_rules);
1032 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1033 	if (opc != ice_aqc_opc_add_sw_rules &&
1034 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
1035 		status = ICE_ERR_DOES_NOT_EXIST;
1036 
1037 	return status;
1038 }
1039 
1040 /* ice_init_port_info - Initialize port_info with switch configuration data
1041  * @pi: pointer to port_info
1042  * @vsi_port_num: VSI number or port number
1043  * @type: Type of switch element (port or VSI)
1044  * @swid: switch ID of the switch the element is attached to
1045  * @pf_vf_num: PF or VF number
1046  * @is_vf: true if the element is a VF, false otherwise
1047  */
1048 static void
1049 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1050 		   u16 swid, u16 pf_vf_num, bool is_vf)
1051 {
1052 	switch (type) {
1053 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1054 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1055 		pi->sw_id = swid;
1056 		pi->pf_vf_num = pf_vf_num;
1057 		pi->is_vf = is_vf;
1058 		break;
1059 	default:
1060 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
1061 		break;
1062 	}
1063 }
1064 
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware switch configuration (possibly over several AQ
 * calls) and initializes hw->port_info from the port elements found in
 * the responses.
 *
 * Returns ICE_ERR_NO_MEMORY if the response buffer cannot be allocated,
 * ICE_ERR_CFG if more port elements are reported than expected, or the
 * status of the last ice_aq_get_sw_cfg call.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;	/* upper bound on port elements accepted */
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;		/* count of port elements processed so far */
	u16 i;

	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* Element fields are little-endian; mask off the
			 * embedded type/flag bits to extract the raw
			 * numbers.
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			/* The element type lives in the upper bits of
			 * vsi_port_num
			 */
			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* non-port element types are ignored */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
1143 
1144 /**
1145  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1146  * @hw: pointer to the hardware structure
1147  * @fi: filter info structure to fill/update
1148  *
1149  * This helper function populates the lb_en and lan_en elements of the provided
1150  * ice_fltr_info struct using the switch's type and characteristics of the
1151  * switch rule being configured.
1152  */
1153 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1154 {
1155 	fi->lb_en = false;
1156 	fi->lan_en = false;
1157 	if ((fi->flag & ICE_FLTR_TX) &&
1158 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
1159 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1160 	     fi->fltr_act == ICE_FWD_TO_Q ||
1161 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1162 		/* Setting LB for prune actions will result in replicated
1163 		 * packets to the internal switch that will be dropped.
1164 		 */
1165 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1166 			fi->lb_en = true;
1167 
1168 		/* Set lan_en to TRUE if
1169 		 * 1. The switch is a VEB AND
1170 		 * 2
1171 		 * 2.1 The lookup is a directional lookup like ethertype,
1172 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
1173 		 * and default-port OR
1174 		 * 2.2 The lookup is VLAN, OR
1175 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1176 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1177 		 *
1178 		 * OR
1179 		 *
1180 		 * The switch is a VEPA.
1181 		 *
1182 		 * In all other cases, the LAN enable has to be set to false.
1183 		 */
1184 		if (hw->evb_veb) {
1185 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1186 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1187 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1188 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1189 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
1190 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
1191 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
1192 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1193 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1194 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1195 				fi->lan_en = true;
1196 		} else {
1197 			fi->lan_en = true;
1198 		}
1199 	}
1200 }
1201 
1202 /**
1203  * ice_fill_sw_rule - Helper function to fill switch rule structure
1204  * @hw: pointer to the hardware structure
1205  * @f_info: entry containing packet forwarding information
1206  * @s_rule: switch rule structure to be filled in based on mac_entry
1207  * @opc: switch rules population command type - pass in the command opcode
1208  */
1209 static void
1210 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1211 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1212 {
1213 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1214 	u16 vlan_tpid = ICE_ETH_P_8021Q;
1215 	void *daddr = NULL;
1216 	u16 eth_hdr_sz;
1217 	u8 *eth_hdr;
1218 	u32 act = 0;
1219 	__be16 *off;
1220 	u8 q_rgn;
1221 
1222 	if (opc == ice_aqc_opc_remove_sw_rules) {
1223 		s_rule->pdata.lkup_tx_rx.act = 0;
1224 		s_rule->pdata.lkup_tx_rx.index =
1225 			CPU_TO_LE16(f_info->fltr_rule_id);
1226 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1227 		return;
1228 	}
1229 
1230 	eth_hdr_sz = sizeof(dummy_eth_header);
1231 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1232 
1233 	/* initialize the ether header with a dummy header */
1234 	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1235 	ice_fill_sw_info(hw, f_info);
1236 
1237 	switch (f_info->fltr_act) {
1238 	case ICE_FWD_TO_VSI:
1239 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1240 			ICE_SINGLE_ACT_VSI_ID_M;
1241 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1242 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1243 				ICE_SINGLE_ACT_VALID_BIT;
1244 		break;
1245 	case ICE_FWD_TO_VSI_LIST:
1246 		act |= ICE_SINGLE_ACT_VSI_LIST;
1247 		act |= (f_info->fwd_id.vsi_list_id <<
1248 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1249 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
1250 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1251 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1252 				ICE_SINGLE_ACT_VALID_BIT;
1253 		break;
1254 	case ICE_FWD_TO_Q:
1255 		act |= ICE_SINGLE_ACT_TO_Q;
1256 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1257 			ICE_SINGLE_ACT_Q_INDEX_M;
1258 		break;
1259 	case ICE_DROP_PACKET:
1260 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1261 			ICE_SINGLE_ACT_VALID_BIT;
1262 		break;
1263 	case ICE_FWD_TO_QGRP:
1264 		q_rgn = f_info->qgrp_size > 0 ?
1265 			(u8)ice_ilog2(f_info->qgrp_size) : 0;
1266 		act |= ICE_SINGLE_ACT_TO_Q;
1267 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1268 			ICE_SINGLE_ACT_Q_INDEX_M;
1269 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1270 			ICE_SINGLE_ACT_Q_REGION_M;
1271 		break;
1272 	default:
1273 		return;
1274 	}
1275 
1276 	if (f_info->lb_en)
1277 		act |= ICE_SINGLE_ACT_LB_ENABLE;
1278 	if (f_info->lan_en)
1279 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
1280 
1281 	switch (f_info->lkup_type) {
1282 	case ICE_SW_LKUP_MAC:
1283 		daddr = f_info->l_data.mac.mac_addr;
1284 		break;
1285 	case ICE_SW_LKUP_VLAN:
1286 		vlan_id = f_info->l_data.vlan.vlan_id;
1287 		if (f_info->l_data.vlan.tpid_valid)
1288 			vlan_tpid = f_info->l_data.vlan.tpid;
1289 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1290 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1291 			act |= ICE_SINGLE_ACT_PRUNE;
1292 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1293 		}
1294 		break;
1295 	case ICE_SW_LKUP_ETHERTYPE_MAC:
1296 		daddr = f_info->l_data.ethertype_mac.mac_addr;
1297 		/* fall-through */
1298 	case ICE_SW_LKUP_ETHERTYPE:
1299 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1300 		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1301 		break;
1302 	case ICE_SW_LKUP_MAC_VLAN:
1303 		daddr = f_info->l_data.mac_vlan.mac_addr;
1304 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1305 		break;
1306 	case ICE_SW_LKUP_PROMISC_VLAN:
1307 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
1308 		/* fall-through */
1309 	case ICE_SW_LKUP_PROMISC:
1310 		daddr = f_info->l_data.mac_vlan.mac_addr;
1311 		break;
1312 	default:
1313 		break;
1314 	}
1315 
1316 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1317 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1318 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1319 
1320 	/* Recipe set depending on lookup type */
1321 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1322 	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1323 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1324 
1325 	if (daddr)
1326 		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1327 			   ICE_NONDMA_TO_NONDMA);
1328 
1329 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1330 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1331 		*off = CPU_TO_BE16(vlan_id);
1332 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1333 		*off = CPU_TO_BE16(vlan_tpid);
1334 	}
1335 
1336 	/* Create the switch rule with the final dummy Ethernet header */
1337 	if (opc != ice_aqc_opc_update_sw_rules)
1338 		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1339 }
1340 
1341 /**
1342  * ice_add_marker_act
1343  * @hw: pointer to the hardware structure
1344  * @m_ent: the management entry for which sw marker needs to be added
1345  * @sw_marker: sw marker to tag the Rx descriptor with
1346  * @l_id: large action resource ID
1347  *
1348  * Create a large action to hold software marker and update the switch rule
1349  * entry pointed by m_ent with newly created large action
1350  */
1351 static enum ice_status
1352 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1353 		   u16 sw_marker, u16 l_id)
1354 {
1355 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1356 	/* For software marker we need 3 large actions
1357 	 * 1. FWD action: FWD TO VSI or VSI LIST
1358 	 * 2. GENERIC VALUE action to hold the profile ID
1359 	 * 3. GENERIC VALUE action to hold the software marker ID
1360 	 */
1361 	const u16 num_lg_acts = 3;
1362 	enum ice_status status;
1363 	u16 lg_act_size;
1364 	u16 rules_size;
1365 	u32 act;
1366 	u16 id;
1367 
1368 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1369 		return ICE_ERR_PARAM;
1370 
1371 	/* Create two back-to-back switch rules and submit them to the HW using
1372 	 * one memory buffer:
1373 	 *    1. Large Action
1374 	 *    2. Look up Tx Rx
1375 	 */
1376 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1377 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1378 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1379 	if (!lg_act)
1380 		return ICE_ERR_NO_MEMORY;
1381 
1382 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1383 
1384 	/* Fill in the first switch rule i.e. large action */
1385 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1386 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1387 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1388 
1389 	/* First action VSI forwarding or VSI list forwarding depending on how
1390 	 * many VSIs
1391 	 */
1392 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1393 		m_ent->fltr_info.fwd_id.hw_vsi_id;
1394 
1395 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1396 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
1397 	if (m_ent->vsi_count > 1)
1398 		act |= ICE_LG_ACT_VSI_LIST;
1399 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1400 
1401 	/* Second action descriptor type */
1402 	act = ICE_LG_ACT_GENERIC;
1403 
1404 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1405 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1406 
1407 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1408 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1409 
1410 	/* Third action Marker value */
1411 	act |= ICE_LG_ACT_GENERIC;
1412 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1413 		ICE_LG_ACT_GENERIC_VALUE_M;
1414 
1415 	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1416 
1417 	/* call the fill switch rule to fill the lookup Tx Rx structure */
1418 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1419 			 ice_aqc_opc_update_sw_rules);
1420 
1421 	/* Update the action to point to the large action ID */
1422 	rx_tx->pdata.lkup_tx_rx.act =
1423 		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1424 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1425 			     ICE_SINGLE_ACT_PTR_VAL_M));
1426 
1427 	/* Use the filter rule ID of the previously created rule with single
1428 	 * act. Once the update happens, hardware will treat this as large
1429 	 * action
1430 	 */
1431 	rx_tx->pdata.lkup_tx_rx.index =
1432 		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
1433 
1434 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1435 				 ice_aqc_opc_update_sw_rules, NULL);
1436 	if (!status) {
1437 		m_ent->lg_act_idx = l_id;
1438 		m_ent->sw_marker_id = sw_marker;
1439 	}
1440 
1441 	ice_free(hw, lg_act);
1442 	return status;
1443 }
1444 
1445 /**
1446  * ice_add_counter_act - add/update filter rule with counter action
1447  * @hw: pointer to the hardware structure
1448  * @m_ent: the management entry for which counter needs to be added
1449  * @counter_id: VLAN counter ID returned as part of allocate resource
1450  * @l_id: large action resource ID
1451  */
1452 static enum ice_status
1453 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1454 		    u16 counter_id, u16 l_id)
1455 {
1456 	struct ice_aqc_sw_rules_elem *lg_act;
1457 	struct ice_aqc_sw_rules_elem *rx_tx;
1458 	enum ice_status status;
1459 	/* 2 actions will be added while adding a large action counter */
1460 	const int num_acts = 2;
1461 	u16 lg_act_size;
1462 	u16 rules_size;
1463 	u16 f_rule_id;
1464 	u32 act;
1465 	u16 id;
1466 
1467 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1468 		return ICE_ERR_PARAM;
1469 
1470 	/* Create two back-to-back switch rules and submit them to the HW using
1471 	 * one memory buffer:
1472 	 * 1. Large Action
1473 	 * 2. Look up Tx Rx
1474 	 */
1475 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1476 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1477 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1478 	if (!lg_act)
1479 		return ICE_ERR_NO_MEMORY;
1480 
1481 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1482 
1483 	/* Fill in the first switch rule i.e. large action */
1484 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1485 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1486 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1487 
1488 	/* First action VSI forwarding or VSI list forwarding depending on how
1489 	 * many VSIs
1490 	 */
1491 	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
1492 		m_ent->fltr_info.fwd_id.hw_vsi_id;
1493 
1494 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1495 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1496 		ICE_LG_ACT_VSI_LIST_ID_M;
1497 	if (m_ent->vsi_count > 1)
1498 		act |= ICE_LG_ACT_VSI_LIST;
1499 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1500 
1501 	/* Second action counter ID */
1502 	act = ICE_LG_ACT_STAT_COUNT;
1503 	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1504 		ICE_LG_ACT_STAT_COUNT_M;
1505 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1506 
1507 	/* call the fill switch rule to fill the lookup Tx Rx structure */
1508 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1509 			 ice_aqc_opc_update_sw_rules);
1510 
1511 	act = ICE_SINGLE_ACT_PTR;
1512 	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1513 	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1514 
1515 	/* Use the filter rule ID of the previously created rule with single
1516 	 * act. Once the update happens, hardware will treat this as large
1517 	 * action
1518 	 */
1519 	f_rule_id = m_ent->fltr_info.fltr_rule_id;
1520 	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1521 
1522 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1523 				 ice_aqc_opc_update_sw_rules, NULL);
1524 	if (!status) {
1525 		m_ent->lg_act_idx = l_id;
1526 		m_ent->counter_index = (u8)counter_id;
1527 	}
1528 
1529 	ice_free(hw, lg_act);
1530 	return status;
1531 }
1532 
1533 /**
1534  * ice_create_vsi_list_map
1535  * @hw: pointer to the hardware structure
1536  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1537  * @num_vsi: number of VSI handles in the array
1538  * @vsi_list_id: VSI list ID generated as part of allocate resource
1539  *
1540  * Helper function to create a new entry of VSI list ID to VSI mapping
1541  * using the given VSI list ID
1542  */
1543 static struct ice_vsi_list_map_info *
1544 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1545 			u16 vsi_list_id)
1546 {
1547 	struct ice_switch_info *sw = hw->switch_info;
1548 	struct ice_vsi_list_map_info *v_map;
1549 	int i;
1550 
1551 	v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
1552 	if (!v_map)
1553 		return NULL;
1554 
1555 	v_map->vsi_list_id = vsi_list_id;
1556 	v_map->ref_cnt = 1;
1557 	for (i = 0; i < num_vsi; i++)
1558 		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
1559 
1560 	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1561 	return v_map;
1562 }
1563 
1564 /**
1565  * ice_update_vsi_list_rule
1566  * @hw: pointer to the hardware structure
1567  * @vsi_handle_arr: array of VSI handles to form a VSI list
1568  * @num_vsi: number of VSI handles in the array
1569  * @vsi_list_id: VSI list ID generated as part of allocate resource
1570  * @remove: Boolean value to indicate if this is a remove action
1571  * @opc: switch rules population command type - pass in the command opcode
1572  * @lkup_type: lookup type of the filter
1573  *
1574  * Call AQ command to add a new switch rule or update existing switch rule
1575  * using the given VSI list ID
1576  */
1577 static enum ice_status
1578 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1579 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1580 			 enum ice_sw_lkup_type lkup_type)
1581 {
1582 	struct ice_aqc_sw_rules_elem *s_rule;
1583 	enum ice_status status;
1584 	u16 s_rule_size;
1585 	u16 rule_type;
1586 	int i;
1587 
1588 	if (!num_vsi)
1589 		return ICE_ERR_PARAM;
1590 
1591 	if (lkup_type == ICE_SW_LKUP_MAC ||
1592 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1593 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1594 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1595 	    lkup_type == ICE_SW_LKUP_PROMISC ||
1596 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1597 	    lkup_type == ICE_SW_LKUP_DFLT ||
1598 	    lkup_type == ICE_SW_LKUP_LAST)
1599 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1600 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1601 	else if (lkup_type == ICE_SW_LKUP_VLAN)
1602 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1603 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1604 	else
1605 		return ICE_ERR_PARAM;
1606 
1607 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1608 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
1609 	if (!s_rule)
1610 		return ICE_ERR_NO_MEMORY;
1611 	for (i = 0; i < num_vsi; i++) {
1612 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1613 			status = ICE_ERR_PARAM;
1614 			goto exit;
1615 		}
1616 		/* AQ call requires hw_vsi_id(s) */
1617 		s_rule->pdata.vsi_list.vsi[i] =
1618 			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1619 	}
1620 
1621 	s_rule->type = CPU_TO_LE16(rule_type);
1622 	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
1623 	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
1624 
1625 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1626 
1627 exit:
1628 	ice_free(hw, s_rule);
1629 	return status;
1630 }
1631 
1632 /**
1633  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1634  * @hw: pointer to the HW struct
1635  * @vsi_handle_arr: array of VSI handles to form a VSI list
1636  * @num_vsi: number of VSI handles in the array
1637  * @vsi_list_id: stores the ID of the VSI list to be created
1638  * @lkup_type: switch rule filter's lookup type
1639  */
1640 static enum ice_status
1641 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1642 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1643 {
1644 	enum ice_status status;
1645 
1646 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1647 					    ice_aqc_opc_alloc_res);
1648 	if (status)
1649 		return status;
1650 
1651 	/* Update the newly created VSI list to include the specified VSIs */
1652 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1653 					*vsi_list_id, false,
1654 					ice_aqc_opc_add_sw_rules, lkup_type);
1655 }
1656 
1657 /**
1658  * ice_create_pkt_fwd_rule
1659  * @hw: pointer to the hardware structure
1660  * @recp_list: corresponding filter management list
1661  * @f_entry: entry containing packet forwarding information
1662  *
1663  * Create switch rule with given filter information and add an entry
1664  * to the corresponding filter management list to track this switch rule
1665  * and VSI mapping
1666  */
1667 static enum ice_status
1668 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
1669 			struct ice_fltr_list_entry *f_entry)
1670 {
1671 	struct ice_fltr_mgmt_list_entry *fm_entry;
1672 	struct ice_aqc_sw_rules_elem *s_rule;
1673 	enum ice_status status;
1674 
1675 	s_rule = (struct ice_aqc_sw_rules_elem *)
1676 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1677 	if (!s_rule)
1678 		return ICE_ERR_NO_MEMORY;
1679 	fm_entry = (struct ice_fltr_mgmt_list_entry *)
1680 		   ice_malloc(hw, sizeof(*fm_entry));
1681 	if (!fm_entry) {
1682 		status = ICE_ERR_NO_MEMORY;
1683 		goto ice_create_pkt_fwd_rule_exit;
1684 	}
1685 
1686 	fm_entry->fltr_info = f_entry->fltr_info;
1687 
1688 	/* Initialize all the fields for the management entry */
1689 	fm_entry->vsi_count = 1;
1690 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1691 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1692 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1693 
1694 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1695 			 ice_aqc_opc_add_sw_rules);
1696 
1697 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1698 				 ice_aqc_opc_add_sw_rules, NULL);
1699 	if (status) {
1700 		ice_free(hw, fm_entry);
1701 		goto ice_create_pkt_fwd_rule_exit;
1702 	}
1703 
1704 	f_entry->fltr_info.fltr_rule_id =
1705 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1706 	fm_entry->fltr_info.fltr_rule_id =
1707 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1708 
1709 	/* The book keeping entries will get removed when base driver
1710 	 * calls remove filter AQ command
1711 	 */
1712 	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
1713 
1714 ice_create_pkt_fwd_rule_exit:
1715 	ice_free(hw, s_rule);
1716 	return status;
1717 }
1718 
1719 /**
1720  * ice_update_pkt_fwd_rule
1721  * @hw: pointer to the hardware structure
1722  * @f_info: filter information for switch rule
1723  *
1724  * Call AQ command to update a previously created switch rule with a
1725  * VSI list ID
1726  */
1727 static enum ice_status
1728 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1729 {
1730 	struct ice_aqc_sw_rules_elem *s_rule;
1731 	enum ice_status status;
1732 
1733 	s_rule = (struct ice_aqc_sw_rules_elem *)
1734 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1735 	if (!s_rule)
1736 		return ICE_ERR_NO_MEMORY;
1737 
1738 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1739 
1740 	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
1741 
1742 	/* Update switch rule with new rule set to forward VSI list */
1743 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1744 				 ice_aqc_opc_update_sw_rules, NULL);
1745 
1746 	ice_free(hw, s_rule);
1747 	return status;
1748 }
1749 
1750 /**
1751  * ice_update_sw_rule_bridge_mode
1752  * @hw: pointer to the HW struct
1753  *
1754  * Updates unicast switch filter rules based on VEB/VEPA mode
1755  */
1756 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1757 {
1758 	struct ice_fltr_mgmt_list_entry *fm_entry;
1759 	enum ice_status status = ICE_SUCCESS;
1760 	struct LIST_HEAD_TYPE *rule_head;
1761 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
1762 	struct ice_switch_info *sw;
1763 	sw = hw->switch_info;
1764 
1765 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
1766 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
1767 
1768 	ice_acquire_lock(rule_lock);
1769 	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
1770 			    list_entry) {
1771 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
1772 		u8 *addr = fi->l_data.mac.mac_addr;
1773 
1774 		/* Update unicast Tx rules to reflect the selected
1775 		 * VEB/VEPA mode
1776 		 */
1777 		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
1778 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
1779 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1780 		     fi->fltr_act == ICE_FWD_TO_Q ||
1781 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1782 			status = ice_update_pkt_fwd_rule(hw, fi);
1783 			if (status)
1784 				break;
1785 		}
1786 	}
1787 
1788 	ice_release_lock(rule_lock);
1789 
1790 	return status;
1791 }
1792 
1793 /**
1794  * ice_add_update_vsi_list
1795  * @hw: pointer to the hardware structure
1796  * @m_entry: pointer to current filter management list entry
1797  * @cur_fltr: filter information from the book keeping entry
1798  * @new_fltr: filter information with the new VSI to be added
1799  *
1800  * Call AQ command to add or update previously created VSI list with new VSI.
1801  *
1802  * Helper function to do book keeping associated with adding filter information
1803  * The algorithm to do the book keeping is described below :
1804  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
1805  *	if only one VSI has been added till now
1806  *		Allocate a new VSI list and add two VSIs
1807  *		to this list using switch rule command
1808  *		Update the previously created switch rule with the
1809  *		newly created VSI list ID
1810  *	if a VSI list was previously created
1811  *		Add the new VSI to the previously created VSI list set
1812  *		using the update switch rule command
1813  */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;
	/* Converting a rule that currently forwards to a queue or queue
	 * group into a VSI-list rule is not implemented.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	/* Likewise, a queue/queue-group forward cannot be merged into an
	 * existing forward-to-VSI or forward-to-VSI-list rule.
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Keep the book-keeping entry in sync with the HW rule */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return ICE_ERR_NO_MEMORY;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added;
		 * nothing to do, and vsi_count stays unchanged.
		 */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	/* Count the new subscriber only if all HW updates succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
1909 
1910 /**
1911  * ice_find_rule_entry - Search a rule entry
1912  * @list_head: head of rule list
1913  * @f_info: rule information
1914  *
1915  * Helper function to search for a given rule entry
1916  * Returns pointer to entry storing the rule if found
1917  */
1918 static struct ice_fltr_mgmt_list_entry *
1919 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
1920 		    struct ice_fltr_info *f_info)
1921 {
1922 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
1923 
1924 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
1925 			    list_entry) {
1926 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
1927 			    sizeof(f_info->l_data)) &&
1928 		    f_info->flag == list_itr->fltr_info.flag) {
1929 			ret = list_itr;
1930 			break;
1931 		}
1932 	}
1933 	return ret;
1934 }
1935 
1936 /**
1937  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
1938  * @recp_list: VSI lists needs to be searched
1939  * @vsi_handle: VSI handle to be found in VSI list
1940  * @vsi_list_id: VSI list ID found containing vsi_handle
1941  *
1942  * Helper function to search a VSI list with single entry containing given VSI
1943  * handle element. This can be extended further to search VSI list with more
1944  * than 1 vsi_count. Returns pointer to VSI list entry if found.
1945  */
1946 struct ice_vsi_list_map_info *
1947 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
1948 			u16 *vsi_list_id)
1949 {
1950 	struct ice_vsi_list_map_info *map_info = NULL;
1951 	struct LIST_HEAD_TYPE *list_head;
1952 
1953 	list_head = &recp_list->filt_rules;
1954 	if (recp_list->adv_rule) {
1955 		struct ice_adv_fltr_mgmt_list_entry *list_itr;
1956 
1957 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
1958 				    ice_adv_fltr_mgmt_list_entry,
1959 				    list_entry) {
1960 			if (list_itr->vsi_list_info) {
1961 				map_info = list_itr->vsi_list_info;
1962 				if (ice_is_bit_set(map_info->vsi_map,
1963 						   vsi_handle)) {
1964 					*vsi_list_id = map_info->vsi_list_id;
1965 					return map_info;
1966 				}
1967 			}
1968 		}
1969 	} else {
1970 		struct ice_fltr_mgmt_list_entry *list_itr;
1971 
1972 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
1973 				    ice_fltr_mgmt_list_entry,
1974 				    list_entry) {
1975 			if (list_itr->vsi_count == 1 &&
1976 			    list_itr->vsi_list_info) {
1977 				map_info = list_itr->vsi_list_info;
1978 				if (ice_is_bit_set(map_info->vsi_map,
1979 						   vsi_handle)) {
1980 					*vsi_list_id = map_info->vsi_list_id;
1981 					return map_info;
1982 				}
1983 			}
1984 		}
1985 	}
1986 	return NULL;
1987 }
1988 
1989 /**
1990  * ice_add_rule_internal - add rule for a given lookup type
1991  * @hw: pointer to the hardware structure
1992  * @recp_list: recipe list for which rule has to be added
1993  * @lport: logic port number on which function add rule
1994  * @f_entry: structure containing MAC forwarding information
1995  *
1996  * Adds or updates the rule lists for a given recipe
1997  */
1998 static enum ice_status
1999 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2000 		      u8 lport, struct ice_fltr_list_entry *f_entry)
2001 {
2002 	struct ice_fltr_info *new_fltr, *cur_fltr;
2003 	struct ice_fltr_mgmt_list_entry *m_entry;
2004 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2005 	enum ice_status status = ICE_SUCCESS;
2006 
2007 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2008 		return ICE_ERR_PARAM;
2009 
2010 	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2011 	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2012 		f_entry->fltr_info.fwd_id.hw_vsi_id =
2013 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2014 
2015 	rule_lock = &recp_list->filt_rule_lock;
2016 
2017 	ice_acquire_lock(rule_lock);
2018 	new_fltr = &f_entry->fltr_info;
2019 	if (new_fltr->flag & ICE_FLTR_RX)
2020 		new_fltr->src = lport;
2021 	else if (new_fltr->flag & ICE_FLTR_TX)
2022 		new_fltr->src =
2023 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2024 
2025 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2026 	if (!m_entry) {
2027 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2028 		goto exit_add_rule_internal;
2029 	}
2030 
2031 	cur_fltr = &m_entry->fltr_info;
2032 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2033 
2034 exit_add_rule_internal:
2035 	ice_release_lock(rule_lock);
2036 	return status;
2037 }
2038 
2039 /**
2040  * ice_remove_vsi_list_rule
2041  * @hw: pointer to the hardware structure
2042  * @vsi_list_id: VSI list ID generated as part of allocate resource
2043  * @lkup_type: switch rule filter lookup type
2044  *
2045  * The VSI list should be emptied before this function is called to remove the
2046  * VSI list.
2047  */
2048 static enum ice_status
2049 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2050 			 enum ice_sw_lkup_type lkup_type)
2051 {
2052 	/* Free the vsi_list resource that we allocated. It is assumed that the
2053 	 * list is empty at this point.
2054 	 */
2055 	return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2056 					    ice_aqc_opc_free_res);
2057 }
2058 
2059 /**
2060  * ice_rem_update_vsi_list
2061  * @hw: pointer to the hardware structure
2062  * @vsi_handle: VSI handle of the VSI to remove
2063  * @fm_list: filter management entry for which the VSI list management needs to
2064  *	     be done
2065  */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only forward-to-VSI-list rules with at least one subscribed VSI
	 * can have a VSI removed here.
	 */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Ask FW to drop this VSI from the list (remove flag == true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the FW change in the local book-keeping */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* For non-VLAN lookups a list with a single remaining VSI is
	 * collapsed back into a plain forward-to-VSI rule; VLAN rules
	 * always stay on a VSI list.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Rewrite the switch rule to forward directly to the last
		 * remaining VSI.
		 */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		/* Drop the map entry now that the HW list is gone */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
2146 
2147 /**
2148  * ice_remove_rule_internal - Remove a filter rule of a given type
2149  *
2150  * @hw: pointer to the hardware structure
2151  * @recp_list: recipe list for which the rule needs to removed
2152  * @f_entry: rule entry containing filter information
2153  */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	/* Resolve the handle to the HW VSI number used for rule matching */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
					&f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule forwards to a single VSI/queue: remove it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		ice_free(hw, s_rule);

		if (status)
			goto exit;

		/* HW removal succeeded; drop the tracking entry too */
		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	}
exit:
	ice_release_lock(rule_lock);
	return status;
}
2237 
2238 /**
2239  * ice_aq_get_res_alloc - get allocated resources
2240  * @hw: pointer to the HW struct
2241  * @num_entries: pointer to u16 to store the number of resource entries returned
2242  * @buf: pointer to buffer
2243  * @buf_size: size of buf
2244  * @cd: pointer to command details structure or NULL
2245  *
2246  * The caller-supplied buffer must be large enough to store the resource
2247  * information for all resource types. Each resource type is an
2248  * ice_aqc_get_res_resp_elem structure.
2249  */
2250 enum ice_status
2251 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
2252 		     struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
2253 		     struct ice_sq_cd *cd)
2254 {
2255 	struct ice_aqc_get_res_alloc *resp;
2256 	enum ice_status status;
2257 	struct ice_aq_desc desc;
2258 
2259 	if (!buf)
2260 		return ICE_ERR_BAD_PTR;
2261 
2262 	if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2263 		return ICE_ERR_INVAL_SIZE;
2264 
2265 	resp = &desc.params.get_res;
2266 
2267 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2268 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2269 
2270 	if (!status && num_entries)
2271 		*num_entries = LE16_TO_CPU(resp->resp_elem_num);
2272 
2273 	return status;
2274 }
2275 
2276 /**
2277  * ice_aq_get_res_descs - get allocated resource descriptors
2278  * @hw: pointer to the hardware structure
2279  * @num_entries: number of resource entries in buffer
2280  * @buf: structure to hold response data buffer
2281  * @buf_size: size of buffer
2282  * @res_type: resource type
2283  * @res_shared: is resource shared
2284  * @desc_id: input - first desc ID to start; output - next desc ID
2285  * @cd: pointer to command details structure or NULL
2286  */
2287 enum ice_status
2288 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2289 		     struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
2290 		     bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
2291 {
2292 	struct ice_aqc_get_allocd_res_desc *cmd;
2293 	struct ice_aq_desc desc;
2294 	enum ice_status status;
2295 
2296 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2297 
2298 	cmd = &desc.params.get_res_desc;
2299 
2300 	if (!buf)
2301 		return ICE_ERR_PARAM;
2302 
2303 	if (buf_size != (num_entries * sizeof(*buf)))
2304 		return ICE_ERR_PARAM;
2305 
2306 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2307 
2308 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2309 					 ICE_AQC_RES_TYPE_M) | (res_shared ?
2310 					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2311 	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2312 
2313 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2314 	if (!status)
2315 		*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2316 
2317 	return status;
2318 }
2319 
2320 /**
2321  * ice_add_mac_rule - Add a MAC address based filter rule
2322  * @hw: pointer to the hardware structure
2323  * @m_list: list of MAC addresses and forwarding information
2324  * @sw: pointer to switch info struct for which function add rule
2325  * @lport: logic port number on which function add rule
2326  *
2327  * IMPORTANT: When the umac_shared flag is set to false and m_list has
2328  * multiple unicast addresses, the function assumes that all the
2329  * addresses are unique in a given add_mac call. It doesn't
2330  * check for duplicates in this case, removing duplicates from a given
2331  * list should be taken care of in the caller of this function.
2332  */
static enum ice_status
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		 struct ice_switch_info *sw, u8 lport)
{
	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u8 elem_sent;

	s_rule = NULL;
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	/* First pass: validate each entry and split the work. Multicast (and
	 * shared unicast) addresses are added one at a time; exclusive unicast
	 * addresses are only counted here and added in bulk below.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI)
			m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(rule_head,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				continue;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, recp_list, lport,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Second pass: serialize one switch rule per unicast address */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Cap each AQ call at what fits into one AQ buffer */
		elem_sent = MIN_T(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
2476 
2477 /**
2478  * ice_add_mac - Add a MAC address based filter rule
2479  * @hw: pointer to the hardware structure
2480  * @m_list: list of MAC addresses and forwarding information
2481  *
 * Adds MAC rules for the logical port taken from the HW struct
2483  */
2484 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2485 {
2486 	if (!m_list || !hw)
2487 		return ICE_ERR_PARAM;
2488 
2489 	return ice_add_mac_rule(hw, m_list, hw->switch_info,
2490 				hw->port_info->lport);
2491 }
2492 
2493 /**
2494  * ice_add_vlan_internal - Add one VLAN based filter rule
2495  * @hw: pointer to the hardware structure
2496  * @recp_list: recipe list for which rule has to be added
2497  * @f_entry: filter entry containing one VLAN information
2498  */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!v_list_itr) {
		/* No rule exists yet for this VLAN ID: create one */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		if (!status) {
			/* Re-find the entry that ice_create_pkt_fwd_rule
			 * just inserted so its VSI list map can be set.
			 */
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* The shared list holds exactly one VSI; find its handle */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
2646 
2647 /**
2648  * ice_add_vlan_rule - Add VLAN based filter rule
2649  * @hw: pointer to the hardware structure
2650  * @v_list: list of VLAN entries and forwarding information
2651  * @sw: pointer to switch info struct for which function add rule
2652  */
2653 static enum ice_status
2654 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
2655 		  struct ice_switch_info *sw)
2656 {
2657 	struct ice_fltr_list_entry *v_list_itr;
2658 	struct ice_sw_recipe *recp_list;
2659 
2660 	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
2661 	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
2662 			    list_entry) {
2663 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2664 			return ICE_ERR_PARAM;
2665 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2666 		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
2667 							   v_list_itr);
2668 		if (v_list_itr->status)
2669 			return v_list_itr->status;
2670 	}
2671 	return ICE_SUCCESS;
2672 }
2673 
2674 /**
2675  * ice_add_vlan - Add a VLAN based filter rule
2676  * @hw: pointer to the hardware structure
2677  * @v_list: list of VLAN and forwarding information
2678  *
 * Adds VLAN rules for the logical port taken from the HW struct
2680  */
2681 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
2682 {
2683 	if (!v_list || !hw)
2684 		return ICE_ERR_PARAM;
2685 
2686 	return ice_add_vlan_rule(hw, v_list, hw->switch_info);
2687 }
2688 
2689 /**
2690  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
2691  * @hw: pointer to the hardware structure
2692  * @em_list: list of ether type MAC filter, MAC is optional
2693  * @sw: pointer to switch info struct for which function add rule
2694  * @lport: logic port number on which function add rule
2695  *
2696  * This function requires the caller to populate the entries in
2697  * the filter list with the necessary fields (including flags to
2698  * indicate Tx or Rx rules).
2699  */
2700 static enum ice_status
2701 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
2702 		     struct ice_switch_info *sw, u8 lport)
2703 {
2704 	struct ice_fltr_list_entry *em_list_itr;
2705 
2706 	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
2707 			    list_entry) {
2708 		struct ice_sw_recipe *recp_list;
2709 		enum ice_sw_lkup_type l_type;
2710 
2711 		l_type = em_list_itr->fltr_info.lkup_type;
2712 		recp_list = &sw->recp_list[l_type];
2713 
2714 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2715 		    l_type != ICE_SW_LKUP_ETHERTYPE)
2716 			return ICE_ERR_PARAM;
2717 
2718 		em_list_itr->status = ice_add_rule_internal(hw, recp_list,
2719 							    lport,
2720 							    em_list_itr);
2721 		if (em_list_itr->status)
2722 			return em_list_itr->status;
2723 	}
2724 	return ICE_SUCCESS;
2725 }
2726 
2727 /**
 * ice_add_eth_mac - Add an ethertype-based filter rule
2729  * @hw: pointer to the hardware structure
2730  * @em_list: list of ethertype and forwarding information
2731  *
2732  * Function add ethertype rule for logical port from HW struct
2733  */
2734 enum ice_status
2735 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
2736 {
2737 	if (!em_list || !hw)
2738 		return ICE_ERR_PARAM;
2739 
2740 	return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
2741 				    hw->port_info->lport);
2742 }
2743 
2744 /**
2745  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
2746  * @hw: pointer to the hardware structure
2747  * @em_list: list of ethertype or ethertype MAC entries
2748  * @sw: pointer to switch info struct for which function add rule
2749  */
2750 static enum ice_status
2751 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
2752 			struct ice_switch_info *sw)
2753 {
2754 	struct ice_fltr_list_entry *em_list_itr, *tmp;
2755 
2756 	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
2757 				 list_entry) {
2758 		struct ice_sw_recipe *recp_list;
2759 		enum ice_sw_lkup_type l_type;
2760 
2761 		l_type = em_list_itr->fltr_info.lkup_type;
2762 
2763 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2764 		    l_type != ICE_SW_LKUP_ETHERTYPE)
2765 			return ICE_ERR_PARAM;
2766 
2767 		recp_list = &sw->recp_list[l_type];
2768 		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
2769 							       em_list_itr);
2770 		if (em_list_itr->status)
2771 			return em_list_itr->status;
2772 	}
2773 	return ICE_SUCCESS;
2774 }
2775 
2776 /**
2777  * ice_remove_eth_mac - remove a ethertype based filter rule
2778  * @hw: pointer to the hardware structure
2779  * @em_list: list of ethertype and forwarding information
2780  *
2781  */
2782 enum ice_status
2783 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
2784 {
2785 	if (!em_list || !hw)
2786 		return ICE_ERR_PARAM;
2787 
2788 	return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
2789 }
2790 
2791 /**
2792  * ice_get_lg_act_aqc_res_type - get resource type for a large action
2793  * @res_type: resource type to be filled in case of function success
2794  * @num_acts: number of actions to hold with a large action entry
2795  *
2796  * Get resource type for a large action depending on the number
2797  * of single actions that it contains.
2798  */
static enum ice_status
ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
{
	if (!res_type)
		return ICE_ERR_BAD_PTR;

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is 3 or 4, use ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot be equal to 0 or greater than 4.
	 */
	switch (num_acts) {
	case 1:
		*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1;
		break;
	case 2:
		*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2;
		break;
	case 3:
	case 4:
		*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4;
		break;
	default:
		/* 0 or more than 4 actions cannot be represented */
		return ICE_ERR_PARAM;
	}

	return ICE_SUCCESS;
}
2828 
2829 /**
2830  * ice_alloc_res_lg_act - add large action resource
2831  * @hw: pointer to the hardware structure
2832  * @l_id: large action ID to fill it in
2833  * @num_acts: number of actions to hold with a large action entry
2834  */
2835 static enum ice_status
2836 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
2837 {
2838 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2839 	enum ice_status status;
2840 	u16 buf_len, res_type;
2841 
2842 	if (!l_id)
2843 		return ICE_ERR_BAD_PTR;
2844 
2845 	status = ice_get_lg_act_aqc_res_type(&res_type, num_acts);
2846 	if (status)
2847 		return status;
2848 
2849 	/* Allocate resource for large action */
2850 	buf_len = ice_struct_size(sw_buf, elem, 1);
2851 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2852 	if (!sw_buf)
2853 		return ICE_ERR_NO_MEMORY;
2854 
2855 	sw_buf->res_type = CPU_TO_LE16(res_type);
2856 	sw_buf->num_elems = CPU_TO_LE16(1);
2857 
2858 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2859 				       ice_aqc_opc_alloc_res, NULL);
2860 	if (!status)
2861 		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2862 
2863 	ice_free(hw, sw_buf);
2864 
2865 	return status;
2866 }
2867 
2868 /**
2869  * ice_rem_sw_rule_info
2870  * @hw: pointer to the hardware structure
2871  * @rule_head: pointer to the switch list structure that we want to delete
2872  */
2873 static void
2874 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
2875 {
2876 	if (!LIST_EMPTY(rule_head)) {
2877 		struct ice_fltr_mgmt_list_entry *entry;
2878 		struct ice_fltr_mgmt_list_entry *tmp;
2879 
2880 		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
2881 					 ice_fltr_mgmt_list_entry, list_entry) {
2882 			LIST_DEL(&entry->list_entry);
2883 			ice_free(hw, entry);
2884 		}
2885 	}
2886 }
2887 
2888 /**
2889  * ice_rem_all_sw_rules_info
2890  * @hw: pointer to the hardware structure
2891  */
2892 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
2893 {
2894 	struct ice_switch_info *sw = hw->switch_info;
2895 	u8 i;
2896 
2897 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2898 		struct LIST_HEAD_TYPE *rule_head;
2899 
2900 		rule_head = &sw->recp_list[i].filt_rules;
2901 		if (!sw->recp_list[i].adv_rule)
2902 			ice_rem_sw_rule_info(hw, rule_head);
2903 	}
2904 }
2905 
2906 /**
2907  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
2908  * @pi: pointer to the port_info structure
2909  * @vsi_handle: VSI handle to set as default
2910  * @set: true to add the above mentioned switch rule, false to remove it
2911  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
2912  *
2913  * add filter rule to set/unset given VSI as default VSI for the switch
2914  * (represented by swid)
2915  */
2916 enum ice_status
2917 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
2918 		 u8 direction)
2919 {
2920 	struct ice_fltr_list_entry f_list_entry;
2921 	struct ice_sw_recipe *recp_list;
2922 	struct ice_fltr_info f_info;
2923 	struct ice_hw *hw = pi->hw;
2924 	enum ice_status status;
2925 	u8 lport = pi->lport;
2926 	u16 hw_vsi_id;
2927 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
2928 
2929 	if (!ice_is_vsi_valid(hw, vsi_handle))
2930 		return ICE_ERR_PARAM;
2931 
2932 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2933 
2934 	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
2935 
2936 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
2937 	f_info.flag = direction;
2938 	f_info.fltr_act = ICE_FWD_TO_VSI;
2939 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
2940 	f_info.vsi_handle = vsi_handle;
2941 
2942 	if (f_info.flag & ICE_FLTR_RX) {
2943 		f_info.src = pi->lport;
2944 		f_info.src_id = ICE_SRC_ID_LPORT;
2945 	} else if (f_info.flag & ICE_FLTR_TX) {
2946 		f_info.src_id = ICE_SRC_ID_VSI;
2947 		f_info.src = hw_vsi_id;
2948 	}
2949 	f_list_entry.fltr_info = f_info;
2950 
2951 	if (set)
2952 		status = ice_add_rule_internal(hw, recp_list, lport,
2953 					       &f_list_entry);
2954 	else
2955 		status = ice_remove_rule_internal(hw, recp_list,
2956 						  &f_list_entry);
2957 
2958 	return status;
2959 }
2960 
2961 /**
2962  * ice_check_if_dflt_vsi - check if VSI is default VSI
2963  * @pi: pointer to the port_info structure
2964  * @vsi_handle: vsi handle to check for in filter list
2965  * @rule_exists: indicates if there are any VSI's in the rule list
2966  *
2967  * checks if the VSI is in a default VSI list, and also indicates
2968  * if the default VSI list is empty
2969  */
2970 bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
2971 			   bool *rule_exists)
2972 {
2973 	struct ice_fltr_mgmt_list_entry *fm_entry;
2974 	struct LIST_HEAD_TYPE *rule_head;
2975 	struct ice_sw_recipe *recp_list;
2976 	struct ice_lock *rule_lock;
2977 	bool ret = false;
2978 	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
2979 	rule_lock = &recp_list->filt_rule_lock;
2980 	rule_head = &recp_list->filt_rules;
2981 
2982 	ice_acquire_lock(rule_lock);
2983 
2984 	if (rule_exists && !LIST_EMPTY(rule_head))
2985 		*rule_exists = true;
2986 
2987 	LIST_FOR_EACH_ENTRY(fm_entry, rule_head,
2988 			    ice_fltr_mgmt_list_entry, list_entry) {
2989 		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
2990 			ret = true;
2991 			break;
2992 		}
2993 	}
2994 
2995 	ice_release_lock(rule_lock);
2996 	return ret;
2997 }
2998 
2999 /**
3000  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3001  * @list_head: head of rule list
3002  * @f_info: rule information
3003  *
3004  * Helper function to search for a unicast rule entry - this is to be used
3005  * to remove unicast MAC filter that is not shared with other VSIs on the
3006  * PF switch.
3007  *
3008  * Returns pointer to entry storing the rule if found
3009  */
3010 static struct ice_fltr_mgmt_list_entry *
3011 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3012 			  struct ice_fltr_info *f_info)
3013 {
3014 	struct ice_fltr_mgmt_list_entry *list_itr;
3015 
3016 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3017 			    list_entry) {
3018 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3019 			    sizeof(f_info->l_data)) &&
3020 		    f_info->fwd_id.hw_vsi_id ==
3021 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3022 		    f_info->flag == list_itr->fltr_info.flag)
3023 			return list_itr;
3024 	}
3025 	return NULL;
3026 }
3027 
3028 /**
3029  * ice_remove_mac_rule - remove a MAC based filter rule
3030  * @hw: pointer to the hardware structure
3031  * @m_list: list of MAC addresses and forwarding information
3032  * @recp_list: list from which function remove MAC address
3033  *
3034  * This function removes either a MAC filter rule or a specific VSI from a
3035  * VSI list for a multicast MAC address.
3036  *
3037  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3038  * ice_add_mac. Caller should be aware that this call will only work if all
3039  * the entries passed into m_list were added previously. It will not attempt to
3040  * do a partial remove of entries that were found.
3041  */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
				 list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		/* MAC address bytes of this list entry */
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* Only plain MAC lookups may be removed through this path */
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		/* Translate the software handle into the HW VSI number the
		 * rule was programmed with.
		 */
		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			}
			ice_release_lock(rule_lock);
		}
		/* Per-entry status is recorded so the caller can see which
		 * entry failed; no partial rollback is attempted.
		 */
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return ICE_SUCCESS;
}
3088 
3089 /**
3090  * ice_remove_mac - remove a MAC address based filter rule
3091  * @hw: pointer to the hardware structure
3092  * @m_list: list of MAC addresses and forwarding information
3093  *
3094  */
3095 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3096 {
3097 	struct ice_sw_recipe *recp_list;
3098 
3099 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3100 	return ice_remove_mac_rule(hw, m_list, recp_list);
3101 }
3102 
3103 /**
3104  * ice_remove_vlan_rule - Remove VLAN based filter rule
3105  * @hw: pointer to the hardware structure
3106  * @v_list: list of VLAN entries and forwarding information
3107  * @recp_list: list from which function remove VLAN
3108  */
3109 static enum ice_status
3110 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3111 		     struct ice_sw_recipe *recp_list)
3112 {
3113 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3114 
3115 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3116 				 list_entry) {
3117 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3118 
3119 		if (l_type != ICE_SW_LKUP_VLAN)
3120 			return ICE_ERR_PARAM;
3121 		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3122 							      v_list_itr);
3123 		if (v_list_itr->status)
3124 			return v_list_itr->status;
3125 	}
3126 	return ICE_SUCCESS;
3127 }
3128 
3129 /**
3130  * ice_remove_vlan - remove a VLAN address based filter rule
3131  * @hw: pointer to the hardware structure
3132  * @v_list: list of VLAN and forwarding information
3133  *
3134  */
3135 enum ice_status
3136 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3137 {
3138 	struct ice_sw_recipe *recp_list;
3139 
3140 	if (!v_list || !hw)
3141 		return ICE_ERR_PARAM;
3142 
3143 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3144 	return ice_remove_vlan_rule(hw, v_list, recp_list);
3145 }
3146 
3147 /**
3148  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3149  * @fm_entry: filter entry to inspect
3150  * @vsi_handle: VSI handle to compare with filter info
3151  */
3152 static bool
3153 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3154 {
3155 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3156 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3157 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3158 		 fm_entry->vsi_list_info &&
3159 		 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3160 				 vsi_handle))));
3161 }
3162 
3163 /**
3164  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3165  * @hw: pointer to the hardware structure
3166  * @vsi_handle: VSI handle to remove filters from
3167  * @vsi_list_head: pointer to the list to add entry to
3168  * @fi: pointer to fltr_info of filter entry to copy & add
3169  *
3170  * Helper function, used when creating a list of filters to remove from
3171  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3172  * original filter entry, with the exception of fltr_info.fltr_act and
3173  * fltr_info.fwd_id fields. These are set such that later logic can
3174  * extract which VSI to remove the fltr from, and pass on that information.
3175  */
3176 static enum ice_status
3177 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3178 			       struct LIST_HEAD_TYPE *vsi_list_head,
3179 			       struct ice_fltr_info *fi)
3180 {
3181 	struct ice_fltr_list_entry *tmp;
3182 
3183 	/* this memory is freed up in the caller function
3184 	 * once filters for this VSI are removed
3185 	 */
3186 	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3187 	if (!tmp)
3188 		return ICE_ERR_NO_MEMORY;
3189 
3190 	tmp->fltr_info = *fi;
3191 
3192 	/* Overwrite these fields to indicate which VSI to remove filter from,
3193 	 * so find and remove logic can extract the information from the
3194 	 * list entries. Note that original entries will still have proper
3195 	 * values.
3196 	 */
3197 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3198 	tmp->fltr_info.vsi_handle = vsi_handle;
3199 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3200 
3201 	LIST_ADD(&tmp->list_entry, vsi_list_head);
3202 
3203 	return ICE_SUCCESS;
3204 }
3205 
3206 /**
3207  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3208  * @hw: pointer to the hardware structure
3209  * @vsi_handle: VSI handle to remove filters from
3210  * @lkup_list_head: pointer to the list that has certain lookup type filters
3211  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3212  *
3213  * Locates all filters in lkup_list_head that are used by the given VSI,
3214  * and adds COPIES of those entries to vsi_list_head (intended to be used
3215  * to remove the listed filters).
3216  * Note that this means all entries in vsi_list_head must be explicitly
3217  * deallocated by the caller when done with list.
3218  */
3219 static enum ice_status
3220 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3221 			 struct LIST_HEAD_TYPE *lkup_list_head,
3222 			 struct LIST_HEAD_TYPE *vsi_list_head)
3223 {
3224 	struct ice_fltr_mgmt_list_entry *fm_entry;
3225 	enum ice_status status = ICE_SUCCESS;
3226 
3227 	/* check to make sure VSI ID is valid and within boundary */
3228 	if (!ice_is_vsi_valid(hw, vsi_handle))
3229 		return ICE_ERR_PARAM;
3230 
3231 	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3232 			    ice_fltr_mgmt_list_entry, list_entry) {
3233 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
3234 			continue;
3235 
3236 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3237 							vsi_list_head,
3238 							&fm_entry->fltr_info);
3239 		if (status)
3240 			return status;
3241 	}
3242 	return status;
3243 }
3244 
3245 /**
3246  * ice_determine_promisc_mask
3247  * @fi: filter info to parse
3248  *
3249  * Helper function to determine which ICE_PROMISC_ mask corresponds
3250  * to given filter into.
3251  */
3252 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3253 {
3254 	u16 vid = fi->l_data.mac_vlan.vlan_id;
3255 	u8 *macaddr = fi->l_data.mac.mac_addr;
3256 	bool is_tx_fltr = false;
3257 	u8 promisc_mask = 0;
3258 
3259 	if (fi->flag == ICE_FLTR_TX)
3260 		is_tx_fltr = true;
3261 
3262 	if (IS_BROADCAST_ETHER_ADDR(macaddr))
3263 		promisc_mask |= is_tx_fltr ?
3264 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3265 	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3266 		promisc_mask |= is_tx_fltr ?
3267 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3268 	else if (IS_UNICAST_ETHER_ADDR(macaddr))
3269 		promisc_mask |= is_tx_fltr ?
3270 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3271 	if (vid)
3272 		promisc_mask |= is_tx_fltr ?
3273 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3274 
3275 	return promisc_mask;
3276 }
3277 
3278 /**
3279  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
3280  * @hw: pointer to the hardware structure
3281  * @vsi_handle: VSI handle to retrieve info from
3282  * @promisc_mask: pointer to mask to be filled in
3283  * @vid: VLAN ID of promisc VLAN VSI
3284  * @sw: pointer to switch info struct for which function add rule
3285  * @lkup: switch rule filter lookup type
3286  */
3287 static enum ice_status
3288 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3289 		     u16 *vid, struct ice_switch_info *sw,
3290 		     enum ice_sw_lkup_type lkup)
3291 {
3292 	struct ice_fltr_mgmt_list_entry *itr;
3293 	struct LIST_HEAD_TYPE *rule_head;
3294 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
3295 
3296 	if (!ice_is_vsi_valid(hw, vsi_handle) ||
3297 	    (lkup != ICE_SW_LKUP_PROMISC && lkup != ICE_SW_LKUP_PROMISC_VLAN))
3298 		return ICE_ERR_PARAM;
3299 
3300 	*vid = 0;
3301 	*promisc_mask = 0;
3302 	rule_head = &sw->recp_list[lkup].filt_rules;
3303 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3304 
3305 	ice_acquire_lock(rule_lock);
3306 	LIST_FOR_EACH_ENTRY(itr, rule_head,
3307 			    ice_fltr_mgmt_list_entry, list_entry) {
3308 		/* Continue if this filter doesn't apply to this VSI or the
3309 		 * VSI ID is not in the VSI map for this filter
3310 		 */
3311 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
3312 			continue;
3313 
3314 		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3315 	}
3316 	ice_release_lock(rule_lock);
3317 
3318 	return ICE_SUCCESS;
3319 }
3320 
3321 /**
3322  * ice_get_vsi_promisc - get promiscuous mode of given VSI
3323  * @hw: pointer to the hardware structure
3324  * @vsi_handle: VSI handle to retrieve info from
3325  * @promisc_mask: pointer to mask to be filled in
3326  * @vid: VLAN ID of promisc VLAN VSI
3327  */
3328 enum ice_status
3329 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3330 		    u16 *vid)
3331 {
3332 	return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
3333 				    vid, hw->switch_info, ICE_SW_LKUP_PROMISC);
3334 }
3335 
3336 /**
3337  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3338  * @hw: pointer to the hardware structure
3339  * @vsi_handle: VSI handle to retrieve info from
3340  * @promisc_mask: pointer to mask to be filled in
3341  * @vid: VLAN ID of promisc VLAN VSI
3342  */
3343 enum ice_status
3344 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3345 			 u16 *vid)
3346 {
3347 	return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
3348 				    vid, hw->switch_info,
3349 				    ICE_SW_LKUP_PROMISC_VLAN);
3350 }
3351 
3352 /**
3353  * ice_remove_promisc - Remove promisc based filter rules
3354  * @hw: pointer to the hardware structure
3355  * @recp_id: recipe ID for which the rule needs to removed
3356  * @v_list: list of promisc entries
3357  */
3358 static enum ice_status
3359 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3360 		   struct LIST_HEAD_TYPE *v_list)
3361 {
3362 	struct ice_fltr_list_entry *v_list_itr, *tmp;
3363 	struct ice_sw_recipe *recp_list;
3364 
3365 	recp_list = &hw->switch_info->recp_list[recp_id];
3366 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3367 				 list_entry) {
3368 		v_list_itr->status =
3369 			ice_remove_rule_internal(hw, recp_list, v_list_itr);
3370 		if (v_list_itr->status)
3371 			return v_list_itr->status;
3372 	}
3373 	return ICE_SUCCESS;
3374 }
3375 
3376 /**
3377  * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
3378  * @hw: pointer to the hardware structure
3379  * @vsi_handle: VSI handle to clear mode
3380  * @promisc_mask: mask of promiscuous config bits to clear
3381  * @vid: VLAN ID to clear VLAN promiscuous
3382  * @sw: pointer to switch info struct for which function add rule
3383  */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN-tagged promiscuous modes live under a separate recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect copies of the matching rules while holding the lock;
	 * the actual removal happens after the lock is dropped.
	 */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* For VLAN promisc rules, only the requested VLAN matters */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the temporary copies regardless of success or failure */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
3450 
3451 /**
3452  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3453  * @hw: pointer to the hardware structure
3454  * @vsi_handle: VSI handle to clear mode
3455  * @promisc_mask: mask of promiscuous config bits to clear
3456  * @vid: VLAN ID to clear VLAN promiscuous
3457  */
3458 enum ice_status
3459 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
3460 		      u8 promisc_mask, u16 vid)
3461 {
3462 	return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
3463 				      vid, hw->switch_info);
3464 }
3465 
3466 /**
3467  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3468  * @hw: pointer to the hardware structure
3469  * @vsi_handle: VSI handle to configure
3470  * @promisc_mask: mask of promiscuous config bits
3471  * @vid: VLAN ID to set VLAN promiscuous
3472  * @lport: logical port number to configure promisc mode
3473  * @sw: pointer to switch info struct for which function add rule
3474  */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN bits in the mask select the VLAN-aware promisc recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Consume exactly one MAC-class bit per iteration; the
		 * if/else chain fixes the order bits are processed in.
		 */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
3586 
3587 /**
3588  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3589  * @hw: pointer to the hardware structure
3590  * @vsi_handle: VSI handle to configure
3591  * @promisc_mask: mask of promiscuous config bits
3592  * @vid: VLAN ID to set VLAN promiscuous
3593  */
3594 enum ice_status
3595 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3596 		    u16 vid)
3597 {
3598 	return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
3599 				    hw->port_info->lport,
3600 				    hw->switch_info);
3601 }
3602 
3603 /**
3604  * _ice_set_vlan_vsi_promisc
3605  * @hw: pointer to the hardware structure
3606  * @vsi_handle: VSI handle to configure
3607  * @promisc_mask: mask of promiscuous config bits
3608  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3609  * @lport: logical port number to configure promisc mode
3610  * @sw: pointer to switch info struct for which function add rule
3611  *
3612  * Configure VSI with all associated VLANs to given promiscuous mode(s)
3613  */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot this VSI's VLAN filters under the lock, then drop it
	 * before issuing the per-VLAN set/clear calls below.
	 */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		/* Avoid enabling or disabling vlan zero twice when in double
		 * vlan mode
		 */
		if (ice_is_dvm_ena(hw) &&
		    list_itr->fltr_info.l_data.vlan.tpid == 0)
			continue;

		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		/* A rule that already exists is not treated as a failure */
		if (status && status != ICE_ERR_ALREADY_EXISTS)
			break;
	}

free_fltr_list:
	/* Free the snapshot entries in all cases */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
3666 
3667 /**
3668  * ice_set_vlan_vsi_promisc
3669  * @hw: pointer to the hardware structure
3670  * @vsi_handle: VSI handle to configure
3671  * @promisc_mask: mask of promiscuous config bits
3672  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3673  *
3674  * Configure VSI with all associated VLANs to given promiscuous mode(s)
3675  */
3676 enum ice_status
3677 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3678 			 bool rm_vlan_promisc)
3679 {
3680 	return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
3681 					 rm_vlan_promisc, hw->port_info->lport,
3682 					 hw->switch_info);
3683 }
3684 
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Collects every filter of type @lkup that references @vsi_handle into a
 * temporary list, dispatches to the per-type removal routine, then frees
 * the temporary entries.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	/* Copy matching rules while holding the lock; the lock is released
	 * before invoking the removal routines below.
	 */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		goto free_fltr_list;

	/* Dispatch to the removal routine for this lookup type; unsupported
	 * types only emit a debug message.
	 */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, (u8)lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

free_fltr_list:
	/* Free the temporary list entries allocated above */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
3747 
3748 /**
3749  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
3750  * @hw: pointer to the hardware structure
3751  * @vsi_handle: VSI handle to remove filters from
3752  * @sw: pointer to switch info struct
3753  */
3754 static void
3755 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
3756 			 struct ice_switch_info *sw)
3757 {
3758 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3759 
3760 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3761 				 sw->recp_list, ICE_SW_LKUP_MAC);
3762 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3763 				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
3764 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3765 				 sw->recp_list, ICE_SW_LKUP_PROMISC);
3766 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3767 				 sw->recp_list, ICE_SW_LKUP_VLAN);
3768 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3769 				 sw->recp_list, ICE_SW_LKUP_DFLT);
3770 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3771 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
3772 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3773 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
3774 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
3775 				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
3776 }
3777 
3778 /**
3779  * ice_remove_vsi_fltr - Remove all filters for a VSI
3780  * @hw: pointer to the hardware structure
3781  * @vsi_handle: VSI handle to remove filters from
3782  */
3783 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3784 {
3785 	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
3786 }
3787 
3788 /**
3789  * ice_alloc_res_cntr - allocating resource counter
3790  * @hw: pointer to the hardware structure
3791  * @type: type of resource
3792  * @alloc_shared: if set it is shared else dedicated
3793  * @num_items: number of entries requested for FD resource type
3794  * @counter_id: counter index returned by AQ call
3795  */
3796 static enum ice_status
3797 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3798 		   u16 *counter_id)
3799 {
3800 	struct ice_aqc_alloc_free_res_elem *buf;
3801 	enum ice_status status;
3802 	u16 buf_len;
3803 
3804 	/* Allocate resource */
3805 	buf_len = ice_struct_size(buf, elem, 1);
3806 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3807 	if (!buf)
3808 		return ICE_ERR_NO_MEMORY;
3809 
3810 	buf->num_elems = CPU_TO_LE16(num_items);
3811 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3812 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
3813 
3814 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3815 				       ice_aqc_opc_alloc_res, NULL);
3816 	if (status)
3817 		goto exit;
3818 
3819 	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
3820 
3821 exit:
3822 	ice_free(hw, buf);
3823 	return status;
3824 }
3825 
3826 /**
3827  * ice_free_res_cntr - free resource counter
3828  * @hw: pointer to the hardware structure
3829  * @type: type of resource
3830  * @alloc_shared: if set it is shared else dedicated
3831  * @num_items: number of entries to be freed for FD resource type
3832  * @counter_id: counter ID resource which needs to be freed
3833  */
3834 static enum ice_status
3835 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3836 		  u16 counter_id)
3837 {
3838 	struct ice_aqc_alloc_free_res_elem *buf;
3839 	enum ice_status status;
3840 	u16 buf_len;
3841 
3842 	/* Free resource */
3843 	buf_len = ice_struct_size(buf, elem, 1);
3844 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3845 	if (!buf)
3846 		return ICE_ERR_NO_MEMORY;
3847 
3848 	buf->num_elems = CPU_TO_LE16(num_items);
3849 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3850 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
3851 	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
3852 
3853 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3854 				       ice_aqc_opc_free_res, NULL);
3855 	if (status)
3856 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
3857 
3858 	ice_free(hw, buf);
3859 	return status;
3860 }
3861 
3862 /**
3863  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
3864  * @hw: pointer to the hardware structure
3865  * @counter_id: returns counter index
3866  */
3867 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
3868 {
3869 	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3870 				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
3871 				  counter_id);
3872 }
3873 
3874 /**
3875  * ice_free_vlan_res_counter - Free counter resource for VLAN type
3876  * @hw: pointer to the hardware structure
3877  * @counter_id: counter index to be freed
3878  */
3879 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
3880 {
3881 	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3882 				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
3883 				 counter_id);
3884 }
3885 
3886 /**
3887  * ice_add_mac_with_sw_marker - add filter with sw marker
3888  * @hw: pointer to the hardware structure
3889  * @f_info: filter info structure containing the MAC filter information
3890  * @sw_marker: sw marker to tag the Rx descriptor with
3891  */
3892 enum ice_status
3893 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
3894 			   u16 sw_marker)
3895 {
3896 	struct ice_fltr_mgmt_list_entry *m_entry;
3897 	struct ice_fltr_list_entry fl_info;
3898 	struct ice_sw_recipe *recp_list;
3899 	struct LIST_HEAD_TYPE l_head;
3900 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
3901 	enum ice_status ret;
3902 	bool entry_exists;
3903 	u16 lg_act_id;
3904 
3905 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
3906 		return ICE_ERR_PARAM;
3907 
3908 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
3909 		return ICE_ERR_PARAM;
3910 
3911 	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
3912 		return ICE_ERR_PARAM;
3913 
3914 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
3915 		return ICE_ERR_PARAM;
3916 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
3917 
3918 	/* Add filter if it doesn't exist so then the adding of large
3919 	 * action always results in update
3920 	 */
3921 
3922 	INIT_LIST_HEAD(&l_head);
3923 	fl_info.fltr_info = *f_info;
3924 	LIST_ADD(&fl_info.list_entry, &l_head);
3925 
3926 	entry_exists = false;
3927 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
3928 			       hw->port_info->lport);
3929 	if (ret == ICE_ERR_ALREADY_EXISTS)
3930 		entry_exists = true;
3931 	else if (ret)
3932 		return ret;
3933 
3934 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3935 	rule_lock = &recp_list->filt_rule_lock;
3936 	ice_acquire_lock(rule_lock);
3937 	/* Get the book keeping entry for the filter */
3938 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
3939 	if (!m_entry)
3940 		goto exit_error;
3941 
3942 	/* If counter action was enabled for this rule then don't enable
3943 	 * sw marker large action
3944 	 */
3945 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
3946 		ret = ICE_ERR_PARAM;
3947 		goto exit_error;
3948 	}
3949 
3950 	/* if same marker was added before */
3951 	if (m_entry->sw_marker_id == sw_marker) {
3952 		ret = ICE_ERR_ALREADY_EXISTS;
3953 		goto exit_error;
3954 	}
3955 
3956 	/* Allocate a hardware table entry to hold large act. Three actions
3957 	 * for marker based large action
3958 	 */
3959 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
3960 	if (ret)
3961 		goto exit_error;
3962 
3963 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
3964 		goto exit_error;
3965 
3966 	/* Update the switch rule to add the marker action */
3967 	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
3968 	if (!ret) {
3969 		ice_release_lock(rule_lock);
3970 		return ret;
3971 	}
3972 
3973 exit_error:
3974 	ice_release_lock(rule_lock);
3975 	/* only remove entry if it did not exist previously */
3976 	if (!entry_exists)
3977 		ret = ice_remove_mac(hw, &l_head);
3978 
3979 	return ret;
3980 }
3981 
3982 /**
3983  * ice_add_mac_with_counter - add filter with counter enabled
3984  * @hw: pointer to the hardware structure
3985  * @f_info: pointer to filter info structure containing the MAC filter
3986  *          information
3987  */
3988 enum ice_status
3989 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
3990 {
3991 	struct ice_fltr_mgmt_list_entry *m_entry;
3992 	struct ice_fltr_list_entry fl_info;
3993 	struct ice_sw_recipe *recp_list;
3994 	struct LIST_HEAD_TYPE l_head;
3995 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
3996 	enum ice_status ret;
3997 	bool entry_exist;
3998 	u16 counter_id;
3999 	u16 lg_act_id;
4000 
4001 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
4002 		return ICE_ERR_PARAM;
4003 
4004 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4005 		return ICE_ERR_PARAM;
4006 
4007 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4008 		return ICE_ERR_PARAM;
4009 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4010 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4011 
4012 	entry_exist = false;
4013 
4014 	rule_lock = &recp_list->filt_rule_lock;
4015 
4016 	/* Add filter if it doesn't exist so then the adding of large
4017 	 * action always results in update
4018 	 */
4019 	INIT_LIST_HEAD(&l_head);
4020 
4021 	fl_info.fltr_info = *f_info;
4022 	LIST_ADD(&fl_info.list_entry, &l_head);
4023 
4024 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4025 			       hw->port_info->lport);
4026 	if (ret == ICE_ERR_ALREADY_EXISTS)
4027 		entry_exist = true;
4028 	else if (ret)
4029 		return ret;
4030 
4031 	ice_acquire_lock(rule_lock);
4032 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4033 	if (!m_entry) {
4034 		ret = ICE_ERR_BAD_PTR;
4035 		goto exit_error;
4036 	}
4037 
4038 	/* Don't enable counter for a filter for which sw marker was enabled */
4039 	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4040 		ret = ICE_ERR_PARAM;
4041 		goto exit_error;
4042 	}
4043 
4044 	/* If a counter was already enabled then don't need to add again */
4045 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4046 		ret = ICE_ERR_ALREADY_EXISTS;
4047 		goto exit_error;
4048 	}
4049 
4050 	/* Allocate a hardware table entry to VLAN counter */
4051 	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4052 	if (ret)
4053 		goto exit_error;
4054 
4055 	/* Allocate a hardware table entry to hold large act. Two actions for
4056 	 * counter based large action
4057 	 */
4058 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4059 	if (ret)
4060 		goto exit_error;
4061 
4062 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4063 		goto exit_error;
4064 
4065 	/* Update the switch rule to add the counter action */
4066 	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4067 	if (!ret) {
4068 		ice_release_lock(rule_lock);
4069 		return ret;
4070 	}
4071 
4072 exit_error:
4073 	ice_release_lock(rule_lock);
4074 	/* only remove entry if it did not exist previously */
4075 	if (!entry_exist)
4076 		ret = ice_remove_mac(hw, &l_head);
4077 
4078 	return ret;
4079 }
4080 
/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters needs to be replayed
 *
 * Splices @list_head onto a temporary list and re-adds each saved filter
 * through the normal add paths, rebuilding the bookkeeping state.
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u8 lport = hw->port_info->lport;
	struct LIST_HEAD_TYPE l_head;

	if (LIST_EMPTY(list_head))
		return status;

	recp_list = &hw->switch_info->recp_list[recp_id];
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;
		u16 vsi_handle;

		f_entry.fltr_info = itr->fltr_info;
		/* Non-VLAN rules used by at most one VSI can be re-added
		 * directly from the saved filter info.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_list, lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		/* NOTE(review): dereferences itr->vsi_list_info; presumably
		 * non-NULL whenever vsi_count >= 2 or the rule is a VLAN
		 * rule - confirm against the add paths.
		 */
		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
				     ICE_MAX_VSI) {
			if (!ice_is_vsi_valid(hw, vsi_handle))
				break;

			/* Clear the bit so the add path recreates the VSI
			 * list membership from scratch.
			 */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, recp_list,
							       &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_list,
							       lport,
							       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}
4150 
4151 /**
4152  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
4153  * @hw: pointer to the hardware structure
4154  *
4155  * NOTE: This function does not clean up partially added filters on error.
4156  * It is up to caller of the function to issue a reset or fail early.
4157  */
4158 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
4159 {
4160 	struct ice_switch_info *sw = hw->switch_info;
4161 	enum ice_status status = ICE_SUCCESS;
4162 	u8 i;
4163 
4164 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4165 		struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
4166 
4167 		status = ice_replay_fltr(hw, i, head);
4168 		if (status != ICE_SUCCESS)
4169 			return status;
4170 	}
4171 	return status;
4172 }
4173 
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @pi: pointer to port information structure
 * @sw: pointer to switch info struct for which function replays filters
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u16 hw_vsi_id;

	if (LIST_EMPTY(list_head))
		return status;
	recp_list = &sw->recp_list[recp_id];
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Non-VLAN rule that only this VSI uses: re-add it directly
		 * from the saved filter info.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}
		/* Skip rules whose VSI list does not include this VSI */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
			continue;
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
		if (status != ICE_SUCCESS)
			goto end;
	}
end:
	return status;
}
4240 
4241 /**
4242  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
4243  * @hw: pointer to the hardware structure
4244  * @pi: pointer to port information structure
4245  * @vsi_handle: driver VSI handle
4246  *
4247  * Replays filters for requested VSI via vsi_handle.
4248  */
4249 enum ice_status
4250 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
4251 			u16 vsi_handle)
4252 {
4253 struct ice_switch_info *sw;
4254 	enum ice_status status = ICE_SUCCESS;
4255 	u8 i;
4256 
4257 	sw = hw->switch_info;
4258 
4259 	/* Update the recipes that were created */
4260 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4261 		struct LIST_HEAD_TYPE *head;
4262 
4263 		head = &sw->recp_list[i].filt_replay_rules;
4264 		if (!sw->recp_list[i].adv_rule)
4265 			status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
4266 						     head);
4267 		if (status != ICE_SUCCESS)
4268 			return status;
4269 	}
4270 
4271 	return ICE_SUCCESS;
4272 }
4273 
4274 /**
4275  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
4276  * @hw: pointer to the HW struct
4277  * @sw: pointer to switch info struct for which function removes filters
4278  *
4279  * Deletes the filter replay rules for given switch
4280  */
4281 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
4282 {
4283 	u8 i;
4284 
4285 	if (!sw)
4286 		return;
4287 
4288 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4289 		if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
4290 			struct LIST_HEAD_TYPE *l_head;
4291 
4292 			l_head = &sw->recp_list[i].filt_replay_rules;
4293 			if (!sw->recp_list[i].adv_rule)
4294 				ice_rem_sw_rule_info(hw, l_head);
4295 		}
4296 	}
4297 }
4298 
4299 /**
4300  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
4301  * @hw: pointer to the HW struct
4302  *
4303  * Deletes the filter replay rules.
4304  */
4305 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
4306 {
4307 	ice_rm_sw_replay_rule_info(hw, hw->switch_info);
4308 }
4309 
4310