xref: /freebsd/sys/dev/ice/ice_flex_pipe.c (revision 5e3190f700637fcfc1a52daeaa4a031fdd2557c7)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "ice_common.h"
33 #include "ice_ddp_common.h"
34 #include "ice_flex_pipe.h"
35 #include "ice_protocol_type.h"
36 #include "ice_flow.h"
37 
38 static const struct ice_tunnel_type_scan tnls[] = {
39 	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
40 	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
41 	{ TNL_LAST,		"" }
42 };
43 
44 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
45 	/* SWITCH */
46 	{
47 		ICE_SID_XLT0_SW,
48 		ICE_SID_XLT_KEY_BUILDER_SW,
49 		ICE_SID_XLT1_SW,
50 		ICE_SID_XLT2_SW,
51 		ICE_SID_PROFID_TCAM_SW,
52 		ICE_SID_PROFID_REDIR_SW,
53 		ICE_SID_FLD_VEC_SW,
54 		ICE_SID_CDID_KEY_BUILDER_SW,
55 		ICE_SID_CDID_REDIR_SW
56 	},
57 
58 	/* ACL */
59 	{
60 		ICE_SID_XLT0_ACL,
61 		ICE_SID_XLT_KEY_BUILDER_ACL,
62 		ICE_SID_XLT1_ACL,
63 		ICE_SID_XLT2_ACL,
64 		ICE_SID_PROFID_TCAM_ACL,
65 		ICE_SID_PROFID_REDIR_ACL,
66 		ICE_SID_FLD_VEC_ACL,
67 		ICE_SID_CDID_KEY_BUILDER_ACL,
68 		ICE_SID_CDID_REDIR_ACL
69 	},
70 
71 	/* FD */
72 	{
73 		ICE_SID_XLT0_FD,
74 		ICE_SID_XLT_KEY_BUILDER_FD,
75 		ICE_SID_XLT1_FD,
76 		ICE_SID_XLT2_FD,
77 		ICE_SID_PROFID_TCAM_FD,
78 		ICE_SID_PROFID_REDIR_FD,
79 		ICE_SID_FLD_VEC_FD,
80 		ICE_SID_CDID_KEY_BUILDER_FD,
81 		ICE_SID_CDID_REDIR_FD
82 	},
83 
84 	/* RSS */
85 	{
86 		ICE_SID_XLT0_RSS,
87 		ICE_SID_XLT_KEY_BUILDER_RSS,
88 		ICE_SID_XLT1_RSS,
89 		ICE_SID_XLT2_RSS,
90 		ICE_SID_PROFID_TCAM_RSS,
91 		ICE_SID_PROFID_REDIR_RSS,
92 		ICE_SID_FLD_VEC_RSS,
93 		ICE_SID_CDID_KEY_BUILDER_RSS,
94 		ICE_SID_CDID_REDIR_RSS
95 	},
96 
97 	/* PE */
98 	{
99 		ICE_SID_XLT0_PE,
100 		ICE_SID_XLT_KEY_BUILDER_PE,
101 		ICE_SID_XLT1_PE,
102 		ICE_SID_XLT2_PE,
103 		ICE_SID_PROFID_TCAM_PE,
104 		ICE_SID_PROFID_REDIR_PE,
105 		ICE_SID_FLD_VEC_PE,
106 		ICE_SID_CDID_KEY_BUILDER_PE,
107 		ICE_SID_CDID_REDIR_PE
108 	}
109 };
110 
111 /**
112  * ice_sect_id - returns section ID
113  * @blk: block type
114  * @sect: section type
115  *
116  * This helper function returns the proper section ID given a block type and a
117  * section type.
118  */
119 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
120 {
121 	return ice_sect_lkup[blk][sect];
122 }
123 
124 /**
125  * ice_add_tunnel_hint
126  * @hw: pointer to the HW structure
127  * @label_name: label text
128  * @val: value of the tunnel port boost entry
129  */
130 void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
131 {
132 	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
133 		u16 i;
134 
135 		for (i = 0; tnls[i].type != TNL_LAST; i++) {
136 			size_t len = strlen(tnls[i].label_prefix);
137 
138 			/* Look for matching label start, before continuing */
139 			if (strncmp(label_name, tnls[i].label_prefix, len))
140 				continue;
141 
142 			/* Make sure this label matches our PF. Note that the PF
143 			 * character ('0' - '7') will be located where our
144 			 * prefix string's null terminator is located.
145 			 */
146 			if ((label_name[len] - '0') == hw->pf_id) {
147 				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
148 				hw->tnl.tbl[hw->tnl.count].valid = false;
149 				hw->tnl.tbl[hw->tnl.count].in_use = false;
150 				hw->tnl.tbl[hw->tnl.count].marked = false;
151 				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
152 				hw->tnl.tbl[hw->tnl.count].port = 0;
153 				hw->tnl.count++;
154 				break;
155 			}
156 		}
157 	}
158 }
159 
160 /* Key creation */
161 
162 #define ICE_DC_KEY	0x1	/* don't care */
163 #define ICE_DC_KEYINV	0x1
164 #define ICE_NM_KEY	0x0	/* never match */
165 #define ICE_NM_KEYINV	0x0
166 #define ICE_0_KEY	0x1	/* match 0 */
167 #define ICE_0_KEYINV	0x0
168 #define ICE_1_KEY	0x0	/* match 1 */
169 #define ICE_1_KEYINV	0x1
170 
171 /**
172  * ice_gen_key_word - generate 16-bits of a key/mask word
173  * @val: the value
174  * @valid: valid bits mask (change only the valid bits)
175  * @dont_care: don't care mask
176  * @nvr_mtch: never match mask
177  * @key: pointer to an array of where the resulting key portion
178  * @key_inv: pointer to an array of where the resulting key invert portion
179  *
180  * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask
181  * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
182  * of key and 8 bits of key invert.
183  *
184  *     '0' =    b01, always match a 0 bit
185  *     '1' =    b10, always match a 1 bit
186  *     '?' =    b11, don't care bit (always matches)
187  *     '~' =    b00, never match bit
188  *
189  * Input:
190  *          val:         b0  1  0  1  0  1
191  *          dont_care:   b0  0  1  1  0  0
192  *          never_mtch:  b0  0  0  0  1  1
193  *          ------------------------------
194  * Result:  key:        b01 10 11 11 00 00
195  */
196 static enum ice_status
197 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
198 		 u8 *key_inv)
199 {
200 	u8 in_key = *key, in_key_inv = *key_inv;
201 	u8 i;
202 
203 	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
204 	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
205 		return ICE_ERR_CFG;
206 
207 	*key = 0;
208 	*key_inv = 0;
209 
210 	/* encode the 8 bits into 8-bit key and 8-bit key invert */
211 	for (i = 0; i < 8; i++) {
212 		*key >>= 1;
213 		*key_inv >>= 1;
214 
215 		if (!(valid & 0x1)) { /* change only valid bits */
216 			*key |= (in_key & 0x1) << 7;
217 			*key_inv |= (in_key_inv & 0x1) << 7;
218 		} else if (dont_care & 0x1) { /* don't care bit */
219 			*key |= ICE_DC_KEY << 7;
220 			*key_inv |= ICE_DC_KEYINV << 7;
221 		} else if (nvr_mtch & 0x1) { /* never match bit */
222 			*key |= ICE_NM_KEY << 7;
223 			*key_inv |= ICE_NM_KEYINV << 7;
224 		} else if (val & 0x01) { /* exact 1 match */
225 			*key |= ICE_1_KEY << 7;
226 			*key_inv |= ICE_1_KEYINV << 7;
227 		} else { /* exact 0 match */
228 			*key |= ICE_0_KEY << 7;
229 			*key_inv |= ICE_0_KEYINV << 7;
230 		}
231 
232 		dont_care >>= 1;
233 		nvr_mtch >>= 1;
234 		valid >>= 1;
235 		val >>= 1;
236 		in_key >>= 1;
237 		in_key_inv >>= 1;
238 	}
239 
240 	return ICE_SUCCESS;
241 }
242 
243 /**
244  * ice_bits_max_set - determine if the number of bits set is within a maximum
245  * @mask: pointer to the byte array which is the mask
246  * @size: the number of bytes in the mask
247  * @max: the max number of set bits
248  *
249  * This function determines if there are at most 'max' number of bits set in an
250  * array. Returns true if the number for bits set is <= max or will return false
251  * otherwise.
252  */
253 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
254 {
255 	u16 count = 0;
256 	u16 i;
257 
258 	/* check each byte */
259 	for (i = 0; i < size; i++) {
260 		/* if 0, go to next byte */
261 		if (!mask[i])
262 			continue;
263 
264 		/* We know there is at least one set bit in this byte because of
265 		 * the above check; if we already have found 'max' number of
266 		 * bits set, then we can return failure now.
267 		 */
268 		if (count == max)
269 			return false;
270 
271 		/* count the bits in this byte, checking threshold */
272 		count += ice_hweight8(mask[i]);
273 		if (count > max)
274 			return false;
275 	}
276 
277 	return true;
278 }
279 
280 /**
281  * ice_set_key - generate a variable sized key with multiples of 16-bits
282  * @key: pointer to where the key will be stored
283  * @size: the size of the complete key in bytes (must be even)
284  * @val: array of 8-bit values that makes up the value portion of the key
285  * @upd: array of 8-bit masks that determine what key portion to update
286  * @dc: array of 8-bit masks that make up the don't care mask
287  * @nm: array of 8-bit masks that make up the never match mask
288  * @off: the offset of the first byte in the key to update
289  * @len: the number of bytes in the key update
290  *
291  * This function generates a key from a value, a don't care mask and a never
292  * match mask.
293  * upd, dc, and nm are optional parameters, and can be NULL:
294  *	upd == NULL --> upd mask is all 1's (update all bits)
295  *	dc == NULL --> dc mask is all 0's (no don't care bits)
296  *	nm == NULL --> nm mask is all 0's (no never match bits)
297  */
298 static enum ice_status
299 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
300 	    u16 len)
301 {
302 	u16 half_size;
303 	u16 i;
304 
305 	/* size must be a multiple of 2 bytes. */
306 	if (size % 2)
307 		return ICE_ERR_CFG;
308 	half_size = size / 2;
309 
310 	if (off + len > half_size)
311 		return ICE_ERR_CFG;
312 
313 	/* Make sure at most one bit is set in the never match mask. Having more
314 	 * than one never match mask bit set will cause HW to consume excessive
315 	 * power otherwise; this is a power management efficiency check.
316 	 */
317 #define ICE_NVR_MTCH_BITS_MAX	1
318 	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
319 		return ICE_ERR_CFG;
320 
321 	for (i = 0; i < len; i++)
322 		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
323 				     dc ? dc[i] : 0, nm ? nm[i] : 0,
324 				     key + off + i, key + half_size + off + i))
325 			return ICE_ERR_CFG;
326 
327 	return ICE_SUCCESS;
328 }
329 
330 /**
331  * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
332  * @hw: pointer to the HW structure
333  * @port: port to search for
334  * @index: optionally returns index
335  *
336  * Returns whether a port is already in use as a tunnel, and optionally its
337  * index
338  */
339 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
340 {
341 	u16 i;
342 
343 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
344 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
345 			if (index)
346 				*index = i;
347 			return true;
348 		}
349 
350 	return false;
351 }
352 
353 /**
354  * ice_tunnel_port_in_use
355  * @hw: pointer to the HW structure
356  * @port: port to search for
357  * @index: optionally returns index
358  *
359  * Returns whether a port is already in use as a tunnel, and optionally its
360  * index
361  */
362 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
363 {
364 	bool res;
365 
366 	ice_acquire_lock(&hw->tnl_lock);
367 	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
368 	ice_release_lock(&hw->tnl_lock);
369 
370 	return res;
371 }
372 
373 /**
374  * ice_tunnel_get_type
375  * @hw: pointer to the HW structure
376  * @port: port to search for
377  * @type: returns tunnel index
378  *
379  * For a given port number, will return the type of tunnel.
380  */
381 bool
382 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
383 {
384 	bool res = false;
385 	u16 i;
386 
387 	ice_acquire_lock(&hw->tnl_lock);
388 
389 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
390 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
391 			*type = hw->tnl.tbl[i].type;
392 			res = true;
393 			break;
394 		}
395 
396 	ice_release_lock(&hw->tnl_lock);
397 
398 	return res;
399 }
400 
401 /**
402  * ice_find_free_tunnel_entry
403  * @hw: pointer to the HW structure
404  * @type: tunnel type
405  * @index: optionally returns index
406  *
407  * Returns whether there is a free tunnel entry, and optionally its index
408  */
409 static bool
410 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
411 			   u16 *index)
412 {
413 	u16 i;
414 
415 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
416 		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
417 		    hw->tnl.tbl[i].type == type) {
418 			if (index)
419 				*index = i;
420 			return true;
421 		}
422 
423 	return false;
424 }
425 
426 /**
427  * ice_get_open_tunnel_port - retrieve an open tunnel port
428  * @hw: pointer to the HW structure
429  * @type: tunnel type (TNL_ALL will return any open port)
430  * @port: returns open port
431  */
432 bool
433 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
434 			 u16 *port)
435 {
436 	bool res = false;
437 	u16 i;
438 
439 	ice_acquire_lock(&hw->tnl_lock);
440 
441 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
442 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
443 		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
444 			*port = hw->tnl.tbl[i].port;
445 			res = true;
446 			break;
447 		}
448 
449 	ice_release_lock(&hw->tnl_lock);
450 
451 	return res;
452 }
453 
454 /**
455  * ice_create_tunnel
456  * @hw: pointer to the HW structure
457  * @type: type of tunnel
458  * @port: port of tunnel to create
459  *
460  * Create a tunnel by updating the parse graph in the parser. We do that by
461  * creating a package buffer with the tunnel info and issuing an update package
462  * command.
463  */
464 enum ice_status
465 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
466 {
467 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
468 	enum ice_status status = ICE_ERR_MAX_LIMIT;
469 	struct ice_buf_build *bld;
470 	u16 index;
471 
472 	ice_acquire_lock(&hw->tnl_lock);
473 
474 	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
475 		hw->tnl.tbl[index].ref++;
476 		status = ICE_SUCCESS;
477 		goto ice_create_tunnel_end;
478 	}
479 
480 	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
481 		status = ICE_ERR_OUT_OF_RANGE;
482 		goto ice_create_tunnel_end;
483 	}
484 
485 	bld = ice_pkg_buf_alloc(hw);
486 	if (!bld) {
487 		status = ICE_ERR_NO_MEMORY;
488 		goto ice_create_tunnel_end;
489 	}
490 
491 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
492 	if (ice_pkg_buf_reserve_section(bld, 2))
493 		goto ice_create_tunnel_err;
494 
495 	sect_rx = (struct ice_boost_tcam_section *)
496 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
497 					  ice_struct_size(sect_rx, tcam, 1));
498 	if (!sect_rx)
499 		goto ice_create_tunnel_err;
500 	sect_rx->count = CPU_TO_LE16(1);
501 
502 	sect_tx = (struct ice_boost_tcam_section *)
503 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
504 					  ice_struct_size(sect_tx, tcam, 1));
505 	if (!sect_tx)
506 		goto ice_create_tunnel_err;
507 	sect_tx->count = CPU_TO_LE16(1);
508 
509 	/* copy original boost entry to update package buffer */
510 	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
511 		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
512 
513 	/* over-write the never-match dest port key bits with the encoded port
514 	 * bits
515 	 */
516 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
517 		    (u8 *)&port, NULL, NULL, NULL,
518 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
519 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
520 
521 	/* exact copy of entry to Tx section entry */
522 	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
523 		   ICE_NONDMA_TO_NONDMA);
524 
525 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
526 	if (!status) {
527 		hw->tnl.tbl[index].port = port;
528 		hw->tnl.tbl[index].in_use = true;
529 		hw->tnl.tbl[index].ref = 1;
530 	}
531 
532 ice_create_tunnel_err:
533 	ice_pkg_buf_free(hw, bld);
534 
535 ice_create_tunnel_end:
536 	ice_release_lock(&hw->tnl_lock);
537 
538 	return status;
539 }
540 
541 /**
542  * ice_destroy_tunnel
543  * @hw: pointer to the HW structure
544  * @port: port of tunnel to destroy (ignored if the all parameter is true)
545  * @all: flag that states to destroy all tunnels
546  *
547  * Destroys a tunnel or all tunnels by creating an update package buffer
548  * targeting the specific updates requested and then performing an update
549  * package.
550  */
551 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
552 {
553 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
554 	enum ice_status status = ICE_ERR_MAX_LIMIT;
555 	struct ice_buf_build *bld;
556 	u16 count = 0;
557 	u16 index;
558 	u16 size;
559 	u16 i, j;
560 
561 	ice_acquire_lock(&hw->tnl_lock);
562 
563 	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
564 		if (hw->tnl.tbl[index].ref > 1) {
565 			hw->tnl.tbl[index].ref--;
566 			status = ICE_SUCCESS;
567 			goto ice_destroy_tunnel_end;
568 		}
569 
570 	/* determine count */
571 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
572 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
573 		    (all || hw->tnl.tbl[i].port == port))
574 			count++;
575 
576 	if (!count) {
577 		status = ICE_ERR_PARAM;
578 		goto ice_destroy_tunnel_end;
579 	}
580 
581 	/* size of section - there is at least one entry */
582 	size = ice_struct_size(sect_rx, tcam, count);
583 
584 	bld = ice_pkg_buf_alloc(hw);
585 	if (!bld) {
586 		status = ICE_ERR_NO_MEMORY;
587 		goto ice_destroy_tunnel_end;
588 	}
589 
590 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
591 	if (ice_pkg_buf_reserve_section(bld, 2))
592 		goto ice_destroy_tunnel_err;
593 
594 	sect_rx = (struct ice_boost_tcam_section *)
595 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
596 					  size);
597 	if (!sect_rx)
598 		goto ice_destroy_tunnel_err;
599 	sect_rx->count = CPU_TO_LE16(count);
600 
601 	sect_tx = (struct ice_boost_tcam_section *)
602 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
603 					  size);
604 	if (!sect_tx)
605 		goto ice_destroy_tunnel_err;
606 	sect_tx->count = CPU_TO_LE16(count);
607 
608 	/* copy original boost entry to update package buffer, one copy to Rx
609 	 * section, another copy to the Tx section
610 	 */
611 	for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
612 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
613 		    (all || hw->tnl.tbl[i].port == port)) {
614 			ice_memcpy(sect_rx->tcam + j,
615 				   hw->tnl.tbl[i].boost_entry,
616 				   sizeof(*sect_rx->tcam),
617 				   ICE_NONDMA_TO_NONDMA);
618 			ice_memcpy(sect_tx->tcam + j,
619 				   hw->tnl.tbl[i].boost_entry,
620 				   sizeof(*sect_tx->tcam),
621 				   ICE_NONDMA_TO_NONDMA);
622 			hw->tnl.tbl[i].marked = true;
623 			j++;
624 		}
625 
626 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
627 	if (!status)
628 		for (i = 0; i < hw->tnl.count &&
629 		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
630 			if (hw->tnl.tbl[i].marked) {
631 				hw->tnl.tbl[i].ref = 0;
632 				hw->tnl.tbl[i].port = 0;
633 				hw->tnl.tbl[i].in_use = false;
634 				hw->tnl.tbl[i].marked = false;
635 			}
636 
637 ice_destroy_tunnel_err:
638 	ice_pkg_buf_free(hw, bld);
639 
640 ice_destroy_tunnel_end:
641 	ice_release_lock(&hw->tnl_lock);
642 
643 	return status;
644 }
645 
646 /**
647  * ice_replay_tunnels
648  * @hw: pointer to the HW structure
649  *
650  * Replays all tunnels
651  */
652 enum ice_status ice_replay_tunnels(struct ice_hw *hw)
653 {
654 	enum ice_status status = ICE_SUCCESS;
655 	u16 i;
656 
657 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
658 
659 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
660 		enum ice_tunnel_type type = hw->tnl.tbl[i].type;
661 		u16 refs = hw->tnl.tbl[i].ref;
662 		u16 port = hw->tnl.tbl[i].port;
663 
664 		if (!hw->tnl.tbl[i].in_use)
665 			continue;
666 
667 		/* Replay tunnels one at a time by destroying them, then
668 		 * recreating them
669 		 */
670 		hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
671 		status = ice_destroy_tunnel(hw, port, false);
672 		if (status) {
673 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
674 				  status, port);
675 			break;
676 		}
677 
678 		status = ice_create_tunnel(hw, type, port);
679 		if (status) {
680 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n",
681 				  status, port);
682 			break;
683 		}
684 
685 		/* reset to original ref count */
686 		hw->tnl.tbl[i].ref = refs;
687 	}
688 
689 	return status;
690 }
691 
692 /**
693  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
694  * @hw: pointer to the hardware structure
695  * @blk: hardware block
696  * @prof: profile ID
697  * @fv_idx: field vector word index
698  * @prot: variable to receive the protocol ID
699  * @off: variable to receive the protocol offset
700  */
701 enum ice_status
702 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
703 		  u8 *prot, u16 *off)
704 {
705 	struct ice_fv_word *fv_ext;
706 
707 	if (prof >= hw->blk[blk].es.count)
708 		return ICE_ERR_PARAM;
709 
710 	if (fv_idx >= hw->blk[blk].es.fvw)
711 		return ICE_ERR_PARAM;
712 
713 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
714 
715 	*prot = fv_ext[fv_idx].prot_id;
716 	*off = fv_ext[fv_idx].off;
717 
718 	return ICE_SUCCESS;
719 }
720 
721 /* PTG Management */
722 
723 /**
724  * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
725  * @hw: pointer to the hardware structure
726  * @blk: HW block
727  *
728  * This function will update the XLT1 hardware table to reflect the new
729  * packet type group configuration.
730  */
731 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
732 {
733 	struct ice_xlt1_section *sect;
734 	struct ice_buf_build *bld;
735 	enum ice_status status;
736 	u16 index;
737 
738 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
739 					       ice_struct_size(sect, value,
740 							       ICE_XLT1_CNT),
741 					       (void **)&sect);
742 	if (!bld)
743 		return ICE_ERR_NO_MEMORY;
744 
745 	sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
746 	sect->offset = CPU_TO_LE16(0);
747 	for (index = 0; index < ICE_XLT1_CNT; index++)
748 		sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
749 
750 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
751 
752 	ice_pkg_buf_free(hw, bld);
753 
754 	return status;
755 }
756 
757 /**
758  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
759  * @hw: pointer to the hardware structure
760  * @blk: HW block
761  * @ptype: the ptype to search for
762  * @ptg: pointer to variable that receives the PTG
763  *
764  * This function will search the PTGs for a particular ptype, returning the
765  * PTG ID that contains it through the PTG parameter, with the value of
766  * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
767  */
768 static enum ice_status
769 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
770 {
771 	if (ptype >= ICE_XLT1_CNT || !ptg)
772 		return ICE_ERR_PARAM;
773 
774 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
775 	return ICE_SUCCESS;
776 }
777 
778 /**
779  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
780  * @hw: pointer to the hardware structure
781  * @blk: HW block
782  * @ptg: the PTG to allocate
783  *
784  * This function allocates a given packet type group ID specified by the PTG
785  * parameter.
786  */
787 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
788 {
789 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
790 }
791 
792 /**
793  * ice_ptg_free - Frees a packet type group
794  * @hw: pointer to the hardware structure
795  * @blk: HW block
796  * @ptg: the PTG ID to free
797  *
798  * This function frees a packet type group, and returns all the current ptypes
799  * within it to the default PTG.
800  */
801 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
802 {
803 	struct ice_ptg_ptype *p, *temp;
804 
805 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
806 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
807 	while (p) {
808 		p->ptg = ICE_DEFAULT_PTG;
809 		temp = p->next_ptype;
810 		p->next_ptype = NULL;
811 		p = temp;
812 	}
813 
814 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
815 }
816 
817 /**
818  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
819  * @hw: pointer to the hardware structure
820  * @blk: HW block
821  * @ptype: the ptype to remove
822  * @ptg: the PTG to remove the ptype from
823  *
824  * This function will remove the ptype from the specific PTG, and move it to
825  * the default PTG (ICE_DEFAULT_PTG).
826  */
827 static enum ice_status
828 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
829 {
830 	struct ice_ptg_ptype **ch;
831 	struct ice_ptg_ptype *p;
832 
833 	if (ptype > ICE_XLT1_CNT - 1)
834 		return ICE_ERR_PARAM;
835 
836 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
837 		return ICE_ERR_DOES_NOT_EXIST;
838 
839 	/* Should not happen if .in_use is set, bad config */
840 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
841 		return ICE_ERR_CFG;
842 
843 	/* find the ptype within this PTG, and bypass the link over it */
844 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
845 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
846 	while (p) {
847 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
848 			*ch = p->next_ptype;
849 			break;
850 		}
851 
852 		ch = &p->next_ptype;
853 		p = p->next_ptype;
854 	}
855 
856 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
857 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
858 
859 	return ICE_SUCCESS;
860 }
861 
862 /**
863  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
864  * @hw: pointer to the hardware structure
865  * @blk: HW block
866  * @ptype: the ptype to add or move
867  * @ptg: the PTG to add or move the ptype to
868  *
869  * This function will either add or move a ptype to a particular PTG depending
870  * on if the ptype is already part of another group. Note that using a
871  * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
872  * default PTG.
873  */
874 static enum ice_status
875 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
876 {
877 	enum ice_status status;
878 	u8 original_ptg;
879 
880 	if (ptype > ICE_XLT1_CNT - 1)
881 		return ICE_ERR_PARAM;
882 
883 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
884 		return ICE_ERR_DOES_NOT_EXIST;
885 
886 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
887 	if (status)
888 		return status;
889 
890 	/* Is ptype already in the correct PTG? */
891 	if (original_ptg == ptg)
892 		return ICE_SUCCESS;
893 
894 	/* Remove from original PTG and move back to the default PTG */
895 	if (original_ptg != ICE_DEFAULT_PTG)
896 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
897 
898 	/* Moving to default PTG? Then we're done with this request */
899 	if (ptg == ICE_DEFAULT_PTG)
900 		return ICE_SUCCESS;
901 
902 	/* Add ptype to PTG at beginning of list */
903 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
904 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
905 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
906 		&hw->blk[blk].xlt1.ptypes[ptype];
907 
908 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
909 	hw->blk[blk].xlt1.t[ptype] = ptg;
910 
911 	return ICE_SUCCESS;
912 }
913 
914 /* Block / table size info */
915 struct ice_blk_size_details {
916 	u16 xlt1;			/* # XLT1 entries */
917 	u16 xlt2;			/* # XLT2 entries */
918 	u16 prof_tcam;			/* # profile ID TCAM entries */
919 	u16 prof_id;			/* # profile IDs */
920 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
921 	u16 prof_redir;			/* # profile redirection entries */
922 	u16 es;				/* # extraction sequence entries */
923 	u16 fvw;			/* # field vector words */
924 	u8 overwrite;			/* overwrite existing entries allowed */
925 	u8 reverse;			/* reverse FV order */
926 };
927 
928 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
929 	/**
930 	 * Table Definitions
931 	 * XLT1 - Number of entries in XLT1 table
932 	 * XLT2 - Number of entries in XLT2 table
933 	 * TCAM - Number of entries Profile ID TCAM table
934 	 * CDID - Control Domain ID of the hardware block
935 	 * PRED - Number of entries in the Profile Redirection Table
936 	 * FV   - Number of entries in the Field Vector
937 	 * FVW  - Width (in WORDs) of the Field Vector
938 	 * OVR  - Overwrite existing table entries
939 	 * REV  - Reverse FV
940 	 */
941 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
942 	/*          Overwrite   , Reverse FV */
943 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
944 		    false, false },
945 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
946 		    false, false },
947 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
948 		    false, true  },
949 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
950 		    true,  true  },
951 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
952 		    false, false },
953 };
954 
955 enum ice_sid_all {
956 	ICE_SID_XLT1_OFF = 0,
957 	ICE_SID_XLT2_OFF,
958 	ICE_SID_PR_OFF,
959 	ICE_SID_PR_REDIR_OFF,
960 	ICE_SID_ES_OFF,
961 	ICE_SID_OFF_COUNT,
962 };
963 
964 /* Characteristic handling */
965 
966 /**
967  * ice_match_prop_lst - determine if properties of two lists match
968  * @list1: first properties list
969  * @list2: second properties list
970  *
971  * Count, cookies and the order must match in order to be considered equivalent.
972  */
973 static bool
974 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
975 {
976 	struct ice_vsig_prof *tmp1;
977 	struct ice_vsig_prof *tmp2;
978 	u16 chk_count = 0;
979 	u16 count = 0;
980 
981 	/* compare counts */
982 	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
983 		count++;
984 	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
985 		chk_count++;
986 	if (!count || count != chk_count)
987 		return false;
988 
989 	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
990 	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
991 
992 	/* profile cookies must compare, and in the exact same order to take
993 	 * into account priority
994 	 */
995 	while (count--) {
996 		if (tmp2->profile_cookie != tmp1->profile_cookie)
997 			return false;
998 
999 		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
1000 		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
1001 	}
1002 
1003 	return true;
1004 }
1005 
1006 /* VSIG Management */
1007 
1008 /**
1009  * ice_vsig_update_xlt2_sect - update one section of XLT2 table
1010  * @hw: pointer to the hardware structure
1011  * @blk: HW block
1012  * @vsi: HW VSI number to program
1013  * @vsig: VSIG for the VSI
1014  *
1015  * This function will update the XLT2 hardware table with the input VSI
1016  * group configuration.
1017  */
1018 static enum ice_status
1019 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
1020 			  u16 vsig)
1021 {
1022 	struct ice_xlt2_section *sect;
1023 	struct ice_buf_build *bld;
1024 	enum ice_status status;
1025 
1026 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
1027 					       ice_struct_size(sect, value, 1),
1028 					       (void **)&sect);
1029 	if (!bld)
1030 		return ICE_ERR_NO_MEMORY;
1031 
1032 	sect->count = CPU_TO_LE16(1);
1033 	sect->offset = CPU_TO_LE16(vsi);
1034 	sect->value[0] = CPU_TO_LE16(vsig);
1035 
1036 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1037 
1038 	ice_pkg_buf_free(hw, bld);
1039 
1040 	return status;
1041 }
1042 
1043 /**
1044  * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
1045  * @hw: pointer to the hardware structure
1046  * @blk: HW block
1047  *
1048  * This function will update the XLT2 hardware table with the input VSI
1049  * group configuration of used vsis.
1050  */
1051 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
1052 {
1053 	u16 vsi;
1054 
1055 	for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
1056 		/* update only vsis that have been changed */
1057 		if (hw->blk[blk].xlt2.vsis[vsi].changed) {
1058 			enum ice_status status;
1059 			u16 vsig;
1060 
1061 			vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
1062 			status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
1063 			if (status)
1064 				return status;
1065 
1066 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1067 		}
1068 	}
1069 
1070 	return ICE_SUCCESS;
1071 }
1072 
1073 /**
1074  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
1075  * @hw: pointer to the hardware structure
1076  * @blk: HW block
1077  * @vsi: VSI of interest
1078  * @vsig: pointer to receive the VSI group
1079  *
1080  * This function will lookup the VSI entry in the XLT2 list and return
1081  * the VSI group its associated with.
1082  */
1083 enum ice_status
1084 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
1085 {
1086 	if (!vsig || vsi >= ICE_MAX_VSI)
1087 		return ICE_ERR_PARAM;
1088 
1089 	/* As long as there's a default or valid VSIG associated with the input
1090 	 * VSI, the functions returns a success. Any handling of VSIG will be
1091 	 * done by the following add, update or remove functions.
1092 	 */
1093 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
1094 
1095 	return ICE_SUCCESS;
1096 }
1097 
1098 /**
1099  * ice_vsig_alloc_val - allocate a new VSIG by value
1100  * @hw: pointer to the hardware structure
1101  * @blk: HW block
1102  * @vsig: the VSIG to allocate
1103  *
1104  * This function will allocate a given VSIG specified by the VSIG parameter.
1105  */
1106 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
1107 {
1108 	u16 idx = vsig & ICE_VSIG_IDX_M;
1109 
1110 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
1111 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1112 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
1113 	}
1114 
1115 	return ICE_VSIG_VALUE(idx, hw->pf_id);
1116 }
1117 
1118 /**
1119  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
1120  * @hw: pointer to the hardware structure
1121  * @blk: HW block
1122  *
1123  * This function will iterate through the VSIG list and mark the first
1124  * unused entry for the new VSIG entry as used and return that value.
1125  */
1126 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
1127 {
1128 	u16 i;
1129 
1130 	for (i = 1; i < ICE_MAX_VSIGS; i++)
1131 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
1132 			return ice_vsig_alloc_val(hw, blk, i);
1133 
1134 	return ICE_DEFAULT_VSIG;
1135 }
1136 
1137 /**
1138  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
1139  * @hw: pointer to the hardware structure
1140  * @blk: HW block
1141  * @chs: characteristic list
1142  * @vsig: returns the VSIG with the matching profiles, if found
1143  *
1144  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
1145  * a group have the same characteristic set. To check if there exists a VSIG
1146  * which has the same characteristics as the input characteristics; this
1147  * function will iterate through the XLT2 list and return the VSIG that has a
1148  * matching configuration. In order to make sure that priorities are accounted
1149  * for, the list must match exactly, including the order in which the
1150  * characteristics are listed.
1151  */
1152 static enum ice_status
1153 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
1154 			struct LIST_HEAD_TYPE *chs, u16 *vsig)
1155 {
1156 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
1157 	u16 i;
1158 
1159 	for (i = 0; i < xlt2->count; i++)
1160 		if (xlt2->vsig_tbl[i].in_use &&
1161 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
1162 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
1163 			return ICE_SUCCESS;
1164 		}
1165 
1166 	return ICE_ERR_DOES_NOT_EXIST;
1167 }
1168 
1169 /**
1170  * ice_vsig_free - free VSI group
1171  * @hw: pointer to the hardware structure
1172  * @blk: HW block
1173  * @vsig: VSIG to remove
1174  *
1175  * The function will remove all VSIs associated with the input VSIG and move
1176  * them to the DEFAULT_VSIG and mark the VSIG available.
1177  */
1178 static enum ice_status
1179 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
1180 {
1181 	struct ice_vsig_prof *dtmp, *del;
1182 	struct ice_vsig_vsi *vsi_cur;
1183 	u16 idx;
1184 
1185 	idx = vsig & ICE_VSIG_IDX_M;
1186 	if (idx >= ICE_MAX_VSIGS)
1187 		return ICE_ERR_PARAM;
1188 
1189 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1190 		return ICE_ERR_DOES_NOT_EXIST;
1191 
1192 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
1193 
1194 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1195 	/* If the VSIG has at least 1 VSI then iterate through the
1196 	 * list and remove the VSIs before deleting the group.
1197 	 */
1198 	if (vsi_cur) {
1199 		/* remove all vsis associated with this VSIG XLT2 entry */
1200 		do {
1201 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
1202 
1203 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
1204 			vsi_cur->changed = 1;
1205 			vsi_cur->next_vsi = NULL;
1206 			vsi_cur = tmp;
1207 		} while (vsi_cur);
1208 
1209 		/* NULL terminate head of VSI list */
1210 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
1211 	}
1212 
1213 	/* free characteristic list */
1214 	LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
1215 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
1216 				 ice_vsig_prof, list) {
1217 		LIST_DEL(&del->list);
1218 		ice_free(hw, del);
1219 	}
1220 
1221 	/* if VSIG characteristic list was cleared for reset
1222 	 * re-initialize the list head
1223 	 */
1224 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1225 
1226 	return ICE_SUCCESS;
1227 }
1228 
1229 /**
1230  * ice_vsig_remove_vsi - remove VSI from VSIG
1231  * @hw: pointer to the hardware structure
1232  * @blk: HW block
1233  * @vsi: VSI to remove
1234  * @vsig: VSI group to remove from
1235  *
1236  * The function will remove the input VSI from its VSI group and move it
1237  * to the DEFAULT_VSIG.
1238  */
1239 static enum ice_status
1240 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1241 {
1242 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
1243 	u16 idx;
1244 
1245 	idx = vsig & ICE_VSIG_IDX_M;
1246 
1247 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1248 		return ICE_ERR_PARAM;
1249 
1250 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1251 		return ICE_ERR_DOES_NOT_EXIST;
1252 
1253 	/* entry already in default VSIG, don't have to remove */
1254 	if (idx == ICE_DEFAULT_VSIG)
1255 		return ICE_SUCCESS;
1256 
1257 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1258 	if (!(*vsi_head))
1259 		return ICE_ERR_CFG;
1260 
1261 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1262 	vsi_cur = (*vsi_head);
1263 
1264 	/* iterate the VSI list, skip over the entry to be removed */
1265 	while (vsi_cur) {
1266 		if (vsi_tgt == vsi_cur) {
1267 			(*vsi_head) = vsi_cur->next_vsi;
1268 			break;
1269 		}
1270 		vsi_head = &vsi_cur->next_vsi;
1271 		vsi_cur = vsi_cur->next_vsi;
1272 	}
1273 
1274 	/* verify if VSI was removed from group list */
1275 	if (!vsi_cur)
1276 		return ICE_ERR_DOES_NOT_EXIST;
1277 
1278 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
1279 	vsi_cur->changed = 1;
1280 	vsi_cur->next_vsi = NULL;
1281 
1282 	return ICE_SUCCESS;
1283 }
1284 
1285 /**
1286  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
1287  * @hw: pointer to the hardware structure
1288  * @blk: HW block
1289  * @vsi: VSI to move
1290  * @vsig: destination VSI group
1291  *
1292  * This function will move or add the input VSI to the target VSIG.
1293  * The function will find the original VSIG the VSI belongs to and
1294  * move the entry to the DEFAULT_VSIG, update the original VSIG and
1295  * then move entry to the new VSIG.
1296  */
1297 static enum ice_status
1298 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1299 {
1300 	struct ice_vsig_vsi *tmp;
1301 	enum ice_status status;
1302 	u16 orig_vsig, idx;
1303 
1304 	idx = vsig & ICE_VSIG_IDX_M;
1305 
1306 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1307 		return ICE_ERR_PARAM;
1308 
1309 	/* if VSIG not in use and VSIG is not default type this VSIG
1310 	 * doesn't exist.
1311 	 */
1312 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1313 	    vsig != ICE_DEFAULT_VSIG)
1314 		return ICE_ERR_DOES_NOT_EXIST;
1315 
1316 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1317 	if (status)
1318 		return status;
1319 
1320 	/* no update required if vsigs match */
1321 	if (orig_vsig == vsig)
1322 		return ICE_SUCCESS;
1323 
1324 	if (orig_vsig != ICE_DEFAULT_VSIG) {
1325 		/* remove entry from orig_vsig and add to default VSIG */
1326 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1327 		if (status)
1328 			return status;
1329 	}
1330 
1331 	if (idx == ICE_DEFAULT_VSIG)
1332 		return ICE_SUCCESS;
1333 
1334 	/* Create VSI entry and add VSIG and prop_mask values */
1335 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1336 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1337 
1338 	/* Add new entry to the head of the VSIG list */
1339 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1340 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1341 		&hw->blk[blk].xlt2.vsis[vsi];
1342 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1343 	hw->blk[blk].xlt2.t[vsi] = vsig;
1344 
1345 	return ICE_SUCCESS;
1346 }
1347 
1348 /**
1349  * ice_find_prof_id - find profile ID for a given field vector
1350  * @hw: pointer to the hardware structure
1351  * @blk: HW block
1352  * @fv: field vector to search for
1353  * @prof_id: receives the profile ID
1354  */
1355 static enum ice_status
1356 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
1357 		 struct ice_fv_word *fv, u8 *prof_id)
1358 {
1359 	struct ice_es *es = &hw->blk[blk].es;
1360 	u16 off;
1361 	u8 i;
1362 
1363 	for (i = 0; i < (u8)es->count; i++) {
1364 		off = i * es->fvw;
1365 
1366 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
1367 			continue;
1368 
1369 		*prof_id = i;
1370 		return ICE_SUCCESS;
1371 	}
1372 
1373 	return ICE_ERR_DOES_NOT_EXIST;
1374 }
1375 
1376 /**
1377  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
1378  * @blk: the block type
1379  * @rsrc_type: pointer to variable to receive the resource type
1380  */
1381 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1382 {
1383 	switch (blk) {
1384 	case ICE_BLK_RSS:
1385 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
1386 		break;
1387 	case ICE_BLK_PE:
1388 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
1389 		break;
1390 	default:
1391 		return false;
1392 	}
1393 	return true;
1394 }
1395 
1396 /**
1397  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
1398  * @blk: the block type
1399  * @rsrc_type: pointer to variable to receive the resource type
1400  */
1401 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1402 {
1403 	switch (blk) {
1404 	case ICE_BLK_RSS:
1405 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
1406 		break;
1407 	case ICE_BLK_PE:
1408 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
1409 		break;
1410 	default:
1411 		return false;
1412 	}
1413 	return true;
1414 }
1415 
1416 /**
1417  * ice_alloc_tcam_ent - allocate hardware TCAM entry
1418  * @hw: pointer to the HW struct
1419  * @blk: the block to allocate the TCAM for
1420  * @btm: true to allocate from bottom of table, false to allocate from top
1421  * @tcam_idx: pointer to variable to receive the TCAM entry
1422  *
1423  * This function allocates a new entry in a Profile ID TCAM for a specific
1424  * block.
1425  */
1426 static enum ice_status
1427 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
1428 		   u16 *tcam_idx)
1429 {
1430 	u16 res_type;
1431 
1432 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1433 		return ICE_ERR_PARAM;
1434 
1435 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
1436 }
1437 
1438 /**
1439  * ice_free_tcam_ent - free hardware TCAM entry
1440  * @hw: pointer to the HW struct
1441  * @blk: the block from which to free the TCAM entry
1442  * @tcam_idx: the TCAM entry to free
1443  *
1444  * This function frees an entry in a Profile ID TCAM for a specific block.
1445  */
1446 static enum ice_status
1447 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
1448 {
1449 	u16 res_type;
1450 
1451 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1452 		return ICE_ERR_PARAM;
1453 
1454 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
1455 }
1456 
1457 /**
1458  * ice_alloc_prof_id - allocate profile ID
1459  * @hw: pointer to the HW struct
1460  * @blk: the block to allocate the profile ID for
1461  * @prof_id: pointer to variable to receive the profile ID
1462  *
1463  * This function allocates a new profile ID, which also corresponds to a Field
1464  * Vector (Extraction Sequence) entry.
1465  */
1466 static enum ice_status
1467 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
1468 {
1469 	enum ice_status status;
1470 	u16 res_type;
1471 	u16 get_prof;
1472 
1473 	if (!ice_prof_id_rsrc_type(blk, &res_type))
1474 		return ICE_ERR_PARAM;
1475 
1476 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
1477 	if (!status)
1478 		*prof_id = (u8)get_prof;
1479 
1480 	return status;
1481 }
1482 
1483 /**
1484  * ice_free_prof_id - free profile ID
1485  * @hw: pointer to the HW struct
1486  * @blk: the block from which to free the profile ID
1487  * @prof_id: the profile ID to free
1488  *
1489  * This function frees a profile ID, which also corresponds to a Field Vector.
1490  */
1491 static enum ice_status
1492 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1493 {
1494 	u16 tmp_prof_id = (u16)prof_id;
1495 	u16 res_type;
1496 
1497 	if (!ice_prof_id_rsrc_type(blk, &res_type))
1498 		return ICE_ERR_PARAM;
1499 
1500 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
1501 }
1502 
1503 /**
1504  * ice_prof_inc_ref - increment reference count for profile
1505  * @hw: pointer to the HW struct
1506  * @blk: the block from which to free the profile ID
1507  * @prof_id: the profile ID for which to increment the reference count
1508  */
1509 static enum ice_status
1510 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1511 {
1512 	if (prof_id > hw->blk[blk].es.count)
1513 		return ICE_ERR_PARAM;
1514 
1515 	hw->blk[blk].es.ref_count[prof_id]++;
1516 
1517 	return ICE_SUCCESS;
1518 }
1519 
1520 /**
1521  * ice_write_es - write an extraction sequence to hardware
1522  * @hw: pointer to the HW struct
1523  * @blk: the block in which to write the extraction sequence
1524  * @prof_id: the profile ID to write
1525  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
1526  */
1527 static void
1528 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
1529 	     struct ice_fv_word *fv)
1530 {
1531 	u16 off;
1532 
1533 	off = prof_id * hw->blk[blk].es.fvw;
1534 	if (!fv) {
1535 		ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
1536 			   sizeof(*fv), ICE_NONDMA_MEM);
1537 		hw->blk[blk].es.written[prof_id] = false;
1538 	} else {
1539 		ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
1540 			   sizeof(*fv), ICE_NONDMA_TO_NONDMA);
1541 	}
1542 }
1543 
1544 /**
1545  * ice_prof_dec_ref - decrement reference count for profile
1546  * @hw: pointer to the HW struct
1547  * @blk: the block from which to free the profile ID
1548  * @prof_id: the profile ID for which to decrement the reference count
1549  */
1550 static enum ice_status
1551 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1552 {
1553 	if (prof_id > hw->blk[blk].es.count)
1554 		return ICE_ERR_PARAM;
1555 
1556 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
1557 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
1558 			ice_write_es(hw, blk, prof_id, NULL);
1559 			return ice_free_prof_id(hw, blk, prof_id);
1560 		}
1561 	}
1562 
1563 	return ICE_SUCCESS;
1564 }
1565 
1566 /* Block / table section IDs */
1567 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
1568 	/* SWITCH */
1569 	{	ICE_SID_XLT1_SW,
1570 		ICE_SID_XLT2_SW,
1571 		ICE_SID_PROFID_TCAM_SW,
1572 		ICE_SID_PROFID_REDIR_SW,
1573 		ICE_SID_FLD_VEC_SW
1574 	},
1575 
1576 	/* ACL */
1577 	{	ICE_SID_XLT1_ACL,
1578 		ICE_SID_XLT2_ACL,
1579 		ICE_SID_PROFID_TCAM_ACL,
1580 		ICE_SID_PROFID_REDIR_ACL,
1581 		ICE_SID_FLD_VEC_ACL
1582 	},
1583 
1584 	/* FD */
1585 	{	ICE_SID_XLT1_FD,
1586 		ICE_SID_XLT2_FD,
1587 		ICE_SID_PROFID_TCAM_FD,
1588 		ICE_SID_PROFID_REDIR_FD,
1589 		ICE_SID_FLD_VEC_FD
1590 	},
1591 
1592 	/* RSS */
1593 	{	ICE_SID_XLT1_RSS,
1594 		ICE_SID_XLT2_RSS,
1595 		ICE_SID_PROFID_TCAM_RSS,
1596 		ICE_SID_PROFID_REDIR_RSS,
1597 		ICE_SID_FLD_VEC_RSS
1598 	},
1599 
1600 	/* PE */
1601 	{	ICE_SID_XLT1_PE,
1602 		ICE_SID_XLT2_PE,
1603 		ICE_SID_PROFID_TCAM_PE,
1604 		ICE_SID_PROFID_REDIR_PE,
1605 		ICE_SID_FLD_VEC_PE
1606 	}
1607 };
1608 
1609 /**
1610  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
1611  * @hw: pointer to the hardware structure
1612  * @blk: the HW block to initialize
1613  */
1614 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1615 {
1616 	u16 pt;
1617 
1618 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1619 		u8 ptg;
1620 
1621 		ptg = hw->blk[blk].xlt1.t[pt];
1622 		if (ptg != ICE_DEFAULT_PTG) {
1623 			ice_ptg_alloc_val(hw, blk, ptg);
1624 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1625 		}
1626 	}
1627 }
1628 
1629 /**
1630  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
1631  * @hw: pointer to the hardware structure
1632  * @blk: the HW block to initialize
1633  */
1634 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1635 {
1636 	u16 vsi;
1637 
1638 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1639 		u16 vsig;
1640 
1641 		vsig = hw->blk[blk].xlt2.t[vsi];
1642 		if (vsig) {
1643 			ice_vsig_alloc_val(hw, blk, vsig);
1644 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1645 			/* no changes at this time, since this has been
1646 			 * initialized from the original package
1647 			 */
1648 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1649 		}
1650 	}
1651 }
1652 
1653 /**
1654  * ice_init_sw_db - init software database from HW tables
1655  * @hw: pointer to the hardware structure
1656  */
1657 static void ice_init_sw_db(struct ice_hw *hw)
1658 {
1659 	u16 i;
1660 
1661 	for (i = 0; i < ICE_BLK_COUNT; i++) {
1662 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
1663 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1664 	}
1665 }
1666 
1667 /**
1668  * ice_fill_tbl - Reads content of a single table type into database
1669  * @hw: pointer to the hardware structure
1670  * @block_id: Block ID of the table to copy
1671  * @sid: Section ID of the table to copy
1672  *
1673  * Will attempt to read the entire content of a given table of a single block
1674  * into the driver database. We assume that the buffer will always
1675  * be as large or larger than the data contained in the package. If
1676  * this condition is not met, there is most likely an error in the package
1677  * contents.
1678  */
1679 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
1680 {
1681 	u32 dst_len, sect_len, offset = 0;
1682 	struct ice_prof_redir_section *pr;
1683 	struct ice_prof_id_section *pid;
1684 	struct ice_xlt1_section *xlt1;
1685 	struct ice_xlt2_section *xlt2;
1686 	struct ice_sw_fv_section *es;
1687 	struct ice_pkg_enum state;
1688 	u8 *src, *dst;
1689 	void *sect;
1690 
1691 	/* if the HW segment pointer is null then the first iteration of
1692 	 * ice_pkg_enum_section() will fail. In this case the HW tables will
1693 	 * not be filled and return success.
1694 	 */
1695 	if (!hw->seg) {
1696 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
1697 		return;
1698 	}
1699 
1700 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1701 
1702 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
1703 
1704 	while (sect) {
1705 		switch (sid) {
1706 		case ICE_SID_XLT1_SW:
1707 		case ICE_SID_XLT1_FD:
1708 		case ICE_SID_XLT1_RSS:
1709 		case ICE_SID_XLT1_ACL:
1710 		case ICE_SID_XLT1_PE:
1711 			xlt1 = (struct ice_xlt1_section *)sect;
1712 			src = xlt1->value;
1713 			sect_len = LE16_TO_CPU(xlt1->count) *
1714 				sizeof(*hw->blk[block_id].xlt1.t);
1715 			dst = hw->blk[block_id].xlt1.t;
1716 			dst_len = hw->blk[block_id].xlt1.count *
1717 				sizeof(*hw->blk[block_id].xlt1.t);
1718 			break;
1719 		case ICE_SID_XLT2_SW:
1720 		case ICE_SID_XLT2_FD:
1721 		case ICE_SID_XLT2_RSS:
1722 		case ICE_SID_XLT2_ACL:
1723 		case ICE_SID_XLT2_PE:
1724 			xlt2 = (struct ice_xlt2_section *)sect;
1725 			src = (_FORCE_ u8 *)xlt2->value;
1726 			sect_len = LE16_TO_CPU(xlt2->count) *
1727 				sizeof(*hw->blk[block_id].xlt2.t);
1728 			dst = (u8 *)hw->blk[block_id].xlt2.t;
1729 			dst_len = hw->blk[block_id].xlt2.count *
1730 				sizeof(*hw->blk[block_id].xlt2.t);
1731 			break;
1732 		case ICE_SID_PROFID_TCAM_SW:
1733 		case ICE_SID_PROFID_TCAM_FD:
1734 		case ICE_SID_PROFID_TCAM_RSS:
1735 		case ICE_SID_PROFID_TCAM_ACL:
1736 		case ICE_SID_PROFID_TCAM_PE:
1737 			pid = (struct ice_prof_id_section *)sect;
1738 			src = (u8 *)pid->entry;
1739 			sect_len = LE16_TO_CPU(pid->count) *
1740 				sizeof(*hw->blk[block_id].prof.t);
1741 			dst = (u8 *)hw->blk[block_id].prof.t;
1742 			dst_len = hw->blk[block_id].prof.count *
1743 				sizeof(*hw->blk[block_id].prof.t);
1744 			break;
1745 		case ICE_SID_PROFID_REDIR_SW:
1746 		case ICE_SID_PROFID_REDIR_FD:
1747 		case ICE_SID_PROFID_REDIR_RSS:
1748 		case ICE_SID_PROFID_REDIR_ACL:
1749 		case ICE_SID_PROFID_REDIR_PE:
1750 			pr = (struct ice_prof_redir_section *)sect;
1751 			src = pr->redir_value;
1752 			sect_len = LE16_TO_CPU(pr->count) *
1753 				sizeof(*hw->blk[block_id].prof_redir.t);
1754 			dst = hw->blk[block_id].prof_redir.t;
1755 			dst_len = hw->blk[block_id].prof_redir.count *
1756 				sizeof(*hw->blk[block_id].prof_redir.t);
1757 			break;
1758 		case ICE_SID_FLD_VEC_SW:
1759 		case ICE_SID_FLD_VEC_FD:
1760 		case ICE_SID_FLD_VEC_RSS:
1761 		case ICE_SID_FLD_VEC_ACL:
1762 		case ICE_SID_FLD_VEC_PE:
1763 			es = (struct ice_sw_fv_section *)sect;
1764 			src = (u8 *)es->fv;
1765 			sect_len = (u32)(LE16_TO_CPU(es->count) *
1766 					 hw->blk[block_id].es.fvw) *
1767 				sizeof(*hw->blk[block_id].es.t);
1768 			dst = (u8 *)hw->blk[block_id].es.t;
1769 			dst_len = (u32)(hw->blk[block_id].es.count *
1770 					hw->blk[block_id].es.fvw) *
1771 				sizeof(*hw->blk[block_id].es.t);
1772 			break;
1773 		default:
1774 			return;
1775 		}
1776 
1777 		/* if the section offset exceeds destination length, terminate
1778 		 * table fill.
1779 		 */
1780 		if (offset > dst_len)
1781 			return;
1782 
1783 		/* if the sum of section size and offset exceed destination size
1784 		 * then we are out of bounds of the HW table size for that PF.
1785 		 * Changing section length to fill the remaining table space
1786 		 * of that PF.
1787 		 */
1788 		if ((offset + sect_len) > dst_len)
1789 			sect_len = dst_len - offset;
1790 
1791 		ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
1792 		offset += sect_len;
1793 		sect = ice_pkg_enum_section(NULL, &state, sid);
1794 	}
1795 }
1796 
1797 /**
1798  * ice_init_flow_profs - init flow profile locks and list heads
1799  * @hw: pointer to the hardware structure
1800  * @blk_idx: HW block index
1801  */
1802 static
1803 void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
1804 {
1805 	ice_init_lock(&hw->fl_profs_locks[blk_idx]);
1806 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
1807 }
1808 
1809 /**
1810  * ice_init_hw_tbls - init hardware table memory
1811  * @hw: pointer to the hardware structure
1812  */
1813 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
1814 {
1815 	u8 i;
1816 
1817 	ice_init_lock(&hw->rss_locks);
1818 	INIT_LIST_HEAD(&hw->rss_list_head);
1819 	for (i = 0; i < ICE_BLK_COUNT; i++) {
1820 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1821 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
1822 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1823 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1824 		struct ice_es *es = &hw->blk[i].es;
1825 		u16 j;
1826 
1827 		if (hw->blk[i].is_list_init)
1828 			continue;
1829 
1830 		ice_init_flow_profs(hw, i);
1831 		ice_init_lock(&es->prof_map_lock);
1832 		INIT_LIST_HEAD(&es->prof_map);
1833 		hw->blk[i].is_list_init = true;
1834 
1835 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
1836 		es->reverse = blk_sizes[i].reverse;
1837 
1838 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
1839 		xlt1->count = blk_sizes[i].xlt1;
1840 
1841 		xlt1->ptypes = (struct ice_ptg_ptype *)
1842 			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
1843 
1844 		if (!xlt1->ptypes)
1845 			goto err;
1846 
1847 		xlt1->ptg_tbl = (struct ice_ptg_entry *)
1848 			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
1849 
1850 		if (!xlt1->ptg_tbl)
1851 			goto err;
1852 
1853 		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
1854 		if (!xlt1->t)
1855 			goto err;
1856 
1857 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
1858 		xlt2->count = blk_sizes[i].xlt2;
1859 
1860 		xlt2->vsis = (struct ice_vsig_vsi *)
1861 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
1862 
1863 		if (!xlt2->vsis)
1864 			goto err;
1865 
1866 		xlt2->vsig_tbl = (struct ice_vsig_entry *)
1867 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
1868 		if (!xlt2->vsig_tbl)
1869 			goto err;
1870 
1871 		for (j = 0; j < xlt2->count; j++)
1872 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
1873 
1874 		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
1875 		if (!xlt2->t)
1876 			goto err;
1877 
1878 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
1879 		prof->count = blk_sizes[i].prof_tcam;
1880 		prof->max_prof_id = blk_sizes[i].prof_id;
1881 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
1882 		prof->t = (struct ice_prof_tcam_entry *)
1883 			ice_calloc(hw, prof->count, sizeof(*prof->t));
1884 
1885 		if (!prof->t)
1886 			goto err;
1887 
1888 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
1889 		prof_redir->count = blk_sizes[i].prof_redir;
1890 		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
1891 						 sizeof(*prof_redir->t));
1892 
1893 		if (!prof_redir->t)
1894 			goto err;
1895 
1896 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
1897 		es->count = blk_sizes[i].es;
1898 		es->fvw = blk_sizes[i].fvw;
1899 		es->t = (struct ice_fv_word *)
1900 			ice_calloc(hw, (u32)(es->count * es->fvw),
1901 				   sizeof(*es->t));
1902 		if (!es->t)
1903 			goto err;
1904 
1905 		es->ref_count = (u16 *)
1906 			ice_calloc(hw, es->count, sizeof(*es->ref_count));
1907 
1908 		if (!es->ref_count)
1909 			goto err;
1910 
1911 		es->written = (u8 *)
1912 			ice_calloc(hw, es->count, sizeof(*es->written));
1913 
1914 		if (!es->written)
1915 			goto err;
1916 
1917 	}
1918 	return ICE_SUCCESS;
1919 
1920 err:
1921 	ice_free_hw_tbls(hw);
1922 	return ICE_ERR_NO_MEMORY;
1923 }
1924 
1925 /**
1926  * ice_fill_blk_tbls - Read package context for tables
1927  * @hw: pointer to the hardware structure
1928  *
1929  * Reads the current package contents and populates the driver
1930  * database with the data iteratively for all advanced feature
1931  * blocks. Assume that the HW tables have been allocated.
1932  */
1933 void ice_fill_blk_tbls(struct ice_hw *hw)
1934 {
1935 	u8 i;
1936 
1937 	for (i = 0; i < ICE_BLK_COUNT; i++) {
1938 		enum ice_block blk_id = (enum ice_block)i;
1939 
1940 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
1941 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
1942 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
1943 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
1944 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
1945 	}
1946 
1947 	ice_init_sw_db(hw);
1948 }
1949 
1950 /**
1951  * ice_free_prof_map - free profile map
1952  * @hw: pointer to the hardware structure
1953  * @blk_idx: HW block index
1954  */
1955 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
1956 {
1957 	struct ice_es *es = &hw->blk[blk_idx].es;
1958 	struct ice_prof_map *del, *tmp;
1959 
1960 	ice_acquire_lock(&es->prof_map_lock);
1961 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
1962 				 ice_prof_map, list) {
1963 		LIST_DEL(&del->list);
1964 		ice_free(hw, del);
1965 	}
1966 	INIT_LIST_HEAD(&es->prof_map);
1967 	ice_release_lock(&es->prof_map_lock);
1968 }
1969 
1970 /**
1971  * ice_free_flow_profs - free flow profile entries
1972  * @hw: pointer to the hardware structure
1973  * @blk_idx: HW block index
1974  */
1975 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
1976 {
1977 	struct ice_flow_prof *p, *tmp;
1978 
1979 	ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
1980 	LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
1981 				 ice_flow_prof, l_entry) {
1982 		LIST_DEL(&p->l_entry);
1983 
1984 		ice_free(hw, p);
1985 	}
1986 	ice_release_lock(&hw->fl_profs_locks[blk_idx]);
1987 
1988 	/* if driver is in reset and tables are being cleared
1989 	 * re-initialize the flow profile list heads
1990 	 */
1991 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
1992 }
1993 
1994 /**
1995  * ice_free_vsig_tbl - free complete VSIG table entries
1996  * @hw: pointer to the hardware structure
1997  * @blk: the HW block on which to free the VSIG table entries
1998  */
1999 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2000 {
2001 	u16 i;
2002 
2003 	if (!hw->blk[blk].xlt2.vsig_tbl)
2004 		return;
2005 
2006 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2007 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2008 			ice_vsig_free(hw, blk, i);
2009 }
2010 
2011 /**
2012  * ice_free_hw_tbls - free hardware table memory
2013  * @hw: pointer to the hardware structure
2014  */
2015 void ice_free_hw_tbls(struct ice_hw *hw)
2016 {
2017 	struct ice_rss_cfg *r, *rt;
2018 	u8 i;
2019 
2020 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2021 		if (hw->blk[i].is_list_init) {
2022 			struct ice_es *es = &hw->blk[i].es;
2023 
2024 			ice_free_prof_map(hw, i);
2025 			ice_destroy_lock(&es->prof_map_lock);
2026 
2027 			ice_free_flow_profs(hw, i);
2028 			ice_destroy_lock(&hw->fl_profs_locks[i]);
2029 
2030 			hw->blk[i].is_list_init = false;
2031 		}
2032 		ice_free_vsig_tbl(hw, (enum ice_block)i);
2033 		ice_free(hw, hw->blk[i].xlt1.ptypes);
2034 		ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
2035 		ice_free(hw, hw->blk[i].xlt1.t);
2036 		ice_free(hw, hw->blk[i].xlt2.t);
2037 		ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
2038 		ice_free(hw, hw->blk[i].xlt2.vsis);
2039 		ice_free(hw, hw->blk[i].prof.t);
2040 		ice_free(hw, hw->blk[i].prof_redir.t);
2041 		ice_free(hw, hw->blk[i].es.t);
2042 		ice_free(hw, hw->blk[i].es.ref_count);
2043 		ice_free(hw, hw->blk[i].es.written);
2044 	}
2045 
2046 	LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
2047 				 ice_rss_cfg, l_entry) {
2048 		LIST_DEL(&r->l_entry);
2049 		ice_free(hw, r);
2050 	}
2051 	ice_destroy_lock(&hw->rss_locks);
2052 	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
2053 }
2054 
2055 /**
2056  * ice_clear_hw_tbls - clear HW tables and flow profiles
2057  * @hw: pointer to the hardware structure
2058  */
2059 void ice_clear_hw_tbls(struct ice_hw *hw)
2060 {
2061 	u8 i;
2062 
2063 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2064 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2065 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
2066 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2067 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2068 		struct ice_es *es = &hw->blk[i].es;
2069 
2070 		if (hw->blk[i].is_list_init) {
2071 			ice_free_prof_map(hw, i);
2072 			ice_free_flow_profs(hw, i);
2073 		}
2074 
2075 		ice_free_vsig_tbl(hw, (enum ice_block)i);
2076 
2077 		if (xlt1->ptypes)
2078 			ice_memset(xlt1->ptypes, 0,
2079 				   xlt1->count * sizeof(*xlt1->ptypes),
2080 				   ICE_NONDMA_MEM);
2081 
2082 		if (xlt1->ptg_tbl)
2083 			ice_memset(xlt1->ptg_tbl, 0,
2084 				   ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
2085 				   ICE_NONDMA_MEM);
2086 
2087 		if (xlt1->t)
2088 			ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
2089 				   ICE_NONDMA_MEM);
2090 
2091 		if (xlt2->vsis)
2092 			ice_memset(xlt2->vsis, 0,
2093 				   xlt2->count * sizeof(*xlt2->vsis),
2094 				   ICE_NONDMA_MEM);
2095 
2096 		if (xlt2->vsig_tbl)
2097 			ice_memset(xlt2->vsig_tbl, 0,
2098 				   xlt2->count * sizeof(*xlt2->vsig_tbl),
2099 				   ICE_NONDMA_MEM);
2100 
2101 		if (xlt2->t)
2102 			ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
2103 				   ICE_NONDMA_MEM);
2104 
2105 		if (prof->t)
2106 			ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
2107 				   ICE_NONDMA_MEM);
2108 
2109 		if (prof_redir->t)
2110 			ice_memset(prof_redir->t, 0,
2111 				   prof_redir->count * sizeof(*prof_redir->t),
2112 				   ICE_NONDMA_MEM);
2113 
2114 		if (es->t)
2115 			ice_memset(es->t, 0,
2116 				   es->count * sizeof(*es->t) * es->fvw,
2117 				   ICE_NONDMA_MEM);
2118 
2119 		if (es->ref_count)
2120 			ice_memset(es->ref_count, 0,
2121 				   es->count * sizeof(*es->ref_count),
2122 				   ICE_NONDMA_MEM);
2123 
2124 		if (es->written)
2125 			ice_memset(es->written, 0,
2126 				   es->count * sizeof(*es->written),
2127 				   ICE_NONDMA_MEM);
2128 
2129 	}
2130 }
2131 
2132 /**
2133  * ice_prof_gen_key - generate profile ID key
2134  * @hw: pointer to the HW struct
2135  * @blk: the block in which to write profile ID to
2136  * @ptg: packet type group (PTG) portion of key
2137  * @vsig: VSIG portion of key
2138  * @cdid: CDID portion of key
2139  * @flags: flag portion of key
2140  * @vl_msk: valid mask
2141  * @dc_msk: don't care mask
2142  * @nm_msk: never match mask
2143  * @key: output of profile ID key
2144  */
2145 static enum ice_status
2146 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
2147 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2148 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
2149 		 u8 key[ICE_TCAM_KEY_SZ])
2150 {
2151 	struct ice_prof_id_key inkey;
2152 
2153 	inkey.xlt1 = ptg;
2154 	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
2155 	inkey.flags = CPU_TO_LE16(flags);
2156 
2157 	switch (hw->blk[blk].prof.cdid_bits) {
2158 	case 0:
2159 		break;
2160 	case 2:
2161 #define ICE_CD_2_M 0xC000U
2162 #define ICE_CD_2_S 14
2163 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
2164 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
2165 		break;
2166 	case 4:
2167 #define ICE_CD_4_M 0xF000U
2168 #define ICE_CD_4_S 12
2169 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
2170 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
2171 		break;
2172 	case 8:
2173 #define ICE_CD_8_M 0xFF00U
2174 #define ICE_CD_8_S 16
2175 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
2176 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
2177 		break;
2178 	default:
2179 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
2180 		break;
2181 	}
2182 
2183 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
2184 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
2185 }
2186 
2187 /**
2188  * ice_tcam_write_entry - write TCAM entry
2189  * @hw: pointer to the HW struct
2190  * @blk: the block in which to write profile ID to
2191  * @idx: the entry index to write to
2192  * @prof_id: profile ID
2193  * @ptg: packet type group (PTG) portion of key
2194  * @vsig: VSIG portion of key
2195  * @cdid: CDID portion of key
2196  * @flags: flag portion of key
2197  * @vl_msk: valid mask
2198  * @dc_msk: don't care mask
2199  * @nm_msk: never match mask
2200  */
2201 static enum ice_status
2202 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
2203 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
2204 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2205 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
2206 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
2207 {
2208 	struct ice_prof_tcam_entry;
2209 	enum ice_status status;
2210 
2211 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
2212 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2213 	if (!status) {
2214 		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
2215 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
2216 	}
2217 
2218 	return status;
2219 }
2220 
2221 /**
2222  * ice_vsig_get_ref - returns number of VSIs belong to a VSIG
2223  * @hw: pointer to the hardware structure
2224  * @blk: HW block
2225  * @vsig: VSIG to query
2226  * @refs: pointer to variable to receive the reference count
2227  */
2228 static enum ice_status
2229 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
2230 {
2231 	u16 idx = vsig & ICE_VSIG_IDX_M;
2232 	struct ice_vsig_vsi *ptr;
2233 
2234 	*refs = 0;
2235 
2236 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2237 		return ICE_ERR_DOES_NOT_EXIST;
2238 
2239 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2240 	while (ptr) {
2241 		(*refs)++;
2242 		ptr = ptr->next_vsi;
2243 	}
2244 
2245 	return ICE_SUCCESS;
2246 }
2247 
2248 /**
2249  * ice_has_prof_vsig - check to see if VSIG has a specific profile
2250  * @hw: pointer to the hardware structure
2251  * @blk: HW block
2252  * @vsig: VSIG to check against
2253  * @hdl: profile handle
2254  */
2255 static bool
2256 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
2257 {
2258 	u16 idx = vsig & ICE_VSIG_IDX_M;
2259 	struct ice_vsig_prof *ent;
2260 
2261 	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2262 			    ice_vsig_prof, list)
2263 		if (ent->profile_cookie == hdl)
2264 			return true;
2265 
2266 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
2267 		  vsig);
2268 	return false;
2269 }
2270 
2271 /**
2272  * ice_prof_bld_es - build profile ID extraction sequence changes
2273  * @hw: pointer to the HW struct
2274  * @blk: hardware block
2275  * @bld: the update package buffer build to add to
2276  * @chgs: the list of changes to make in hardware
2277  */
2278 static enum ice_status
2279 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
2280 		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
2281 {
2282 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
2283 	struct ice_chs_chg *tmp;
2284 
2285 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
2286 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
2287 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
2288 			struct ice_pkg_es *p;
2289 			u32 id;
2290 
2291 			id = ice_sect_id(blk, ICE_VEC_TBL);
2292 			p = (struct ice_pkg_es *)
2293 				ice_pkg_buf_alloc_section(bld, id,
2294 							  ice_struct_size(p, es,
2295 									  1) +
2296 							  vec_size -
2297 							  sizeof(p->es[0]));
2298 
2299 			if (!p)
2300 				return ICE_ERR_MAX_LIMIT;
2301 
2302 			p->count = CPU_TO_LE16(1);
2303 			p->offset = CPU_TO_LE16(tmp->prof_id);
2304 
2305 			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
2306 				   ICE_NONDMA_TO_NONDMA);
2307 		}
2308 
2309 	return ICE_SUCCESS;
2310 }
2311 
2312 /**
2313  * ice_prof_bld_tcam - build profile ID TCAM changes
2314  * @hw: pointer to the HW struct
2315  * @blk: hardware block
2316  * @bld: the update package buffer build to add to
2317  * @chgs: the list of changes to make in hardware
2318  */
2319 static enum ice_status
2320 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
2321 		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
2322 {
2323 	struct ice_chs_chg *tmp;
2324 
2325 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
2326 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
2327 			struct ice_prof_id_section *p;
2328 			u32 id;
2329 
2330 			id = ice_sect_id(blk, ICE_PROF_TCAM);
2331 			p = (struct ice_prof_id_section *)
2332 				ice_pkg_buf_alloc_section(bld, id,
2333 							  ice_struct_size(p,
2334 									  entry,
2335 									  1));
2336 
2337 			if (!p)
2338 				return ICE_ERR_MAX_LIMIT;
2339 
2340 			p->count = CPU_TO_LE16(1);
2341 			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
2342 			p->entry[0].prof_id = tmp->prof_id;
2343 
2344 			ice_memcpy(p->entry[0].key,
2345 				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
2346 				   sizeof(hw->blk[blk].prof.t->key),
2347 				   ICE_NONDMA_TO_NONDMA);
2348 		}
2349 
2350 	return ICE_SUCCESS;
2351 }
2352 
2353 /**
2354  * ice_prof_bld_xlt1 - build XLT1 changes
2355  * @blk: hardware block
2356  * @bld: the update package buffer build to add to
2357  * @chgs: the list of changes to make in hardware
2358  */
2359 static enum ice_status
2360 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
2361 		  struct LIST_HEAD_TYPE *chgs)
2362 {
2363 	struct ice_chs_chg *tmp;
2364 
2365 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
2366 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
2367 			struct ice_xlt1_section *p;
2368 			u32 id;
2369 
2370 			id = ice_sect_id(blk, ICE_XLT1);
2371 			p = (struct ice_xlt1_section *)
2372 				ice_pkg_buf_alloc_section(bld, id,
2373 							  ice_struct_size(p,
2374 									  value,
2375 									  1));
2376 
2377 			if (!p)
2378 				return ICE_ERR_MAX_LIMIT;
2379 
2380 			p->count = CPU_TO_LE16(1);
2381 			p->offset = CPU_TO_LE16(tmp->ptype);
2382 			p->value[0] = tmp->ptg;
2383 		}
2384 
2385 	return ICE_SUCCESS;
2386 }
2387 
2388 /**
2389  * ice_prof_bld_xlt2 - build XLT2 changes
2390  * @blk: hardware block
2391  * @bld: the update package buffer build to add to
2392  * @chgs: the list of changes to make in hardware
2393  */
2394 static enum ice_status
2395 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
2396 		  struct LIST_HEAD_TYPE *chgs)
2397 {
2398 	struct ice_chs_chg *tmp;
2399 
2400 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
2401 		struct ice_xlt2_section *p;
2402 		u32 id;
2403 
2404 		switch (tmp->type) {
2405 		case ICE_VSIG_ADD:
2406 		case ICE_VSI_MOVE:
2407 		case ICE_VSIG_REM:
2408 			id = ice_sect_id(blk, ICE_XLT2);
2409 			p = (struct ice_xlt2_section *)
2410 				ice_pkg_buf_alloc_section(bld, id,
2411 							  ice_struct_size(p,
2412 									  value,
2413 									  1));
2414 
2415 			if (!p)
2416 				return ICE_ERR_MAX_LIMIT;
2417 
2418 			p->count = CPU_TO_LE16(1);
2419 			p->offset = CPU_TO_LE16(tmp->vsi);
2420 			p->value[0] = CPU_TO_LE16(tmp->vsig);
2421 			break;
2422 		default:
2423 			break;
2424 		}
2425 	}
2426 
2427 	return ICE_SUCCESS;
2428 }
2429 
2430 /**
2431  * ice_upd_prof_hw - update hardware using the change list
2432  * @hw: pointer to the HW struct
2433  * @blk: hardware block
2434  * @chgs: the list of changes to make in hardware
2435  */
2436 static enum ice_status
2437 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
2438 		struct LIST_HEAD_TYPE *chgs)
2439 {
2440 	struct ice_buf_build *b;
2441 	struct ice_chs_chg *tmp;
2442 	enum ice_status status;
2443 	u16 pkg_sects;
2444 	u16 xlt1 = 0;
2445 	u16 xlt2 = 0;
2446 	u16 tcam = 0;
2447 	u16 es = 0;
2448 	u16 sects;
2449 
2450 	/* count number of sections we need */
2451 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
2452 		switch (tmp->type) {
2453 		case ICE_PTG_ES_ADD:
2454 			if (tmp->add_ptg)
2455 				xlt1++;
2456 			if (tmp->add_prof)
2457 				es++;
2458 			break;
2459 		case ICE_TCAM_ADD:
2460 			tcam++;
2461 			break;
2462 		case ICE_VSIG_ADD:
2463 		case ICE_VSI_MOVE:
2464 		case ICE_VSIG_REM:
2465 			xlt2++;
2466 			break;
2467 		default:
2468 			break;
2469 		}
2470 	}
2471 	sects = xlt1 + xlt2 + tcam + es;
2472 
2473 	if (!sects)
2474 		return ICE_SUCCESS;
2475 
2476 	/* Build update package buffer */
2477 	b = ice_pkg_buf_alloc(hw);
2478 	if (!b)
2479 		return ICE_ERR_NO_MEMORY;
2480 
2481 	status = ice_pkg_buf_reserve_section(b, sects);
2482 	if (status)
2483 		goto error_tmp;
2484 
2485 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
2486 	if (es) {
2487 		status = ice_prof_bld_es(hw, blk, b, chgs);
2488 		if (status)
2489 			goto error_tmp;
2490 	}
2491 
2492 	if (tcam) {
2493 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
2494 		if (status)
2495 			goto error_tmp;
2496 	}
2497 
2498 	if (xlt1) {
2499 		status = ice_prof_bld_xlt1(blk, b, chgs);
2500 		if (status)
2501 			goto error_tmp;
2502 	}
2503 
2504 	if (xlt2) {
2505 		status = ice_prof_bld_xlt2(blk, b, chgs);
2506 		if (status)
2507 			goto error_tmp;
2508 	}
2509 
2510 	/* After package buffer build check if the section count in buffer is
2511 	 * non-zero and matches the number of sections detected for package
2512 	 * update.
2513 	 */
2514 	pkg_sects = ice_pkg_buf_get_active_sections(b);
2515 	if (!pkg_sects || pkg_sects != sects) {
2516 		status = ICE_ERR_INVAL_SIZE;
2517 		goto error_tmp;
2518 	}
2519 
2520 	/* update package */
2521 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
2522 	if (status == ICE_ERR_AQ_ERROR)
2523 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
2524 
2525 error_tmp:
2526 	ice_pkg_buf_free(hw, b);
2527 	return status;
2528 }
2529 
2530 /**
2531  * ice_add_prof - add profile
2532  * @hw: pointer to the HW struct
2533  * @blk: hardware block
2534  * @id: profile tracking ID
2535  * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
2536  * @es: extraction sequence (length of array is determined by the block)
2537  *
2538  * This function registers a profile, which matches a set of PTGs with a
2539  * particular extraction sequence. While the hardware profile is allocated
2540  * it will not be written until the first call to ice_add_flow that specifies
2541  * the ID value used here.
2542  */
2543 enum ice_status
2544 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
2545 	     ice_bitmap_t *ptypes, struct ice_fv_word *es)
2546 {
2547 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
2548 	struct ice_prof_map *prof;
2549 	enum ice_status status;
2550 	u8 prof_id;
2551 	u16 ptype;
2552 
2553 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
2554 
2555 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2556 
2557 	/* search for existing profile */
2558 	status = ice_find_prof_id(hw, blk, es, &prof_id);
2559 	if (status) {
2560 		/* allocate profile ID */
2561 		status = ice_alloc_prof_id(hw, blk, &prof_id);
2562 		if (status)
2563 			goto err_ice_add_prof;
2564 
2565 		/* and write new es */
2566 		ice_write_es(hw, blk, prof_id, es);
2567 	}
2568 
2569 	ice_prof_inc_ref(hw, blk, prof_id);
2570 
2571 	/* add profile info */
2572 
2573 	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
2574 	if (!prof)
2575 		goto err_ice_add_prof;
2576 
2577 	prof->profile_cookie = id;
2578 	prof->prof_id = prof_id;
2579 	prof->ptg_cnt = 0;
2580 	prof->context = 0;
2581 
2582 	/* build list of ptgs */
2583 	ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
2584 		u8 ptg;
2585 
2586 		/* The package should place all ptypes in a non-zero
2587 		 * PTG, so the following call should never fail.
2588 		 */
2589 		if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
2590 			continue;
2591 
2592 		/* If PTG is already added, skip and continue */
2593 		if (ice_is_bit_set(ptgs_used, ptg))
2594 			continue;
2595 
2596 		ice_set_bit(ptg, ptgs_used);
2597 		prof->ptg[prof->ptg_cnt] = ptg;
2598 
2599 		if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
2600 			break;
2601 	}
2602 
2603 	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
2604 	status = ICE_SUCCESS;
2605 
2606 err_ice_add_prof:
2607 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2608 	return status;
2609 }
2610 
2611 /**
2612  * ice_search_prof_id - Search for a profile tracking ID
2613  * @hw: pointer to the HW struct
2614  * @blk: hardware block
2615  * @id: profile tracking ID
2616  *
2617  * This will search for a profile tracking ID which was previously added.
2618  * The profile map lock should be held before calling this function.
2619  */
2620 struct ice_prof_map *
2621 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
2622 {
2623 	struct ice_prof_map *entry = NULL;
2624 	struct ice_prof_map *map;
2625 
2626 	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
2627 		if (map->profile_cookie == id) {
2628 			entry = map;
2629 			break;
2630 		}
2631 
2632 	return entry;
2633 }
2634 
2635 /**
2636  * ice_set_prof_context - Set context for a given profile
2637  * @hw: pointer to the HW struct
2638  * @blk: hardware block
2639  * @id: profile tracking ID
2640  * @cntxt: context
2641  */
2642 enum ice_status
2643 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
2644 {
2645 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2646 	struct ice_prof_map *entry;
2647 
2648 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2649 	entry = ice_search_prof_id(hw, blk, id);
2650 	if (entry) {
2651 		entry->context = cntxt;
2652 		status = ICE_SUCCESS;
2653 	}
2654 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2655 	return status;
2656 }
2657 
2658 /**
2659  * ice_get_prof_context - Get context for a given profile
2660  * @hw: pointer to the HW struct
2661  * @blk: hardware block
2662  * @id: profile tracking ID
2663  * @cntxt: pointer to variable to receive the context
2664  */
2665 enum ice_status
2666 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
2667 {
2668 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2669 	struct ice_prof_map *entry;
2670 
2671 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2672 	entry = ice_search_prof_id(hw, blk, id);
2673 	if (entry) {
2674 		*cntxt = entry->context;
2675 		status = ICE_SUCCESS;
2676 	}
2677 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2678 	return status;
2679 }
2680 
2681 /**
2682  * ice_vsig_prof_id_count - count profiles in a VSIG
2683  * @hw: pointer to the HW struct
2684  * @blk: hardware block
2685  * @vsig: VSIG to remove the profile from
2686  */
2687 static u16
2688 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2689 {
2690 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
2691 	struct ice_vsig_prof *p;
2692 
2693 	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2694 			    ice_vsig_prof, list)
2695 		count++;
2696 
2697 	return count;
2698 }
2699 
2700 /**
2701  * ice_rel_tcam_idx - release a TCAM index
2702  * @hw: pointer to the HW struct
2703  * @blk: hardware block
2704  * @idx: the index to release
2705  */
2706 static enum ice_status
2707 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
2708 {
2709 	/* Masks to invoke a never match entry */
2710 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2711 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
2712 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
2713 	enum ice_status status;
2714 
2715 	/* write the TCAM entry */
2716 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
2717 				      dc_msk, nm_msk);
2718 	if (status)
2719 		return status;
2720 
2721 	/* release the TCAM entry */
2722 	status = ice_free_tcam_ent(hw, blk, idx);
2723 
2724 	return status;
2725 }
2726 
2727 /**
2728  * ice_rem_prof_id - remove one profile from a VSIG
2729  * @hw: pointer to the HW struct
2730  * @blk: hardware block
2731  * @prof: pointer to profile structure to remove
2732  */
2733 static enum ice_status
2734 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
2735 		struct ice_vsig_prof *prof)
2736 {
2737 	enum ice_status status;
2738 	u16 i;
2739 
2740 	for (i = 0; i < prof->tcam_count; i++)
2741 		if (prof->tcam[i].in_use) {
2742 			prof->tcam[i].in_use = false;
2743 			status = ice_rel_tcam_idx(hw, blk,
2744 						  prof->tcam[i].tcam_idx);
2745 			if (status)
2746 				return ICE_ERR_HW_TABLE;
2747 		}
2748 
2749 	return ICE_SUCCESS;
2750 }
2751 
2752 /**
2753  * ice_rem_vsig - remove VSIG
2754  * @hw: pointer to the HW struct
2755  * @blk: hardware block
2756  * @vsig: the VSIG to remove
2757  * @chg: the change list
2758  */
2759 static enum ice_status
2760 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
2761 	     struct LIST_HEAD_TYPE *chg)
2762 {
2763 	u16 idx = vsig & ICE_VSIG_IDX_M;
2764 	struct ice_vsig_vsi *vsi_cur;
2765 	struct ice_vsig_prof *d, *t;
2766 
2767 	/* remove TCAM entries */
2768 	LIST_FOR_EACH_ENTRY_SAFE(d, t,
2769 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2770 				 ice_vsig_prof, list) {
2771 		enum ice_status status;
2772 
2773 		status = ice_rem_prof_id(hw, blk, d);
2774 		if (status)
2775 			return status;
2776 
2777 		LIST_DEL(&d->list);
2778 		ice_free(hw, d);
2779 	}
2780 
2781 	/* Move all VSIS associated with this VSIG to the default VSIG */
2782 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2783 	/* If the VSIG has at least 1 VSI then iterate through the list
2784 	 * and remove the VSIs before deleting the group.
2785 	 */
2786 	if (vsi_cur)
2787 		do {
2788 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2789 			struct ice_chs_chg *p;
2790 
2791 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
2792 			if (!p)
2793 				return ICE_ERR_NO_MEMORY;
2794 
2795 			p->type = ICE_VSIG_REM;
2796 			p->orig_vsig = vsig;
2797 			p->vsig = ICE_DEFAULT_VSIG;
2798 			p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis);
2799 
2800 			LIST_ADD(&p->list_entry, chg);
2801 
2802 			vsi_cur = tmp;
2803 		} while (vsi_cur);
2804 
2805 	return ice_vsig_free(hw, blk, vsig);
2806 }
2807 
2808 /**
2809  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
2810  * @hw: pointer to the HW struct
2811  * @blk: hardware block
2812  * @vsig: VSIG to remove the profile from
2813  * @hdl: profile handle indicating which profile to remove
2814  * @chg: list to receive a record of changes
2815  */
2816 static enum ice_status
2817 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
2818 		     struct LIST_HEAD_TYPE *chg)
2819 {
2820 	u16 idx = vsig & ICE_VSIG_IDX_M;
2821 	struct ice_vsig_prof *p, *t;
2822 
2823 	LIST_FOR_EACH_ENTRY_SAFE(p, t,
2824 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2825 				 ice_vsig_prof, list)
2826 		if (p->profile_cookie == hdl) {
2827 			enum ice_status status;
2828 
2829 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
2830 				/* this is the last profile, remove the VSIG */
2831 				return ice_rem_vsig(hw, blk, vsig, chg);
2832 
2833 			status = ice_rem_prof_id(hw, blk, p);
2834 			if (!status) {
2835 				LIST_DEL(&p->list);
2836 				ice_free(hw, p);
2837 			}
2838 			return status;
2839 		}
2840 
2841 	return ICE_ERR_DOES_NOT_EXIST;
2842 }
2843 
2844 /**
2845  * ice_rem_flow_all - remove all flows with a particular profile
2846  * @hw: pointer to the HW struct
2847  * @blk: hardware block
2848  * @id: profile tracking ID
2849  */
2850 static enum ice_status
2851 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
2852 {
2853 	struct ice_chs_chg *del, *tmp;
2854 	enum ice_status status;
2855 	struct LIST_HEAD_TYPE chg;
2856 	u16 i;
2857 
2858 	INIT_LIST_HEAD(&chg);
2859 
2860 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2861 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
2862 			if (ice_has_prof_vsig(hw, blk, i, id)) {
2863 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
2864 							      &chg);
2865 				if (status)
2866 					goto err_ice_rem_flow_all;
2867 			}
2868 		}
2869 
2870 	status = ice_upd_prof_hw(hw, blk, &chg);
2871 
2872 err_ice_rem_flow_all:
2873 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
2874 		LIST_DEL(&del->list_entry);
2875 		ice_free(hw, del);
2876 	}
2877 
2878 	return status;
2879 }
2880 
2881 /**
2882  * ice_rem_prof - remove profile
2883  * @hw: pointer to the HW struct
2884  * @blk: hardware block
2885  * @id: profile tracking ID
2886  *
2887  * This will remove the profile specified by the ID parameter, which was
2888  * previously created through ice_add_prof. If any existing entries
2889  * are associated with this profile, they will be removed as well.
2890  */
2891 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
2892 {
2893 	struct ice_prof_map *pmap;
2894 	enum ice_status status;
2895 
2896 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2897 
2898 	pmap = ice_search_prof_id(hw, blk, id);
2899 	if (!pmap) {
2900 		status = ICE_ERR_DOES_NOT_EXIST;
2901 		goto err_ice_rem_prof;
2902 	}
2903 
2904 	/* remove all flows with this profile */
2905 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
2906 	if (status)
2907 		goto err_ice_rem_prof;
2908 
2909 	/* dereference profile, and possibly remove */
2910 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
2911 
2912 	LIST_DEL(&pmap->list);
2913 	ice_free(hw, pmap);
2914 
2915 err_ice_rem_prof:
2916 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2917 	return status;
2918 }
2919 
2920 /**
2921  * ice_get_prof - get profile
2922  * @hw: pointer to the HW struct
2923  * @blk: hardware block
2924  * @hdl: profile handle
2925  * @chg: change list
2926  */
2927 static enum ice_status
2928 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
2929 	     struct LIST_HEAD_TYPE *chg)
2930 {
2931 	enum ice_status status = ICE_SUCCESS;
2932 	struct ice_prof_map *map;
2933 	struct ice_chs_chg *p;
2934 	u16 i;
2935 
2936 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
2937 	/* Get the details on the profile specified by the handle ID */
2938 	map = ice_search_prof_id(hw, blk, hdl);
2939 	if (!map) {
2940 		status = ICE_ERR_DOES_NOT_EXIST;
2941 		goto err_ice_get_prof;
2942 	}
2943 
2944 	for (i = 0; i < map->ptg_cnt; i++)
2945 		if (!hw->blk[blk].es.written[map->prof_id]) {
2946 			/* add ES to change list */
2947 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
2948 			if (!p) {
2949 				status = ICE_ERR_NO_MEMORY;
2950 				goto err_ice_get_prof;
2951 			}
2952 
2953 			p->type = ICE_PTG_ES_ADD;
2954 			p->ptype = 0;
2955 			p->ptg = map->ptg[i];
2956 			p->add_ptg = 0;
2957 
2958 			p->add_prof = 1;
2959 			p->prof_id = map->prof_id;
2960 
2961 			hw->blk[blk].es.written[map->prof_id] = true;
2962 
2963 			LIST_ADD(&p->list_entry, chg);
2964 		}
2965 
2966 err_ice_get_prof:
2967 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
2968 	/* let caller clean up the change list */
2969 	return status;
2970 }
2971 
2972 /**
2973  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
2974  * @hw: pointer to the HW struct
2975  * @blk: hardware block
2976  * @vsig: VSIG from which to copy the list
2977  * @lst: output list
2978  *
2979  * This routine makes a copy of the list of profiles in the specified VSIG.
2980  */
2981 static enum ice_status
2982 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
2983 		   struct LIST_HEAD_TYPE *lst)
2984 {
2985 	struct ice_vsig_prof *ent1, *ent2;
2986 	u16 idx = vsig & ICE_VSIG_IDX_M;
2987 
2988 	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2989 			    ice_vsig_prof, list) {
2990 		struct ice_vsig_prof *p;
2991 
2992 		/* copy to the input list */
2993 		p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
2994 						       ICE_NONDMA_TO_NONDMA);
2995 		if (!p)
2996 			goto err_ice_get_profs_vsig;
2997 
2998 		LIST_ADD_TAIL(&p->list, lst);
2999 	}
3000 
3001 	return ICE_SUCCESS;
3002 
3003 err_ice_get_profs_vsig:
3004 	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
3005 		LIST_DEL(&ent1->list);
3006 		ice_free(hw, ent1);
3007 	}
3008 
3009 	return ICE_ERR_NO_MEMORY;
3010 }
3011 
3012 /**
3013  * ice_add_prof_to_lst - add profile entry to a list
3014  * @hw: pointer to the HW struct
3015  * @blk: hardware block
3016  * @lst: the list to be added to
3017  * @hdl: profile handle of entry to add
3018  */
3019 static enum ice_status
3020 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
3021 		    struct LIST_HEAD_TYPE *lst, u64 hdl)
3022 {
3023 	enum ice_status status = ICE_SUCCESS;
3024 	struct ice_prof_map *map;
3025 	struct ice_vsig_prof *p;
3026 	u16 i;
3027 
3028 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
3029 	map = ice_search_prof_id(hw, blk, hdl);
3030 	if (!map) {
3031 		status = ICE_ERR_DOES_NOT_EXIST;
3032 		goto err_ice_add_prof_to_lst;
3033 	}
3034 
3035 	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
3036 	if (!p) {
3037 		status = ICE_ERR_NO_MEMORY;
3038 		goto err_ice_add_prof_to_lst;
3039 	}
3040 
3041 	p->profile_cookie = map->profile_cookie;
3042 	p->prof_id = map->prof_id;
3043 	p->tcam_count = map->ptg_cnt;
3044 
3045 	for (i = 0; i < map->ptg_cnt; i++) {
3046 		p->tcam[i].prof_id = map->prof_id;
3047 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
3048 		p->tcam[i].ptg = map->ptg[i];
3049 	}
3050 
3051 	LIST_ADD(&p->list, lst);
3052 
3053 err_ice_add_prof_to_lst:
3054 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3055 	return status;
3056 }
3057 
3058 /**
3059  * ice_move_vsi - move VSI to another VSIG
3060  * @hw: pointer to the HW struct
3061  * @blk: hardware block
3062  * @vsi: the VSI to move
3063  * @vsig: the VSIG to move the VSI to
3064  * @chg: the change list
3065  */
3066 static enum ice_status
3067 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
3068 	     struct LIST_HEAD_TYPE *chg)
3069 {
3070 	enum ice_status status;
3071 	struct ice_chs_chg *p;
3072 	u16 orig_vsig;
3073 
3074 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
3075 	if (!p)
3076 		return ICE_ERR_NO_MEMORY;
3077 
3078 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3079 	if (!status)
3080 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3081 
3082 	if (status) {
3083 		ice_free(hw, p);
3084 		return status;
3085 	}
3086 
3087 	p->type = ICE_VSI_MOVE;
3088 	p->vsi = vsi;
3089 	p->orig_vsig = orig_vsig;
3090 	p->vsig = vsig;
3091 
3092 	LIST_ADD(&p->list_entry, chg);
3093 
3094 	return ICE_SUCCESS;
3095 }
3096 
3097 /**
3098  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
3099  * @hw: pointer to the HW struct
3100  * @idx: the index of the TCAM entry to remove
3101  * @chg: the list of change structures to search
3102  */
3103 static void
3104 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
3105 {
3106 	struct ice_chs_chg *pos, *tmp;
3107 
3108 	LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
3109 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
3110 			LIST_DEL(&tmp->list_entry);
3111 			ice_free(hw, tmp);
3112 		}
3113 }
3114 
3115 /**
3116  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
3117  * @hw: pointer to the HW struct
3118  * @blk: hardware block
3119  * @enable: true to enable, false to disable
3120  * @vsig: the VSIG of the TCAM entry
3121  * @tcam: pointer the TCAM info structure of the TCAM to disable
3122  * @chg: the change list
3123  *
3124  * This function appends an enable or disable TCAM entry in the change log
3125  */
3126 static enum ice_status
3127 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
3128 		      u16 vsig, struct ice_tcam_inf *tcam,
3129 		      struct LIST_HEAD_TYPE *chg)
3130 {
3131 	enum ice_status status;
3132 	struct ice_chs_chg *p;
3133 
3134 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3135 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3136 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3137 
3138 	/* if disabling, free the TCAM */
3139 	if (!enable) {
3140 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
3141 
3142 		/* if we have already created a change for this TCAM entry, then
3143 		 * we need to remove that entry, in order to prevent writing to
3144 		 * a TCAM entry we no longer will have ownership of.
3145 		 */
3146 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
3147 		tcam->tcam_idx = 0;
3148 		tcam->in_use = 0;
3149 		return status;
3150 	}
3151 
3152 	/* for re-enabling, reallocate a TCAM */
3153 	status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
3154 	if (status)
3155 		return status;
3156 
3157 	/* add TCAM to change list */
3158 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
3159 	if (!p)
3160 		return ICE_ERR_NO_MEMORY;
3161 
3162 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
3163 				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
3164 				      nm_msk);
3165 	if (status)
3166 		goto err_ice_prof_tcam_ena_dis;
3167 
3168 	tcam->in_use = 1;
3169 
3170 	p->type = ICE_TCAM_ADD;
3171 	p->add_tcam_idx = true;
3172 	p->prof_id = tcam->prof_id;
3173 	p->ptg = tcam->ptg;
3174 	p->vsig = 0;
3175 	p->tcam_idx = tcam->tcam_idx;
3176 
3177 	/* log change */
3178 	LIST_ADD(&p->list_entry, chg);
3179 
3180 	return ICE_SUCCESS;
3181 
3182 err_ice_prof_tcam_ena_dis:
3183 	ice_free(hw, p);
3184 	return status;
3185 }
3186 
3187 /**
3188  * ice_adj_prof_priorities - adjust profile based on priorities
3189  * @hw: pointer to the HW struct
3190  * @blk: hardware block
3191  * @vsig: the VSIG for which to adjust profile priorities
3192  * @chg: the change list
3193  */
3194 static enum ice_status
3195 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3196 			struct LIST_HEAD_TYPE *chg)
3197 {
3198 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
3199 	enum ice_status status = ICE_SUCCESS;
3200 	struct ice_vsig_prof *t;
3201 	u16 idx;
3202 
3203 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
3204 	idx = vsig & ICE_VSIG_IDX_M;
3205 
3206 	/* Priority is based on the order in which the profiles are added. The
3207 	 * newest added profile has highest priority and the oldest added
3208 	 * profile has the lowest priority. Since the profile property list for
3209 	 * a VSIG is sorted from newest to oldest, this code traverses the list
3210 	 * in order and enables the first of each PTG that it finds (that is not
3211 	 * already enabled); it also disables any duplicate PTGs that it finds
3212 	 * in the older profiles (that are currently enabled).
3213 	 */
3214 
3215 	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3216 			    ice_vsig_prof, list) {
3217 		u16 i;
3218 
3219 		for (i = 0; i < t->tcam_count; i++) {
3220 			bool used;
3221 
3222 			/* Scan the priorities from newest to oldest.
3223 			 * Make sure that the newest profiles take priority.
3224 			 */
3225 			used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);
3226 
3227 			if (used && t->tcam[i].in_use) {
3228 				/* need to mark this PTG as never match, as it
3229 				 * was already in use and therefore duplicate
3230 				 * (and lower priority)
3231 				 */
3232 				status = ice_prof_tcam_ena_dis(hw, blk, false,
3233 							       vsig,
3234 							       &t->tcam[i],
3235 							       chg);
3236 				if (status)
3237 					return status;
3238 			} else if (!used && !t->tcam[i].in_use) {
3239 				/* need to enable this PTG, as it in not in use
3240 				 * and not enabled (highest priority)
3241 				 */
3242 				status = ice_prof_tcam_ena_dis(hw, blk, true,
3243 							       vsig,
3244 							       &t->tcam[i],
3245 							       chg);
3246 				if (status)
3247 					return status;
3248 			}
3249 
3250 			/* keep track of used ptgs */
3251 			ice_set_bit(t->tcam[i].ptg, ptgs_used);
3252 		}
3253 	}
3254 
3255 	return status;
3256 }
3257 
3258 /**
3259  * ice_add_prof_id_vsig - add profile to VSIG
3260  * @hw: pointer to the HW struct
3261  * @blk: hardware block
3262  * @vsig: the VSIG to which this profile is to be added
3263  * @hdl: the profile handle indicating the profile to add
3264  * @rev: true to add entries to the end of the list
3265  * @chg: the change list
3266  */
3267 static enum ice_status
3268 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3269 		     bool rev, struct LIST_HEAD_TYPE *chg)
3270 {
3271 	/* Masks that ignore flags */
3272 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3273 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3274 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3275 	enum ice_status status = ICE_SUCCESS;
3276 	struct ice_prof_map *map;
3277 	struct ice_vsig_prof *t;
3278 	struct ice_chs_chg *p;
3279 	u16 vsig_idx, i;
3280 
3281 	/* Error, if this VSIG already has this profile */
3282 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
3283 		return ICE_ERR_ALREADY_EXISTS;
3284 
3285 	/* new VSIG profile structure */
3286 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
3287 	if (!t)
3288 		return ICE_ERR_NO_MEMORY;
3289 
3290 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
3291 	/* Get the details on the profile specified by the handle ID */
3292 	map = ice_search_prof_id(hw, blk, hdl);
3293 	if (!map) {
3294 		status = ICE_ERR_DOES_NOT_EXIST;
3295 		goto err_ice_add_prof_id_vsig;
3296 	}
3297 
3298 	t->profile_cookie = map->profile_cookie;
3299 	t->prof_id = map->prof_id;
3300 	t->tcam_count = map->ptg_cnt;
3301 
3302 	/* create TCAM entries */
3303 	for (i = 0; i < map->ptg_cnt; i++) {
3304 		u16 tcam_idx;
3305 
3306 		/* add TCAM to change list */
3307 		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
3308 		if (!p) {
3309 			status = ICE_ERR_NO_MEMORY;
3310 			goto err_ice_add_prof_id_vsig;
3311 		}
3312 
3313 		/* allocate the TCAM entry index */
3314 		status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
3315 		if (status) {
3316 			ice_free(hw, p);
3317 			goto err_ice_add_prof_id_vsig;
3318 		}
3319 
3320 		t->tcam[i].ptg = map->ptg[i];
3321 		t->tcam[i].prof_id = map->prof_id;
3322 		t->tcam[i].tcam_idx = tcam_idx;
3323 		t->tcam[i].in_use = true;
3324 
3325 		p->type = ICE_TCAM_ADD;
3326 		p->add_tcam_idx = true;
3327 		p->prof_id = t->tcam[i].prof_id;
3328 		p->ptg = t->tcam[i].ptg;
3329 		p->vsig = vsig;
3330 		p->tcam_idx = t->tcam[i].tcam_idx;
3331 
3332 		/* write the TCAM entry */
3333 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
3334 					      t->tcam[i].prof_id,
3335 					      t->tcam[i].ptg, vsig, 0, 0,
3336 					      vl_msk, dc_msk, nm_msk);
3337 		if (status) {
3338 			ice_free(hw, p);
3339 			goto err_ice_add_prof_id_vsig;
3340 		}
3341 
3342 		/* log change */
3343 		LIST_ADD(&p->list_entry, chg);
3344 	}
3345 
3346 	/* add profile to VSIG */
3347 	vsig_idx = vsig & ICE_VSIG_IDX_M;
3348 	if (rev)
3349 		LIST_ADD_TAIL(&t->list,
3350 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3351 	else
3352 		LIST_ADD(&t->list,
3353 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3354 
3355 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3356 	return status;
3357 
3358 err_ice_add_prof_id_vsig:
3359 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
3360 	/* let caller clean up the change list */
3361 	ice_free(hw, t);
3362 	return status;
3363 }
3364 
3365 /**
3366  * ice_create_prof_id_vsig - add a new VSIG with a single profile
3367  * @hw: pointer to the HW struct
3368  * @blk: hardware block
3369  * @vsi: the initial VSI that will be in VSIG
3370  * @hdl: the profile handle of the profile that will be added to the VSIG
3371  * @chg: the change list
3372  */
3373 static enum ice_status
3374 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
3375 			struct LIST_HEAD_TYPE *chg)
3376 {
3377 	enum ice_status status;
3378 	struct ice_chs_chg *p;
3379 	u16 new_vsig;
3380 
3381 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
3382 	if (!p)
3383 		return ICE_ERR_NO_MEMORY;
3384 
3385 	new_vsig = ice_vsig_alloc(hw, blk);
3386 	if (!new_vsig) {
3387 		status = ICE_ERR_HW_TABLE;
3388 		goto err_ice_create_prof_id_vsig;
3389 	}
3390 
3391 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
3392 	if (status)
3393 		goto err_ice_create_prof_id_vsig;
3394 
3395 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
3396 	if (status)
3397 		goto err_ice_create_prof_id_vsig;
3398 
3399 	p->type = ICE_VSIG_ADD;
3400 	p->vsi = vsi;
3401 	p->orig_vsig = ICE_DEFAULT_VSIG;
3402 	p->vsig = new_vsig;
3403 
3404 	LIST_ADD(&p->list_entry, chg);
3405 
3406 	return ICE_SUCCESS;
3407 
3408 err_ice_create_prof_id_vsig:
3409 	/* let caller clean up the change list */
3410 	ice_free(hw, p);
3411 	return status;
3412 }
3413 
3414 /**
3415  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
3416  * @hw: pointer to the HW struct
3417  * @blk: hardware block
3418  * @vsi: the initial VSI that will be in VSIG
3419  * @lst: the list of profile that will be added to the VSIG
3420  * @new_vsig: return of new VSIG
3421  * @chg: the change list
3422  */
3423 static enum ice_status
3424 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
3425 			 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
3426 			 struct LIST_HEAD_TYPE *chg)
3427 {
3428 	struct ice_vsig_prof *t;
3429 	enum ice_status status;
3430 	u16 vsig;
3431 
3432 	vsig = ice_vsig_alloc(hw, blk);
3433 	if (!vsig)
3434 		return ICE_ERR_HW_TABLE;
3435 
3436 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
3437 	if (status)
3438 		return status;
3439 
3440 	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
3441 		/* Reverse the order here since we are copying the list */
3442 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
3443 					      true, chg);
3444 		if (status)
3445 			return status;
3446 	}
3447 
3448 	*new_vsig = vsig;
3449 
3450 	return ICE_SUCCESS;
3451 }
3452 
3453 /**
3454  * ice_find_prof_vsig - find a VSIG with a specific profile handle
3455  * @hw: pointer to the HW struct
3456  * @blk: hardware block
3457  * @hdl: the profile handle of the profile to search for
3458  * @vsig: returns the VSIG with the matching profile
3459  */
3460 static bool
3461 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
3462 {
3463 	struct ice_vsig_prof *t;
3464 	enum ice_status status;
3465 	struct LIST_HEAD_TYPE lst;
3466 
3467 	INIT_LIST_HEAD(&lst);
3468 
3469 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
3470 	if (!t)
3471 		return false;
3472 
3473 	t->profile_cookie = hdl;
3474 	LIST_ADD(&t->list, &lst);
3475 
3476 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
3477 
3478 	LIST_DEL(&t->list);
3479 	ice_free(hw, t);
3480 
3481 	return status == ICE_SUCCESS;
3482 }
3483 
3484 /**
3485  * ice_add_vsi_flow - add VSI flow
3486  * @hw: pointer to the HW struct
3487  * @blk: hardware block
3488  * @vsi: input VSI
3489  * @vsig: target VSIG to include the input VSI
3490  *
3491  * Calling this function will add the VSI to a given VSIG and
3492  * update the HW tables accordingly. This call can be used to
3493  * add multiple VSIs to a VSIG if we know beforehand that those
3494  * VSIs have the same characteristics of the VSIG. This will
3495  * save time in generating a new VSIG and TCAMs till a match is
3496  * found and subsequent rollback when a matching VSIG is found.
3497  */
3498 enum ice_status
3499 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3500 {
3501 	struct ice_chs_chg *tmp, *del;
3502 	struct LIST_HEAD_TYPE chg;
3503 	enum ice_status status;
3504 
3505 	/* if target VSIG is default the move is invalid */
3506 	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
3507 		return ICE_ERR_PARAM;
3508 
3509 	INIT_LIST_HEAD(&chg);
3510 
3511 	/* move VSI to the VSIG that matches */
3512 	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3513 	/* update hardware if success */
3514 	if (!status)
3515 		status = ice_upd_prof_hw(hw, blk, &chg);
3516 
3517 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
3518 		LIST_DEL(&del->list_entry);
3519 		ice_free(hw, del);
3520 	}
3521 
3522 	return status;
3523 }
3524 
3525 /**
3526  * ice_add_prof_id_flow - add profile flow
3527  * @hw: pointer to the HW struct
3528  * @blk: hardware block
3529  * @vsi: the VSI to enable with the profile specified by ID
3530  * @hdl: profile handle
3531  *
3532  * Calling this function will update the hardware tables to enable the
3533  * profile indicated by the ID parameter for the VSIs specified in the VSI
3534  * array. Once successfully called, the flow will be enabled.
3535  */
3536 enum ice_status
3537 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
3538 {
3539 	struct ice_vsig_prof *tmp1, *del1;
3540 	struct ice_chs_chg *tmp, *del;
3541 	struct LIST_HEAD_TYPE union_lst;
3542 	enum ice_status status;
3543 	struct LIST_HEAD_TYPE chg;
3544 	u16 vsig;
3545 
3546 	INIT_LIST_HEAD(&union_lst);
3547 	INIT_LIST_HEAD(&chg);
3548 
3549 	/* Get profile */
3550 	status = ice_get_prof(hw, blk, hdl, &chg);
3551 	if (status)
3552 		return status;
3553 
3554 	/* determine if VSI is already part of a VSIG */
3555 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
3556 	if (!status && vsig) {
3557 		bool only_vsi;
3558 		u16 or_vsig;
3559 		u16 ref;
3560 
3561 		/* found in VSIG */
3562 		or_vsig = vsig;
3563 
3564 		/* make sure that there is no overlap/conflict between the new
3565 		 * characteristics and the existing ones; we don't support that
3566 		 * scenario
3567 		 */
3568 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
3569 			status = ICE_ERR_ALREADY_EXISTS;
3570 			goto err_ice_add_prof_id_flow;
3571 		}
3572 
3573 		/* last VSI in the VSIG? */
3574 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
3575 		if (status)
3576 			goto err_ice_add_prof_id_flow;
3577 		only_vsi = (ref == 1);
3578 
3579 		/* create a union of the current profiles and the one being
3580 		 * added
3581 		 */
3582 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
3583 		if (status)
3584 			goto err_ice_add_prof_id_flow;
3585 
3586 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
3587 		if (status)
3588 			goto err_ice_add_prof_id_flow;
3589 
3590 		/* search for an existing VSIG with an exact charc match */
3591 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
3592 		if (!status) {
3593 			/* move VSI to the VSIG that matches */
3594 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3595 			if (status)
3596 				goto err_ice_add_prof_id_flow;
3597 
3598 			/* VSI has been moved out of or_vsig. If the or_vsig had
3599 			 * only that VSI it is now empty and can be removed.
3600 			 */
3601 			if (only_vsi) {
3602 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
3603 				if (status)
3604 					goto err_ice_add_prof_id_flow;
3605 			}
3606 		} else if (only_vsi) {
3607 			/* If the original VSIG only contains one VSI, then it
3608 			 * will be the requesting VSI. In this case the VSI is
3609 			 * not sharing entries and we can simply add the new
3610 			 * profile to the VSIG.
3611 			 */
3612 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
3613 						      &chg);
3614 			if (status)
3615 				goto err_ice_add_prof_id_flow;
3616 
3617 			/* Adjust priorities */
3618 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
3619 			if (status)
3620 				goto err_ice_add_prof_id_flow;
3621 		} else {
3622 			/* No match, so we need a new VSIG */
3623 			status = ice_create_vsig_from_lst(hw, blk, vsi,
3624 							  &union_lst, &vsig,
3625 							  &chg);
3626 			if (status)
3627 				goto err_ice_add_prof_id_flow;
3628 
3629 			/* Adjust priorities */
3630 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
3631 			if (status)
3632 				goto err_ice_add_prof_id_flow;
3633 		}
3634 	} else {
3635 		/* need to find or add a VSIG */
3636 		/* search for an existing VSIG with an exact charc match */
3637 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
3638 			/* found an exact match */
3639 			/* add or move VSI to the VSIG that matches */
3640 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3641 			if (status)
3642 				goto err_ice_add_prof_id_flow;
3643 		} else {
3644 			/* we did not find an exact match */
3645 			/* we need to add a VSIG */
3646 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
3647 							 &chg);
3648 			if (status)
3649 				goto err_ice_add_prof_id_flow;
3650 		}
3651 	}
3652 
3653 	/* update hardware */
3654 	if (!status)
3655 		status = ice_upd_prof_hw(hw, blk, &chg);
3656 
3657 err_ice_add_prof_id_flow:
3658 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
3659 		LIST_DEL(&del->list_entry);
3660 		ice_free(hw, del);
3661 	}
3662 
3663 	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
3664 		LIST_DEL(&del1->list);
3665 		ice_free(hw, del1);
3666 	}
3667 
3668 	return status;
3669 }
3670 
3671 /**
3672  * ice_add_flow - add flow
3673  * @hw: pointer to the HW struct
3674  * @blk: hardware block
3675  * @vsi: array of VSIs to enable with the profile specified by ID
3676  * @count: number of elements in the VSI array
3677  * @id: profile tracking ID
3678  *
3679  * Calling this function will update the hardware tables to enable the
3680  * profile indicated by the ID parameter for the VSIs specified in the VSI
3681  * array. Once successfully called, the flow will be enabled.
3682  */
3683 enum ice_status
3684 ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
3685 	     u64 id)
3686 {
3687 	u16 i;
3688 
3689 	for (i = 0; i < count; i++) {
3690 		enum ice_status status;
3691 
3692 		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
3693 		if (status)
3694 			return status;
3695 	}
3696 
3697 	return ICE_SUCCESS;
3698 }
3699 
3700 /**
3701  * ice_rem_prof_from_list - remove a profile from list
3702  * @hw: pointer to the HW struct
3703  * @lst: list to remove the profile from
3704  * @hdl: the profile handle indicating the profile to remove
3705  */
3706 static enum ice_status
3707 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
3708 {
3709 	struct ice_vsig_prof *ent, *tmp;
3710 
3711 	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
3712 		if (ent->profile_cookie == hdl) {
3713 			LIST_DEL(&ent->list);
3714 			ice_free(hw, ent);
3715 			return ICE_SUCCESS;
3716 		}
3717 
3718 	return ICE_ERR_DOES_NOT_EXIST;
3719 }
3720 
3721 /**
3722  * ice_rem_prof_id_flow - remove flow
3723  * @hw: pointer to the HW struct
3724  * @blk: hardware block
3725  * @vsi: the VSI from which to remove the profile specified by ID
3726  * @hdl: profile tracking handle
3727  *
3728  * Calling this function will update the hardware tables to remove the
3729  * profile indicated by the ID parameter for the VSIs specified in the VSI
3730  * array. Once successfully called, the flow will be disabled.
3731  */
3732 enum ice_status
3733 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
3734 {
3735 	struct ice_vsig_prof *tmp1, *del1;
3736 	struct ice_chs_chg *tmp, *del;
3737 	struct LIST_HEAD_TYPE chg, copy;
3738 	enum ice_status status;
3739 	u16 vsig;
3740 
3741 	INIT_LIST_HEAD(&copy);
3742 	INIT_LIST_HEAD(&chg);
3743 
3744 	/* determine if VSI is already part of a VSIG */
3745 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
3746 	if (!status && vsig) {
3747 		bool last_profile;
3748 		bool only_vsi;
3749 		u16 ref;
3750 
3751 		/* found in VSIG */
3752 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
3753 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
3754 		if (status)
3755 			goto err_ice_rem_prof_id_flow;
3756 		only_vsi = (ref == 1);
3757 
3758 		if (only_vsi) {
3759 			/* If the original VSIG only contains one reference,
3760 			 * which will be the requesting VSI, then the VSI is not
3761 			 * sharing entries and we can simply remove the specific
3762 			 * characteristics from the VSIG.
3763 			 */
3764 
3765 			if (last_profile) {
3766 				/* If there are no profiles left for this VSIG,
3767 				 * then simply remove the VSIG.
3768 				 */
3769 				status = ice_rem_vsig(hw, blk, vsig, &chg);
3770 				if (status)
3771 					goto err_ice_rem_prof_id_flow;
3772 			} else {
3773 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
3774 							      hdl, &chg);
3775 				if (status)
3776 					goto err_ice_rem_prof_id_flow;
3777 
3778 				/* Adjust priorities */
3779 				status = ice_adj_prof_priorities(hw, blk, vsig,
3780 								 &chg);
3781 				if (status)
3782 					goto err_ice_rem_prof_id_flow;
3783 			}
3784 
3785 		} else {
3786 			/* Make a copy of the VSIG's list of Profiles */
3787 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
3788 			if (status)
3789 				goto err_ice_rem_prof_id_flow;
3790 
3791 			/* Remove specified profile entry from the list */
3792 			status = ice_rem_prof_from_list(hw, &copy, hdl);
3793 			if (status)
3794 				goto err_ice_rem_prof_id_flow;
3795 
3796 			if (LIST_EMPTY(&copy)) {
3797 				status = ice_move_vsi(hw, blk, vsi,
3798 						      ICE_DEFAULT_VSIG, &chg);
3799 				if (status)
3800 					goto err_ice_rem_prof_id_flow;
3801 
3802 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
3803 							    &vsig)) {
3804 				/* found an exact match */
3805 				/* add or move VSI to the VSIG that matches */
3806 				/* Search for a VSIG with a matching profile
3807 				 * list
3808 				 */
3809 
3810 				/* Found match, move VSI to the matching VSIG */
3811 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
3812 				if (status)
3813 					goto err_ice_rem_prof_id_flow;
3814 			} else {
3815 				/* since no existing VSIG supports this
3816 				 * characteristic pattern, we need to create a
3817 				 * new VSIG and TCAM entries
3818 				 */
3819 				status = ice_create_vsig_from_lst(hw, blk, vsi,
3820 								  &copy, &vsig,
3821 								  &chg);
3822 				if (status)
3823 					goto err_ice_rem_prof_id_flow;
3824 
3825 				/* Adjust priorities */
3826 				status = ice_adj_prof_priorities(hw, blk, vsig,
3827 								 &chg);
3828 				if (status)
3829 					goto err_ice_rem_prof_id_flow;
3830 			}
3831 		}
3832 	} else {
3833 		status = ICE_ERR_DOES_NOT_EXIST;
3834 	}
3835 
3836 	/* update hardware tables */
3837 	if (!status)
3838 		status = ice_upd_prof_hw(hw, blk, &chg);
3839 
3840 err_ice_rem_prof_id_flow:
3841 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
3842 		LIST_DEL(&del->list_entry);
3843 		ice_free(hw, del);
3844 	}
3845 
3846 	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
3847 		LIST_DEL(&del1->list);
3848 		ice_free(hw, del1);
3849 	}
3850 
3851 	return status;
3852 }
3853 
3854 /**
3855  * ice_rem_flow - remove flow
3856  * @hw: pointer to the HW struct
3857  * @blk: hardware block
3858  * @vsi: array of VSIs from which to remove the profile specified by ID
3859  * @count: number of elements in the VSI array
3860  * @id: profile tracking ID
3861  *
3862  * The function will remove flows from the specified VSIs that were enabled
3863  * using ice_add_flow. The ID value will indicated which profile will be
3864  * removed. Once successfully called, the flow will be disabled.
3865  */
3866 enum ice_status
3867 ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
3868 	     u64 id)
3869 {
3870 	u16 i;
3871 
3872 	for (i = 0; i < count; i++) {
3873 		enum ice_status status;
3874 
3875 		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
3876 		if (status)
3877 			return status;
3878 	}
3879 
3880 	return ICE_SUCCESS;
3881 }
3882