/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
#define ICE_TNL_PRE	"TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
	{ TNL_LAST,		"" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
179  * returns a NULL pointer, then the end of the buffers has been reached, or an
180  * unexpected value has been detected (for example an invalid section count or
181  * an invalid buffer end value).
182  */
183 static struct ice_buf_hdr *
184 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
185 {
186 	if (ice_seg) {
187 		state->buf_table = ice_find_buf_table(ice_seg);
188 		if (!state->buf_table)
189 			return NULL;
190 
191 		state->buf_idx = 0;
192 		return ice_pkg_val_buf(state->buf_table->buf_array);
193 	}
194 
195 	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
196 		return ice_pkg_val_buf(state->buf_table->buf_array +
197 				       state->buf_idx);
198 	else
199 		return NULL;
200 }
201 
202 /**
203  * ice_pkg_advance_sect
204  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
205  * @state: pointer to the enum state
206  *
207  * This helper function will advance the section within the ice segment,
208  * also advancing the buffer if needed.
209  */
210 static bool
211 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
212 {
213 	if (!ice_seg && !state->buf)
214 		return false;
215 
216 	if (!ice_seg && state->buf)
217 		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
218 			return true;
219 
220 	state->buf = ice_pkg_enum_buf(ice_seg, state);
221 	if (!state->buf)
222 		return false;
223 
224 	/* start of new buffer, reset section index */
225 	state->sect_idx = 0;
226 	return true;
227 }
228 
229 /**
230  * ice_pkg_enum_section
231  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
232  * @state: pointer to the enum state
233  * @sect_type: section type to enumerate
234  *
235  * This function will enumerate all the sections of a particular type in the
236  * ice segment. The first call is made with the ice_seg parameter non-NULL;
237  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
238  * When the function returns a NULL pointer, then the end of the matching
239  * sections has been reached.
240  */
241 static void *
242 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
243 		     u32 sect_type)
244 {
245 	u16 offset, size;
246 
247 	if (ice_seg)
248 		state->type = sect_type;
249 
250 	if (!ice_pkg_advance_sect(ice_seg, state))
251 		return NULL;
252 
253 	/* scan for next matching section */
254 	while (state->buf->section_entry[state->sect_idx].type !=
255 	       CPU_TO_LE32(state->type))
256 		if (!ice_pkg_advance_sect(NULL, state))
257 			return NULL;
258 
259 	/* validate section */
260 	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
261 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
262 		return NULL;
263 
264 	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
265 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
266 		return NULL;
267 
268 	/* make sure the section fits in the buffer */
269 	if (offset + size > ICE_PKG_BUF_SIZE)
270 		return NULL;
271 
272 	state->sect_type =
273 		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
274 
275 	/* calc pointer to this section */
276 	state->sect = ((u8 *)state->buf) +
277 		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
278 
279 	return state->sect;
280 }
281 
282 /**
283  * ice_pkg_enum_entry
284  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
285  * @state: pointer to the enum state
286  * @sect_type: section type to enumerate
287  * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries in the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

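/* A minimal sketch of the enumeration idiom above (hypothetical caller, not
 * part of the driver): prime the walk with a non-NULL ice_seg, then keep
 * passing NULL to continue from the saved state until NULL is returned.
 * ice_sw_fv_handler (defined later in this file) serves as the entry handler.
 */
#if 0	/* illustrative only, not compiled */
static void ice_example_walk_sw_fvs(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (fv)
			ice_debug(hw, ICE_DBG_PKG, "FV at offset %u\n", offset);
		ice_seg = NULL; /* continue enumeration on the next pass */
	} while (fv);
}
#endif
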
/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	/* cppcheck-suppress nullPointer */
	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = (struct ice_boost_tcam_section *)section;
	if (index >= LE16_TO_CPU(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_seg structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = (struct ice_boost_tcam_entry *)
		       ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
			*entry = tcam;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	/* cppcheck-suppress nullPointer */
	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = (struct ice_label_section *)section;
	if (index >= LE16_TO_CPU(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
						       NULL,
						       ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = LE16_TO_CPU(label->value);
	return label->name;
}

/**
 * ice_add_tunnel_hint
 * @hw: pointer to the HW structure
 * @label_name: label text
 * @val: value of the tunnel port boost entry
 */
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		u16 i;

		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}
	}
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name) {
		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
			/* check for a tunnel entry */
			ice_add_tunnel_hint(hw, label_name, val);

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers for tunnels */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to the array where the resulting key portion will be stored
 * @key_inv: pointer to the array where the resulting key invert portion will
 *	     be stored
 *
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' =    b01, always match a 0 bit
 *     '1' =    b10, always match a 1 bit
 *     '?' =    b11, don't care bit (always matches)
 *     '~' =    b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return ICE_SUCCESS;
}

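/* A worked instance of the encoding above (hypothetical values, shown only to
 * make the bit layout concrete): encoding the documented example with all
 * bits valid yields key = 0xEC and key_inv = 0x1C; bits 0-1 never match (00),
 * bits 2-3 are don't care (11), bit 4 matches 1 (key 0/invert 1), and bits
 * 5-7 match 0 (key 1/invert 0).
 */
#if 0	/* illustrative only, not compiled */
	u8 key = 0, key_inv = 0;

	/* val = b00010101, dont_care = b00001100, nvr_mtch = b00000011 */
	if (!ice_gen_key_word(0x15, 0xff, 0x0c, 0x03, &key, &key_inv)) {
		/* key == 0xEC (b11101100), key_inv == 0x1C (b00011100) */
	}
#endif
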
/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, and false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += ice_hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

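/* A small sketch of the check above (hypothetical mask values): a two-byte
 * mask with a single set bit passes a maximum of one, while two set bits
 * would fail it.
 */
#if 0	/* illustrative only, not compiled */
	static const u8 nm_mask[2] = { 0x10, 0x00 };

	if (ice_bits_max_set(nm_mask, sizeof(nm_mask), 1)) {
		/* at most one bit set: acceptable as a never match mask */
	}
#endif
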
/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;
	half_size = size / 2;

	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return ICE_SUCCESS;
}

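/* A minimal usage sketch of ice_set_key (hypothetical buffer layout): the
 * first half of 'key' receives the key bytes and the second half the invert
 * bytes, so a 10-byte buffer exact-matches a 16-bit tunnel port written at
 * byte offset 1 of the 5-byte key half.
 */
#if 0	/* illustrative only, not compiled */
	u8 key[10] = { 0 };
	__le16 port = CPU_TO_LE16(4789);	/* e.g. the VXLAN UDP port */
	enum ice_status status;

	/* NULL upd/dc/nm: update all bits, no don't care, no never match */
	status = ice_set_key(key, sizeof(key), (u8 *)&port, NULL, NULL, NULL,
			     1, sizeof(port));
#endif
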
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

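/* A sketch of the acquire/release pattern described above (hypothetical
 * caller): only ICE_SUCCESS means the lock is held and must be released;
 * ICE_ERR_AQ_NO_WORK means another PF already performed the download.
 */
#if 0	/* illustrative only, not compiled */
	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (!status) {
		/* ... download package buffers ... */
		ice_release_global_cfg_lock(hw);
	} else if (status == ICE_ERR_AQ_NO_WORK) {
		/* package already written; skip the update entirely */
	}
#endif
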
/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));

		if (LE32_TO_CPU(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 */
static enum ice_status
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status = ICE_SUCCESS;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	status = ice_update_pkg_no_lock(hw, bufs, count);

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_SUCCESS;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (LE16_TO_CPU(bh->section_count))
				if (LE32_TO_CPU(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	if (!status) {
		status = ice_set_vlan_mode(hw);
		if (status)
			ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
				  status);
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  LE32_TO_CPU(ice_seg->hdr.seg_type),
		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  LE32_TO_CPU(ice_buf_tbl->buf_count));

	status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				    LE32_TO_CPU(ice_buf_tbl->buf_count));

	ice_cache_vlan_mode(hw);

	return status;
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		/* Get package information from the Metadata Section */
		meta = (struct ice_meta_sect *)
			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					     ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_ERR_CFG;
		}

		hw->pkg_ver = meta->ver;
		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
			   ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(pkg_info->pkg_info[i].name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return status;
}

/**
 * ice_find_label_value
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @name: name of the label to search for
 * @type: the section type that will contain the label
 * @value: pointer to a value that will return the label's value if found
 *
 * Finds a label's value given the label name and the section type to search.
 * The ice_seg parameter must not be NULL since the first call to
 * ice_enum_labels requires a pointer to an actual ice_seg structure.
 */
enum ice_status
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
		     u16 *value)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		label_name = ice_enum_labels(ice_seg, type, &state, &val);
		if (label_name && !strcmp(label_name, name)) {
			*value = val;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (label_name);

	return ICE_ERR_CFG;
}

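/* A minimal usage sketch (hypothetical label name; real names come from the
 * package, e.g. the TNL_*_PF<n> labels scanned by ice_init_pkg_hints):
 */
#if 0	/* illustrative only, not compiled */
	u16 val;

	if (!ice_find_label_value(hw->seg, "TNL_VXLAN_PF0",
				  ICE_SID_LBL_RXPARSER_TMEM, &val)) {
		/* val now holds the label's value (a boost TCAM address) */
	}
#endif
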
/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < ice_struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = LE32_TO_CPU(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < ice_struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + LE32_TO_CPU(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return ICE_SUCCESS;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		ice_free(hw, hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return ICE_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
			pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	ice_free(hw, pkg);
	return status;
}

/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as of type ice_sw_fv_section and
 * enumerates offset field. "offset" is an index into the field vector table.
 */
static void *
ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_sw_fv_section *fv_section =
		(struct ice_sw_fv_section *)section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= LE16_TO_CPU(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table need to add the relative index to the base_offset
		 * field of this section
		 */
		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;
}

/**
 * ice_get_prof_index_max - get the max profile index of the used profiles
 * @hw: pointer to the HW struct
 *
 * Calling this function will get the max profile index of the used profiles
 * and store the index number in struct ice_switch_info *switch_info
 * in hw for later use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;

	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in profiles that are not used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vector words.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return ICE_SUCCESS;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = ICE_SUCCESS;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		ice_free(hw, buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}

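/* Typical caller flow (hypothetical sketch; the field names follow the
 * FreeBSD firmware(9) interface): the package arrives in a firmware image
 * whose backing memory is released after the call, so the copying variant
 * must be used.
 */
#if 0	/* illustrative only, not compiled */
	const struct firmware *fw = firmware_get("ice_ddp");
	enum ice_status status;

	if (fw) {
		status = ice_copy_and_init_pkg(hw, (const u8 *)fw->data,
					       fw->datasize);
		/* hw->pkg_copy now owns the copy; ice_free_seg() frees it */
		firmware_put(fw, FIRMWARE_UNLOAD);
	}
#endif
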
1704 /**
1705  * ice_pkg_buf_alloc
1706  * @hw: pointer to the HW structure
1707  *
1708  * Allocates a package buffer and returns a pointer to the buffer header.
1709  * Note: all package contents must be in Little Endian form.
1710  */
1711 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1712 {
1713 	struct ice_buf_build *bld;
1714 	struct ice_buf_hdr *buf;
1715 
1716 	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1717 	if (!bld)
1718 		return NULL;
1719 
1720 	buf = (struct ice_buf_hdr *)bld;
1721 	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1722 					     section_entry));
1723 	return bld;
1724 }
1725 
1726 /**
1727  * ice_get_sw_prof_type - determine switch profile type
1728  * @hw: pointer to the HW structure
1729  * @fv: pointer to the switch field vector
1730  */
1731 static enum ice_prof_type
1732 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1733 {
1734 	u16 i;
1735 
1736 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1737 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1738 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1739 		    fv->ew[i].off == ICE_VNI_OFFSET)
1740 			return ICE_PROF_TUN_UDP;
1741 
1742 		/* GRE tunnel will have GRE protocol */
1743 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1744 			return ICE_PROF_TUN_GRE;
1745 	}
1746 
1747 	return ICE_PROF_NON_TUN;
1748 }
1749 
1750 /**
1751  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1752  * @hw: pointer to hardware structure
1753  * @req_profs: type of profiles requested
1754  * @bm: pointer to memory for returning the bitmap of field vectors
1755  */
1756 void
1757 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1758 		     ice_bitmap_t *bm)
1759 {
1760 	struct ice_pkg_enum state;
1761 	struct ice_seg *ice_seg;
1762 	struct ice_fv *fv;
1763 
1764 	if (req_profs == ICE_PROF_ALL) {
1765 		ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1766 		return;
1767 	}
1768 
1769 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1770 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1771 	ice_seg = hw->seg;
1772 	do {
1773 		enum ice_prof_type prof_type;
1774 		u32 offset;
1775 
1776 		fv = (struct ice_fv *)
1777 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1778 					   &offset, ice_sw_fv_handler);
1779 		ice_seg = NULL;
1780 
1781 		if (fv) {
1782 			/* Determine field vector type */
1783 			prof_type = ice_get_sw_prof_type(hw, fv);
1784 
1785 			if (req_profs & prof_type)
1786 				ice_set_bit((u16)offset, bm);
1787 		}
1788 	} while (fv);
1789 }
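
/*
 * Usage sketch (illustrative; assumes the ice_declare_bitmap() helper
 * from the driver's bitmap support): collect only the UDP tunnel
 * profiles from the switch block.
 *
 *	ice_declare_bitmap(fv_bm, ICE_MAX_NUM_PROFILES);
 *
 *	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP, fv_bm);
 */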
1790 
1791 /**
1792  * ice_get_sw_fv_list
1793  * @hw: pointer to the HW structure
1794  * @prot_ids: array of protocol IDs to search the field vectors for
1795  * @ids_cnt: number of protocol IDs in the prot_ids array
1796  * @bm: bitmap of field vectors to consider
1797  * @fv_list: head of the list to be populated
1798  *
1799  * Finds all the field vector entries from switch block that contain
1800  * a given protocol ID and returns a list of structures of type
1801  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1802  * definition and profile ID information
1803  * NOTE: The caller of the function is responsible for freeing the memory
1804  * allocated for every list entry.
1805  */
1806 enum ice_status
1807 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1808 		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1809 {
1810 	struct ice_sw_fv_list_entry *fvl;
1811 	struct ice_sw_fv_list_entry *tmp;
1812 	struct ice_pkg_enum state;
1813 	struct ice_seg *ice_seg;
1814 	struct ice_fv *fv;
1815 	u32 offset;
1816 
1817 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1818 
1819 	if (!ids_cnt || !hw->seg)
1820 		return ICE_ERR_PARAM;
1821 
1822 	ice_seg = hw->seg;
1823 	do {
1824 		u16 i;
1825 
1826 		fv = (struct ice_fv *)
1827 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1828 					   &offset, ice_sw_fv_handler);
1829 		if (!fv)
1830 			break;
1831 		ice_seg = NULL;
1832 
1833 		/* If field vector is not in the bitmap list, then skip this
1834 		 * profile.
1835 		 */
1836 		if (!ice_is_bit_set(bm, (u16)offset))
1837 			continue;
1838 
1839 		for (i = 0; i < ids_cnt; i++) {
1840 			int j;
1841 
1842 			/* This code assumes that if a switch field vector line
1843 			 * has a matching protocol, then this line will contain
1844 			 * the entries necessary to represent every field in
1845 			 * that protocol header.
1846 			 */
1847 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1848 				if (fv->ew[j].prot_id == prot_ids[i])
1849 					break;
1850 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1851 				break;
1852 			if (i + 1 == ids_cnt) {
1853 				fvl = (struct ice_sw_fv_list_entry *)
1854 					ice_malloc(hw, sizeof(*fvl));
1855 				if (!fvl)
1856 					goto err;
1857 				fvl->fv_ptr = fv;
1858 				fvl->profile_id = offset;
1859 				LIST_ADD(&fvl->list_entry, fv_list);
1860 				break;
1861 			}
1862 		}
1863 	} while (fv);
1864 	if (LIST_EMPTY(fv_list))
1865 		return ICE_ERR_CFG;
1866 	return ICE_SUCCESS;
1867 
1868 err:
1869 	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1870 				 list_entry) {
1871 		LIST_DEL(&fvl->list_entry);
1872 		ice_free(hw, fvl);
1873 	}
1874 
1875 	return ICE_ERR_NO_MEMORY;
1876 }
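
/*
 * Cleanup sketch for callers of ice_get_sw_fv_list(): the entries are
 * owned by the caller and are released the same way the error path
 * above releases them.
 *
 *	struct ice_sw_fv_list_entry *fvl, *tmp;
 *
 *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, &fv_list, ice_sw_fv_list_entry,
 *				 list_entry) {
 *		LIST_DEL(&fvl->list_entry);
 *		ice_free(hw, fvl);
 *	}
 */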
1877 
1878 /**
1879  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1880  * @hw: pointer to hardware structure
1881  */
1882 void ice_init_prof_result_bm(struct ice_hw *hw)
1883 {
1884 	struct ice_pkg_enum state;
1885 	struct ice_seg *ice_seg;
1886 	struct ice_fv *fv;
1887 
1888 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1889 
1890 	if (!hw->seg)
1891 		return;
1892 
1893 	ice_seg = hw->seg;
1894 	do {
1895 		u32 off;
1896 		u16 i;
1897 
1898 		fv = (struct ice_fv *)
1899 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1900 					   &off, ice_sw_fv_handler);
1901 		ice_seg = NULL;
1902 		if (!fv)
1903 			break;
1904 
1905 		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1906 				ICE_MAX_FV_WORDS);
1907 
1908 		/* Determine empty field vector indices, these can be
1909 		 * used for recipe results. Skip index 0, since it is
1910 		 * always used for Switch ID.
1911 		 */
1912 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1913 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1914 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1915 				ice_set_bit(i,
1916 					    hw->switch_info->prof_res_bm[off]);
1917 	} while (fv);
1918 }
1919 
1920 /**
1921  * ice_pkg_buf_free
1922  * @hw: pointer to the HW structure
1923  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1924  *
1925  * Frees a package buffer
1926  */
1927 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1928 {
1929 	ice_free(hw, bld);
1930 }
1931 
1932 /**
1933  * ice_pkg_buf_reserve_section
1934  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1935  * @count: the number of sections to reserve
1936  *
1937  * Reserves one or more section table entries in a package buffer. This routine
1938  * can be called multiple times, as long as all calls occur before any call
1939  * to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
1940  * called, the number of reserved sections can no longer be increased; not
1941  * using all reserved sections is fine, but this will result in some wasted
1942  * space in the buffer.
1943  * Note: all package contents must be in Little Endian form.
1944  */
1945 static enum ice_status
1946 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1947 {
1948 	struct ice_buf_hdr *buf;
1949 	u16 section_count;
1950 	u16 data_end;
1951 
1952 	if (!bld)
1953 		return ICE_ERR_PARAM;
1954 
1955 	buf = (struct ice_buf_hdr *)&bld->buf;
1956 
1957 	/* already an active section, can't increase table size */
1958 	section_count = LE16_TO_CPU(buf->section_count);
1959 	if (section_count > 0)
1960 		return ICE_ERR_CFG;
1961 
1962 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1963 		return ICE_ERR_CFG;
1964 	bld->reserved_section_table_entries += count;
1965 
1966 	data_end = LE16_TO_CPU(buf->data_end) +
1967 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1968 	buf->data_end = CPU_TO_LE16(data_end);
1969 
1970 	return ICE_SUCCESS;
1971 }
1972 
1973 /**
1974  * ice_pkg_buf_alloc_section
1975  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1976  * @type: the section type value
1977  * @size: the size of the section to reserve (in bytes)
1978  *
1979  * Reserves memory in the buffer for a section's content and updates the
1980  * buffer's status accordingly. This routine returns a pointer to the first
1981  * byte of the section start within the buffer, which is used to fill in the
1982  * section contents.
1983  * Note: all package contents must be in Little Endian form.
1984  */
1985 static void *
1986 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1987 {
1988 	struct ice_buf_hdr *buf;
1989 	u16 sect_count;
1990 	u16 data_end;
1991 
1992 	if (!bld || !type || !size)
1993 		return NULL;
1994 
1995 	buf = (struct ice_buf_hdr *)&bld->buf;
1996 
1997 	/* check for enough space left in buffer */
1998 	data_end = LE16_TO_CPU(buf->data_end);
1999 
2000 	/* section start must align on 4 byte boundary */
2001 	data_end = ICE_ALIGN(data_end, 4);
2002 
2003 	if ((data_end + size) > ICE_MAX_S_DATA_END)
2004 		return NULL;
2005 
2006 	/* check for more available section table entries */
2007 	sect_count = LE16_TO_CPU(buf->section_count);
2008 	if (sect_count < bld->reserved_section_table_entries) {
2009 		void *section_ptr = ((u8 *)buf) + data_end;
2010 
2011 		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
2012 		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
2013 		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
2014 
2015 		data_end += size;
2016 		buf->data_end = CPU_TO_LE16(data_end);
2017 
2018 		buf->section_count = CPU_TO_LE16(sect_count + 1);
2019 		return section_ptr;
2020 	}
2021 
2022 	/* no free section table entries */
2023 	return NULL;
2024 }
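
/*
 * Typical build sequence (sketch, mirroring ice_create_tunnel() below):
 * reserve every section table entry first, then allocate and fill each
 * section, and finally submit the buffer. "sid" and "size" stand in for
 * a real section ID and byte count.
 *
 *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
 *	void *sect;
 *
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *	if (ice_pkg_buf_reserve_section(bld, 1))
 *		goto out;
 *	sect = ice_pkg_buf_alloc_section(bld, sid, size);
 *	if (!sect)
 *		goto out;
 *	... fill the section contents in Little Endian form ...
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * out:
 *	ice_pkg_buf_free(hw, bld);
 */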
2025 
2026 /**
2027  * ice_pkg_buf_alloc_single_section
2028  * @hw: pointer to the HW structure
2029  * @type: the section type value
2030  * @size: the size of the section to reserve (in bytes)
2031  * @section: returns pointer to the section
2032  *
2033  * Allocates a package buffer with a single section.
2034  * Note: all package contents must be in Little Endian form.
2035  */
2036 struct ice_buf_build *
2037 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2038 				 void **section)
2039 {
2040 	struct ice_buf_build *buf;
2041 
2042 	if (!section)
2043 		return NULL;
2044 
2045 	buf = ice_pkg_buf_alloc(hw);
2046 	if (!buf)
2047 		return NULL;
2048 
2049 	if (ice_pkg_buf_reserve_section(buf, 1))
2050 		goto ice_pkg_buf_alloc_single_section_err;
2051 
2052 	*section = ice_pkg_buf_alloc_section(buf, type, size);
2053 	if (!*section)
2054 		goto ice_pkg_buf_alloc_single_section_err;
2055 
2056 	return buf;
2057 
2058 ice_pkg_buf_alloc_single_section_err:
2059 	ice_pkg_buf_free(hw, buf);
2060 	return NULL;
2061 }
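
/*
 * Sketch: this helper collapses the reserve/alloc pattern above into one
 * call; ice_ptg_update_xlt1() further below uses exactly this form. The
 * entry count "n" is a stand-in for a real value.
 *
 *	struct ice_xlt1_section *sect;
 *	struct ice_buf_build *bld;
 *
 *	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
 *					       ice_struct_size(sect, value, n),
 *					       (void **)&sect);
 */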
2062 
2063 /**
2064  * ice_pkg_buf_unreserve_section
2065  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2066  * @count: the number of sections to unreserve
2067  *
2068  * Unreserves one or more section table entries in a package buffer, releasing
2069  * space that can be used for section data. This routine can be called
2070  * multiple times, as long as all calls occur before any call to
2071  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
2072  * called, the number of reserved sections can no longer be decreased; not
2073  * using all reserved sections is fine, but this will result in some wasted
2074  * space in the buffer.
2075  * Note: all package contents must be in Little Endian form.
2076  */
2077 enum ice_status
2078 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
2079 {
2080 	struct ice_buf_hdr *buf;
2081 	u16 section_count;
2082 	u16 data_end;
2083 
2084 	if (!bld)
2085 		return ICE_ERR_PARAM;
2086 
2087 	buf = (struct ice_buf_hdr *)&bld->buf;
2088 
2089 	/* already an active section, can't decrease table size */
2090 	section_count = LE16_TO_CPU(buf->section_count);
2091 	if (section_count > 0)
2092 		return ICE_ERR_CFG;
2093 
2094 	if (count > bld->reserved_section_table_entries)
2095 		return ICE_ERR_CFG;
2096 	bld->reserved_section_table_entries -= count;
2097 
2098 	data_end = LE16_TO_CPU(buf->data_end) -
2099 		FLEX_ARRAY_SIZE(buf, section_entry, count);
2100 	buf->data_end = CPU_TO_LE16(data_end);
2101 
2102 	return ICE_SUCCESS;
2103 }
2104 
2105 /**
2106  * ice_pkg_buf_get_free_space
2107  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2108  *
2109  * Returns the number of free bytes remaining in the buffer.
2110  * Note: all package contents must be in Little Endian form.
2111  */
2112 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
2113 {
2114 	struct ice_buf_hdr *buf;
2115 
2116 	if (!bld)
2117 		return 0;
2118 
2119 	buf = (struct ice_buf_hdr *)&bld->buf;
2120 	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
2121 }
2122 
2123 /**
2124  * ice_pkg_buf_get_active_sections
2125  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2126  *
2127  * Returns the number of active sections. Before using the package buffer
2128  * in an update package command, the caller should make sure that there is at
2129  * least one active section - otherwise, the buffer is not legal and should
2130  * not be used.
2131  * Note: all package contents must be in Little Endian form.
2132  */
2133 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2134 {
2135 	struct ice_buf_hdr *buf;
2136 
2137 	if (!bld)
2138 		return 0;
2139 
2140 	buf = (struct ice_buf_hdr *)&bld->buf;
2141 	return LE16_TO_CPU(buf->section_count);
2142 }
2143 
2144 /**
2145  * ice_pkg_buf
2146  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2147  *
2148  * Return a pointer to the buffer's header
2149  */
2150 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2151 {
2152 	if (!bld)
2153 		return NULL;
2154 
2155 	return &bld->buf;
2156 }
2157 
2158 /**
2159  * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
2160  * @hw: pointer to the HW structure
2161  * @port: port to search for
2162  * @index: optionally returns index
2163  *
2164  * Returns whether a port is already in use as a tunnel, and optionally its
2165  * index
2166  */
2167 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2168 {
2169 	u16 i;
2170 
2171 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2172 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2173 			if (index)
2174 				*index = i;
2175 			return true;
2176 		}
2177 
2178 	return false;
2179 }
2180 
2181 /**
2182  * ice_tunnel_port_in_use
2183  * @hw: pointer to the HW structure
2184  * @port: port to search for
2185  * @index: optionally returns index
2186  *
2187  * Returns whether a port is already in use as a tunnel, and optionally its
2188  * index
2189  */
2190 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2191 {
2192 	bool res;
2193 
2194 	ice_acquire_lock(&hw->tnl_lock);
2195 	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2196 	ice_release_lock(&hw->tnl_lock);
2197 
2198 	return res;
2199 }
2200 
2201 /**
2202  * ice_tunnel_get_type
2203  * @hw: pointer to the HW structure
2204  * @port: port to search for
2205  * @type: returns the tunnel type
2206  *
2207  * For a given port number, this function will return the type of tunnel.
2208  */
2209 bool
2210 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2211 {
2212 	bool res = false;
2213 	u16 i;
2214 
2215 	ice_acquire_lock(&hw->tnl_lock);
2216 
2217 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2218 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2219 			*type = hw->tnl.tbl[i].type;
2220 			res = true;
2221 			break;
2222 		}
2223 
2224 	ice_release_lock(&hw->tnl_lock);
2225 
2226 	return res;
2227 }
2228 
2229 /**
2230  * ice_find_free_tunnel_entry
2231  * @hw: pointer to the HW structure
2232  * @type: tunnel type
2233  * @index: optionally returns index
2234  *
2235  * Returns whether there is a free tunnel entry, and optionally its index
2236  */
2237 static bool
2238 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2239 			   u16 *index)
2240 {
2241 	u16 i;
2242 
2243 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2244 		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2245 		    hw->tnl.tbl[i].type == type) {
2246 			if (index)
2247 				*index = i;
2248 			return true;
2249 		}
2250 
2251 	return false;
2252 }
2253 
2254 /**
2255  * ice_get_open_tunnel_port - retrieve an open tunnel port
2256  * @hw: pointer to the HW structure
2257  * @type: tunnel type (TNL_ALL will return any open port)
2258  * @port: returns open port
2259  */
2260 bool
2261 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2262 			 u16 *port)
2263 {
2264 	bool res = false;
2265 	u16 i;
2266 
2267 	ice_acquire_lock(&hw->tnl_lock);
2268 
2269 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2270 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2271 		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2272 			*port = hw->tnl.tbl[i].port;
2273 			res = true;
2274 			break;
2275 		}
2276 
2277 	ice_release_lock(&hw->tnl_lock);
2278 
2279 	return res;
2280 }
2281 
2282 /**
2283  * ice_create_tunnel
2284  * @hw: pointer to the HW structure
2285  * @type: type of tunnel
2286  * @port: port of tunnel to create
2287  *
2288  * Create a tunnel by updating the parse graph in the parser. We do that by
2289  * creating a package buffer with the tunnel info and issuing an update package
2290  * command.
2291  */
2292 enum ice_status
2293 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2294 {
2295 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2296 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2297 	struct ice_buf_build *bld;
2298 	u16 index;
2299 
2300 	ice_acquire_lock(&hw->tnl_lock);
2301 
2302 	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2303 		hw->tnl.tbl[index].ref++;
2304 		status = ICE_SUCCESS;
2305 		goto ice_create_tunnel_end;
2306 	}
2307 
2308 	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2309 		status = ICE_ERR_OUT_OF_RANGE;
2310 		goto ice_create_tunnel_end;
2311 	}
2312 
2313 	bld = ice_pkg_buf_alloc(hw);
2314 	if (!bld) {
2315 		status = ICE_ERR_NO_MEMORY;
2316 		goto ice_create_tunnel_end;
2317 	}
2318 
2319 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2320 	if (ice_pkg_buf_reserve_section(bld, 2))
2321 		goto ice_create_tunnel_err;
2322 
2323 	sect_rx = (struct ice_boost_tcam_section *)
2324 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2325 					  ice_struct_size(sect_rx, tcam, 1));
2326 	if (!sect_rx)
2327 		goto ice_create_tunnel_err;
2328 	sect_rx->count = CPU_TO_LE16(1);
2329 
2330 	sect_tx = (struct ice_boost_tcam_section *)
2331 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2332 					  ice_struct_size(sect_tx, tcam, 1));
2333 	if (!sect_tx)
2334 		goto ice_create_tunnel_err;
2335 	sect_tx->count = CPU_TO_LE16(1);
2336 
2337 	/* copy original boost entry to update package buffer */
2338 	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2339 		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2340 
2341 	/* overwrite the never-match dest port key bits with the encoded port
2342 	 * bits
2343 	 */
2344 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2345 		    (u8 *)&port, NULL, NULL, NULL,
2346 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2347 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2348 
2349 	/* exact copy of entry to Tx section entry */
2350 	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2351 		   ICE_NONDMA_TO_NONDMA);
2352 
2353 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2354 	if (!status) {
2355 		hw->tnl.tbl[index].port = port;
2356 		hw->tnl.tbl[index].in_use = true;
2357 		hw->tnl.tbl[index].ref = 1;
2358 	}
2359 
2360 ice_create_tunnel_err:
2361 	ice_pkg_buf_free(hw, bld);
2362 
2363 ice_create_tunnel_end:
2364 	ice_release_lock(&hw->tnl_lock);
2365 
2366 	return status;
2367 }
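
/*
 * Lifecycle sketch (the port number is only an example; 4789 is the
 * IANA-assigned VXLAN port): creating a tunnel on an already-used port
 * merely bumps the reference count, and each ice_destroy_tunnel() call
 * drops it; the parse graph is only rewritten when the last reference
 * goes away.
 *
 *	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);
 *	...
 *	status = ice_destroy_tunnel(hw, 4789, false);
 */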
2368 
2369 /**
2370  * ice_destroy_tunnel
2371  * @hw: pointer to the HW structure
2372  * @port: port of tunnel to destroy (ignored if the all parameter is true)
2373  * @all: flag that states to destroy all tunnels
2374  *
2375  * Destroys a tunnel or all tunnels by creating an update package buffer
2376  * targeting the specific updates requested and then performing an update
2377  * package.
2378  */
2379 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2380 {
2381 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2382 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2383 	struct ice_buf_build *bld;
2384 	u16 count = 0;
2385 	u16 index;
2386 	u16 size;
2387 	u16 i, j;
2388 
2389 	ice_acquire_lock(&hw->tnl_lock);
2390 
2391 	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2392 		if (hw->tnl.tbl[index].ref > 1) {
2393 			hw->tnl.tbl[index].ref--;
2394 			status = ICE_SUCCESS;
2395 			goto ice_destroy_tunnel_end;
2396 		}
2397 
2398 	/* determine count */
2399 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2400 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2401 		    (all || hw->tnl.tbl[i].port == port))
2402 			count++;
2403 
2404 	if (!count) {
2405 		status = ICE_ERR_PARAM;
2406 		goto ice_destroy_tunnel_end;
2407 	}
2408 
2409 	/* size of section - there is at least one entry */
2410 	size = ice_struct_size(sect_rx, tcam, count);
2411 
2412 	bld = ice_pkg_buf_alloc(hw);
2413 	if (!bld) {
2414 		status = ICE_ERR_NO_MEMORY;
2415 		goto ice_destroy_tunnel_end;
2416 	}
2417 
2418 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2419 	if (ice_pkg_buf_reserve_section(bld, 2))
2420 		goto ice_destroy_tunnel_err;
2421 
2422 	sect_rx = (struct ice_boost_tcam_section *)
2423 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2424 					  size);
2425 	if (!sect_rx)
2426 		goto ice_destroy_tunnel_err;
2427 	sect_rx->count = CPU_TO_LE16(count);
2428 
2429 	sect_tx = (struct ice_boost_tcam_section *)
2430 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2431 					  size);
2432 	if (!sect_tx)
2433 		goto ice_destroy_tunnel_err;
2434 	sect_tx->count = CPU_TO_LE16(count);
2435 
2436 	/* copy original boost entry to update package buffer, one copy to Rx
2437 	 * section, another copy to the Tx section
2438 	 */
2439 	for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2440 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2441 		    (all || hw->tnl.tbl[i].port == port)) {
2442 			ice_memcpy(sect_rx->tcam + j,
2443 				   hw->tnl.tbl[i].boost_entry,
2444 				   sizeof(*sect_rx->tcam),
2445 				   ICE_NONDMA_TO_NONDMA);
2446 			ice_memcpy(sect_tx->tcam + j,
2447 				   hw->tnl.tbl[i].boost_entry,
2448 				   sizeof(*sect_tx->tcam),
2449 				   ICE_NONDMA_TO_NONDMA);
2450 			hw->tnl.tbl[i].marked = true;
2451 			j++;
2452 		}
2453 
2454 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2455 	if (!status)
2456 		for (i = 0; i < hw->tnl.count &&
2457 		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
2458 			if (hw->tnl.tbl[i].marked) {
2459 				hw->tnl.tbl[i].ref = 0;
2460 				hw->tnl.tbl[i].port = 0;
2461 				hw->tnl.tbl[i].in_use = false;
2462 				hw->tnl.tbl[i].marked = false;
2463 			}
2464 
2465 ice_destroy_tunnel_err:
2466 	ice_pkg_buf_free(hw, bld);
2467 
2468 ice_destroy_tunnel_end:
2469 	ice_release_lock(&hw->tnl_lock);
2470 
2471 	return status;
2472 }
2473 
2474 /**
2475  * ice_replay_tunnels
2476  * @hw: pointer to the HW structure
2477  *
2478  * Replays all tunnels
2479  */
2480 enum ice_status ice_replay_tunnels(struct ice_hw *hw)
2481 {
2482 	enum ice_status status = ICE_SUCCESS;
2483 	u16 i;
2484 
2485 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2486 
2487 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
2488 		enum ice_tunnel_type type = hw->tnl.tbl[i].type;
2489 		u16 refs = hw->tnl.tbl[i].ref;
2490 		u16 port = hw->tnl.tbl[i].port;
2491 
2492 		if (!hw->tnl.tbl[i].in_use)
2493 			continue;
2494 
2495 		/* Replay tunnels one at a time by destroying them, then
2496 		 * recreating them
2497 		 */
2498 		hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
2499 		status = ice_destroy_tunnel(hw, port, false);
2500 		if (status) {
2501 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
2502 				  status, port);
2503 			break;
2504 		}
2505 
2506 		status = ice_create_tunnel(hw, type, port);
2507 		if (status) {
2508 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n",
2509 				  status, port);
2510 			break;
2511 		}
2512 
2513 		/* reset to original ref count */
2514 		hw->tnl.tbl[i].ref = refs;
2515 	}
2516 
2517 	return status;
2518 }
2519 
2520 /**
2521  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2522  * @hw: pointer to the hardware structure
2523  * @blk: hardware block
2524  * @prof: profile ID
2525  * @fv_idx: field vector word index
2526  * @prot: variable to receive the protocol ID
2527  * @off: variable to receive the protocol offset
2528  */
2529 enum ice_status
2530 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2531 		  u8 *prot, u16 *off)
2532 {
2533 	struct ice_fv_word *fv_ext;
2534 
2535 	if (prof >= hw->blk[blk].es.count)
2536 		return ICE_ERR_PARAM;
2537 
2538 	if (fv_idx >= hw->blk[blk].es.fvw)
2539 		return ICE_ERR_PARAM;
2540 
2541 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2542 
2543 	*prot = fv_ext[fv_idx].prot_id;
2544 	*off = fv_ext[fv_idx].off;
2545 
2546 	return ICE_SUCCESS;
2547 }
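
/*
 * Sketch (indices illustrative): recover which protocol field word 2 of
 * switch profile 7 extracts.
 *
 *	u8 prot;
 *	u16 off;
 *
 *	if (!ice_find_prot_off(hw, ICE_BLK_SW, 7, 2, &prot, &off))
 *		... prot/off now identify the extracted field ...
 */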
2548 
2549 /* PTG Management */
2550 
2551 /**
2552  * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
2553  * @hw: pointer to the hardware structure
2554  * @blk: HW block
2555  *
2556  * This function will update the XLT1 hardware table to reflect the new
2557  * packet type group configuration.
2558  */
2559 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
2560 {
2561 	struct ice_xlt1_section *sect;
2562 	struct ice_buf_build *bld;
2563 	enum ice_status status;
2564 	u16 index;
2565 
2566 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
2567 					       ice_struct_size(sect, value,
2568 							       ICE_XLT1_CNT),
2569 					       (void **)&sect);
2570 	if (!bld)
2571 		return ICE_ERR_NO_MEMORY;
2572 
2573 	sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
2574 	sect->offset = CPU_TO_LE16(0);
2575 	for (index = 0; index < ICE_XLT1_CNT; index++)
2576 		sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
2577 
2578 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2579 
2580 	ice_pkg_buf_free(hw, bld);
2581 
2582 	return status;
2583 }
2584 
2585 /**
2586  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2587  * @hw: pointer to the hardware structure
2588  * @blk: HW block
2589  * @ptype: the ptype to search for
2590  * @ptg: pointer to variable that receives the PTG
2591  *
2592  * This function will search the PTGs for a particular ptype, returning the
2593  * PTG ID that contains it through the PTG parameter, with the value of
2594  * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2595  */
2596 static enum ice_status
2597 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2598 {
2599 	if (ptype >= ICE_XLT1_CNT || !ptg)
2600 		return ICE_ERR_PARAM;
2601 
2602 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2603 	return ICE_SUCCESS;
2604 }
2605 
2606 /**
2607  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2608  * @hw: pointer to the hardware structure
2609  * @blk: HW block
2610  * @ptg: the PTG to allocate
2611  *
2612  * This function allocates (marks as in use) the packet type group ID
2613  * specified by the PTG parameter.
2614  */
2615 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2616 {
2617 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2618 }
2619 
2620 /**
2621  * ice_ptg_free - Frees a packet type group
2622  * @hw: pointer to the hardware structure
2623  * @blk: HW block
2624  * @ptg: the PTG ID to free
2625  *
2626  * This function frees a packet type group, and returns all the current ptypes
2627  * within it to the default PTG.
2628  */
2629 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2630 {
2631 	struct ice_ptg_ptype *p, *temp;
2632 
2633 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
2634 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2635 	while (p) {
2636 		p->ptg = ICE_DEFAULT_PTG;
2637 		temp = p->next_ptype;
2638 		p->next_ptype = NULL;
2639 		p = temp;
2640 	}
2641 
2642 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
2643 }
2644 
2645 /**
2646  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2647  * @hw: pointer to the hardware structure
2648  * @blk: HW block
2649  * @ptype: the ptype to remove
2650  * @ptg: the PTG to remove the ptype from
2651  *
2652  * This function will remove the ptype from the specific PTG, and move it to
2653  * the default PTG (ICE_DEFAULT_PTG).
2654  */
2655 static enum ice_status
2656 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2657 {
2658 	struct ice_ptg_ptype **ch;
2659 	struct ice_ptg_ptype *p;
2660 
2661 	if (ptype > ICE_XLT1_CNT - 1)
2662 		return ICE_ERR_PARAM;
2663 
2664 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2665 		return ICE_ERR_DOES_NOT_EXIST;
2666 
2667 	/* Should not happen if .in_use is set, bad config */
2668 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2669 		return ICE_ERR_CFG;
2670 
2671 	/* find the ptype within this PTG, and bypass the link over it */
2672 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2673 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2674 	while (p) {
2675 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2676 			*ch = p->next_ptype;
2677 			break;
2678 		}
2679 
2680 		ch = &p->next_ptype;
2681 		p = p->next_ptype;
2682 	}
2683 
2684 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2685 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2686 
2687 	return ICE_SUCCESS;
2688 }
2689 
2690 /**
2691  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2692  * @hw: pointer to the hardware structure
2693  * @blk: HW block
2694  * @ptype: the ptype to add or move
2695  * @ptg: the PTG to add or move the ptype to
2696  *
2697  * This function will either add or move a ptype to a particular PTG depending
2698  * on whether the ptype is already part of another group. Note that using a
2699  * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2700  * default PTG.
2701  */
2702 static enum ice_status
2703 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2704 {
2705 	enum ice_status status;
2706 	u8 original_ptg;
2707 
2708 	if (ptype > ICE_XLT1_CNT - 1)
2709 		return ICE_ERR_PARAM;
2710 
2711 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2712 		return ICE_ERR_DOES_NOT_EXIST;
2713 
2714 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2715 	if (status)
2716 		return status;
2717 
2718 	/* Is ptype already in the correct PTG? */
2719 	if (original_ptg == ptg)
2720 		return ICE_SUCCESS;
2721 
2722 	/* Remove from original PTG and move back to the default PTG */
2723 	if (original_ptg != ICE_DEFAULT_PTG)
2724 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2725 
2726 	/* Moving to default PTG? Then we're done with this request */
2727 	if (ptg == ICE_DEFAULT_PTG)
2728 		return ICE_SUCCESS;
2729 
2730 	/* Add ptype to PTG at beginning of list */
2731 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2732 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2733 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2734 		&hw->blk[blk].xlt1.ptypes[ptype];
2735 
2736 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2737 	hw->blk[blk].xlt1.t[ptype] = ptg;
2738 
2739 	return ICE_SUCCESS;
2740 }
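
/*
 * Sketch (values illustrative): place ptype 17 of the RSS block into
 * PTG 5, then return it to the default group. ice_ptg_alloc_val() must
 * mark the PTG in use before ptypes can be moved into it.
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 5);
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, 5);
 *	...
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, ICE_DEFAULT_PTG);
 */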
2741 
2742 /* Block / table size info */
2743 struct ice_blk_size_details {
2744 	u16 xlt1;			/* # XLT1 entries */
2745 	u16 xlt2;			/* # XLT2 entries */
2746 	u16 prof_tcam;			/* # profile ID TCAM entries */
2747 	u16 prof_id;			/* # profile IDs */
2748 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2749 	u16 prof_redir;			/* # profile redirection entries */
2750 	u16 es;				/* # extraction sequence entries */
2751 	u16 fvw;			/* # field vector words */
2752 	u8 overwrite;			/* overwrite existing entries allowed */
2753 	u8 reverse;			/* reverse FV order */
2754 };
2755 
2756 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2757 	/**
2758 	 * Table Definitions
2759 	 * XLT1 - Number of entries in XLT1 table
2760 	 * XLT2 - Number of entries in XLT2 table
2761 	 * TCAM - Number of entries Profile ID TCAM table
2762 	 * CDID - Control Domain ID of the hardware block
2763 	 * PRED - Number of entries in the Profile Redirection Table
2764 	 * FV   - Number of entries in the Field Vector
2765 	 * FVW  - Width (in WORDs) of the Field Vector
2766 	 * OVR  - Overwrite existing table entries
2767 	 * REV  - Reverse FV
2768 	 */
2769 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2770 	/*          Overwrite   , Reverse FV */
2771 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2772 		    false, false },
2773 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2774 		    false, false },
2775 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2776 		    false, true  },
2777 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2778 		    true,  true  },
2779 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2780 		    false, false },
2781 };
2782 
2783 enum ice_sid_all {
2784 	ICE_SID_XLT1_OFF = 0,
2785 	ICE_SID_XLT2_OFF,
2786 	ICE_SID_PR_OFF,
2787 	ICE_SID_PR_REDIR_OFF,
2788 	ICE_SID_ES_OFF,
2789 	ICE_SID_OFF_COUNT,
2790 };
2791 
2792 /* Characteristic handling */
2793 
2794 /**
2795  * ice_match_prop_lst - determine if properties of two lists match
2796  * @list1: first properties list
2797  * @list2: second properties list
2798  *
2799  * The count, cookies, and their order must all match for the lists to be
2800  * considered equivalent.
2800  */
2801 static bool
2802 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2803 {
2804 	struct ice_vsig_prof *tmp1;
2805 	struct ice_vsig_prof *tmp2;
2806 	u16 chk_count = 0;
2807 	u16 count = 0;
2808 
2809 	/* compare counts */
2810 	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2811 		count++;
2812 	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2813 		chk_count++;
2814 	/* cppcheck-suppress knownConditionTrueFalse */
2815 	if (!count || count != chk_count)
2816 		return false;
2817 
2818 	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2819 	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2820 
2821 	/* profile cookies must compare, and in the exact same order to take
2822 	 * into account priority
2823 	 */
2824 	while (count--) {
2825 		if (tmp2->profile_cookie != tmp1->profile_cookie)
2826 			return false;
2827 
2828 		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2829 		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2830 	}
2831 
2832 	return true;
2833 }
2834 
2835 /* VSIG Management */
2836 
2837 /**
2838  * ice_vsig_update_xlt2_sect - update one section of XLT2 table
2839  * @hw: pointer to the hardware structure
2840  * @blk: HW block
2841  * @vsi: HW VSI number to program
2842  * @vsig: VSIG for the VSI
2843  *
2844  * This function will update the XLT2 hardware table with the input VSI
2845  * group configuration.
2846  */
2847 static enum ice_status
2848 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
2849 			  u16 vsig)
2850 {
2851 	struct ice_xlt2_section *sect;
2852 	struct ice_buf_build *bld;
2853 	enum ice_status status;
2854 
2855 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
2856 					       ice_struct_size(sect, value, 1),
2857 					       (void **)&sect);
2858 	if (!bld)
2859 		return ICE_ERR_NO_MEMORY;
2860 
2861 	sect->count = CPU_TO_LE16(1);
2862 	sect->offset = CPU_TO_LE16(vsi);
2863 	sect->value[0] = CPU_TO_LE16(vsig);
2864 
2865 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2866 
2867 	ice_pkg_buf_free(hw, bld);
2868 
2869 	return status;
2870 }
2871 
2872 /**
2873  * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
2874  * @hw: pointer to the hardware structure
2875  * @blk: HW block
2876  *
2877  * This function will update the XLT2 hardware table with the VSI group
2878  * configuration for any VSIs whose entries are marked as changed.
2879  */
2880 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
2881 {
2882 	u16 vsi;
2883 
2884 	for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
2885 		/* update only vsis that have been changed */
2886 		if (hw->blk[blk].xlt2.vsis[vsi].changed) {
2887 			enum ice_status status;
2888 			u16 vsig;
2889 
2890 			vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2891 			status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
2892 			if (status)
2893 				return status;
2894 
2895 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2896 		}
2897 	}
2898 
2899 	return ICE_SUCCESS;
2900 }
2901 
2902 /**
2903  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2904  * @hw: pointer to the hardware structure
2905  * @blk: HW block
2906  * @vsi: VSI of interest
2907  * @vsig: pointer to receive the VSI group
2908  *
2909  * This function will look up the VSI entry in the XLT2 list and return
2910  * the VSI group it is associated with.
2911  */
2912 enum ice_status
2913 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2914 {
2915 	if (!vsig || vsi >= ICE_MAX_VSI)
2916 		return ICE_ERR_PARAM;
2917 
2918 	/* As long as there's a default or valid VSIG associated with the input
2919 	 * VSI, the function returns success. Any handling of VSIG will be
2920 	 * done by the following add, update or remove functions.
2921 	 */
2922 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2923 
2924 	return ICE_SUCCESS;
2925 }
2926 
2927 /**
2928  * ice_vsig_alloc_val - allocate a new VSIG by value
2929  * @hw: pointer to the hardware structure
2930  * @blk: HW block
2931  * @vsig: the VSIG to allocate
2932  *
2933  * This function will allocate a given VSIG specified by the VSIG parameter.
2934  */
2935 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2936 {
2937 	u16 idx = vsig & ICE_VSIG_IDX_M;
2938 
2939 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2940 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2941 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2942 	}
2943 
2944 	return ICE_VSIG_VALUE(idx, hw->pf_id);
2945 }
2946 
2947 /**
2948  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2949  * @hw: pointer to the hardware structure
2950  * @blk: HW block
2951  *
2952  * This function will iterate through the VSIG list and mark the first
2953  * unused entry for the new VSIG entry as used and return that value.
2954  */
2955 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2956 {
2957 	u16 i;
2958 
2959 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2960 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2961 			return ice_vsig_alloc_val(hw, blk, i);
2962 
2963 	return ICE_DEFAULT_VSIG;
2964 }
2965 
2966 /**
2967  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2968  * @hw: pointer to the hardware structure
2969  * @blk: HW block
2970  * @chs: characteristic list
2971  * @vsig: returns the VSIG with the matching profiles, if found
2972  *
2973  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2974  * a group have the same characteristic set. To check if there exists a VSIG
2975  * which has the same characteristics as the input characteristics, this
2976  * function will iterate through the XLT2 list and return the VSIG that has a
2977  * matching configuration. In order to make sure that priorities are accounted
2978  * for, the list must match exactly, including the order in which the
2979  * characteristics are listed.
2980  */
2981 static enum ice_status
2982 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2983 			struct LIST_HEAD_TYPE *chs, u16 *vsig)
2984 {
2985 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2986 	u16 i;
2987 
2988 	for (i = 0; i < xlt2->count; i++)
2989 		if (xlt2->vsig_tbl[i].in_use &&
2990 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2991 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2992 			return ICE_SUCCESS;
2993 		}
2994 
2995 	return ICE_ERR_DOES_NOT_EXIST;
2996 }
2997 
2998 /**
2999  * ice_vsig_free - free VSI group
3000  * @hw: pointer to the hardware structure
3001  * @blk: HW block
3002  * @vsig: VSIG to remove
3003  *
3004  * The function will remove all VSIs associated with the input VSIG and move
3005  * them to the DEFAULT_VSIG and mark the VSIG available.
3006  */
3007 static enum ice_status
3008 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3009 {
3010 	struct ice_vsig_prof *dtmp, *del;
3011 	struct ice_vsig_vsi *vsi_cur;
3012 	u16 idx;
3013 
3014 	idx = vsig & ICE_VSIG_IDX_M;
3015 	if (idx >= ICE_MAX_VSIGS)
3016 		return ICE_ERR_PARAM;
3017 
3018 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3019 		return ICE_ERR_DOES_NOT_EXIST;
3020 
3021 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
3022 
3023 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3024 	/* If the VSIG has at least 1 VSI then iterate through the
3025 	 * list and remove the VSIs before deleting the group.
3026 	 */
3027 	if (vsi_cur) {
3028 		/* remove all vsis associated with this VSIG XLT2 entry */
3029 		do {
3030 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3031 
3032 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
3033 			vsi_cur->changed = 1;
3034 			vsi_cur->next_vsi = NULL;
3035 			vsi_cur = tmp;
3036 		} while (vsi_cur);
3037 
3038 		/* NULL terminate head of VSI list */
3039 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
3040 	}
3041 
3042 	/* free characteristic list */
3043 	LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
3044 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3045 				 ice_vsig_prof, list) {
3046 		LIST_DEL(&del->list);
3047 		ice_free(hw, del);
3048 	}
3049 
3050 	/* if VSIG characteristic list was cleared for reset
3051 	 * re-initialize the list head
3052 	 */
3053 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
3054 
3055 	return ICE_SUCCESS;
3056 }
3057 
3058 /**
3059  * ice_vsig_remove_vsi - remove VSI from VSIG
3060  * @hw: pointer to the hardware structure
3061  * @blk: HW block
3062  * @vsi: VSI to remove
3063  * @vsig: VSI group to remove from
3064  *
3065  * The function will remove the input VSI from its VSI group and move it
3066  * to the DEFAULT_VSIG.
3067  */
3068 static enum ice_status
3069 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3070 {
3071 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
3072 	u16 idx;
3073 
3074 	idx = vsig & ICE_VSIG_IDX_M;
3075 
3076 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3077 		return ICE_ERR_PARAM;
3078 
3079 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3080 		return ICE_ERR_DOES_NOT_EXIST;
3081 
3082 	/* entry already in default VSIG, don't have to remove */
3083 	if (idx == ICE_DEFAULT_VSIG)
3084 		return ICE_SUCCESS;
3085 
3086 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3087 	if (!(*vsi_head))
3088 		return ICE_ERR_CFG;
3089 
3090 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
3091 	vsi_cur = (*vsi_head);
3092 
3093 	/* iterate the VSI list, skip over the entry to be removed */
3094 	while (vsi_cur) {
3095 		if (vsi_tgt == vsi_cur) {
3096 			(*vsi_head) = vsi_cur->next_vsi;
3097 			break;
3098 		}
3099 		vsi_head = &vsi_cur->next_vsi;
3100 		vsi_cur = vsi_cur->next_vsi;
3101 	}
3102 
3103 	/* verify if VSI was removed from group list */
3104 	if (!vsi_cur)
3105 		return ICE_ERR_DOES_NOT_EXIST;
3106 
3107 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
3108 	vsi_cur->changed = 1;
3109 	vsi_cur->next_vsi = NULL;
3110 
3111 	return ICE_SUCCESS;
3112 }
3113 
3114 /**
3115  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
3116  * @hw: pointer to the hardware structure
3117  * @blk: HW block
3118  * @vsi: VSI to move
3119  * @vsig: destination VSI group
3120  *
3121  * This function will move or add the input VSI to the target VSIG.
3122  * The function will find the original VSIG the VSI belongs to and
3123  * move the entry to the DEFAULT_VSIG, update the original VSIG and
3124  * then move entry to the new VSIG.
3125  */
3126 static enum ice_status
3127 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3128 {
3129 	struct ice_vsig_vsi *tmp;
3130 	enum ice_status status;
3131 	u16 orig_vsig, idx;
3132 
3133 	idx = vsig & ICE_VSIG_IDX_M;
3134 
3135 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3136 		return ICE_ERR_PARAM;
3137 
3138 	/* if the VSIG is not in use and it is not the default VSIG, then
3139 	 * this VSIG doesn't exist.
3140 	 */
3141 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
3142 	    vsig != ICE_DEFAULT_VSIG)
3143 		return ICE_ERR_DOES_NOT_EXIST;
3144 
3145 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3146 	if (status)
3147 		return status;
3148 
3149 	/* no update required if vsigs match */
3150 	if (orig_vsig == vsig)
3151 		return ICE_SUCCESS;
3152 
3153 	if (orig_vsig != ICE_DEFAULT_VSIG) {
3154 		/* remove entry from orig_vsig and add to default VSIG */
3155 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
3156 		if (status)
3157 			return status;
3158 	}
3159 
3160 	if (idx == ICE_DEFAULT_VSIG)
3161 		return ICE_SUCCESS;
3162 
3163 	/* Create VSI entry and add VSIG and prop_mask values */
3164 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
3165 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
3166 
3167 	/* Add new entry to the head of the VSIG list */
3168 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3169 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
3170 		&hw->blk[blk].xlt2.vsis[vsi];
3171 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
3172 	hw->blk[blk].xlt2.t[vsi] = vsig;
3173 
3174 	return ICE_SUCCESS;
3175 }
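
/*
 * Sketch (illustrative): allocate a fresh VSIG, move a VSI into it, and
 * flush the change to the XLT2 hardware table.
 *
 *	u16 vsig = ice_vsig_alloc(hw, blk);
 *
 *	if (vsig == ICE_DEFAULT_VSIG)
 *		return ICE_ERR_MAX_LIMIT;
 *	status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
 *	if (!status)
 *		status = ice_vsig_update_xlt2(hw, blk);
 */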
3176 
3177 /**
3178  * ice_find_prof_id - find profile ID for a given field vector
3179  * @hw: pointer to the hardware structure
3180  * @blk: HW block
3181  * @fv: field vector to search for
3182  * @prof_id: receives the profile ID
3183  */
3184 static enum ice_status
3185 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
3186 		 struct ice_fv_word *fv, u8 *prof_id)
3187 {
3188 	struct ice_es *es = &hw->blk[blk].es;
3189 	u16 off;
3190 	u8 i;
3191 
3192 	for (i = 0; i < (u8)es->count; i++) {
3193 		off = i * es->fvw;
3194 
3195 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3196 			continue;
3197 
3198 		*prof_id = i;
3199 		return ICE_SUCCESS;
3200 	}
3201 
3202 	return ICE_ERR_DOES_NOT_EXIST;
3203 }
3204 
3205 /**
3206  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
3207  * @blk: the block type
3208  * @rsrc_type: pointer to variable to receive the resource type
3209  */
3210 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3211 {
3212 	switch (blk) {
3213 	case ICE_BLK_RSS:
3214 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3215 		break;
3216 	case ICE_BLK_PE:
3217 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
3218 		break;
3219 	default:
3220 		return false;
3221 	}
3222 	return true;
3223 }
3224 
3225 /**
3226  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
3227  * @blk: the block type
3228  * @rsrc_type: pointer to variable to receive the resource type
3229  */
3230 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3231 {
3232 	switch (blk) {
3233 	case ICE_BLK_RSS:
3234 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3235 		break;
3236 	case ICE_BLK_PE:
3237 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3238 		break;
3239 	default:
3240 		return false;
3241 	}
3242 	return true;
3243 }
3244 
3245 /**
3246  * ice_alloc_tcam_ent - allocate hardware TCAM entry
3247  * @hw: pointer to the HW struct
3248  * @blk: the block to allocate the TCAM for
3249  * @btm: true to allocate from bottom of table, false to allocate from top
3250  * @tcam_idx: pointer to variable to receive the TCAM entry
3251  *
3252  * This function allocates a new entry in a Profile ID TCAM for a specific
3253  * block.
3254  */
3255 static enum ice_status
3256 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3257 		   u16 *tcam_idx)
3258 {
3259 	u16 res_type;
3260 
3261 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3262 		return ICE_ERR_PARAM;
3263 
3264 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3265 }
3266 
3267 /**
3268  * ice_free_tcam_ent - free hardware TCAM entry
3269  * @hw: pointer to the HW struct
3270  * @blk: the block from which to free the TCAM entry
3271  * @tcam_idx: the TCAM entry to free
3272  *
3273  * This function frees an entry in a Profile ID TCAM for a specific block.
3274  */
3275 static enum ice_status
3276 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3277 {
3278 	u16 res_type;
3279 
3280 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3281 		return ICE_ERR_PARAM;
3282 
3283 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3284 }
3285 
3286 /**
3287  * ice_alloc_prof_id - allocate profile ID
3288  * @hw: pointer to the HW struct
3289  * @blk: the block to allocate the profile ID for
3290  * @prof_id: pointer to variable to receive the profile ID
3291  *
3292  * This function allocates a new profile ID, which also corresponds to a Field
3293  * Vector (Extraction Sequence) entry.
3294  */
3295 static enum ice_status
3296 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3297 {
3298 	enum ice_status status;
3299 	u16 res_type;
3300 	u16 get_prof;
3301 
3302 	if (!ice_prof_id_rsrc_type(blk, &res_type))
3303 		return ICE_ERR_PARAM;
3304 
3305 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3306 	if (!status)
3307 		*prof_id = (u8)get_prof;
3308 
3309 	return status;
3310 }
3311 
3312 /**
3313  * ice_free_prof_id - free profile ID
3314  * @hw: pointer to the HW struct
3315  * @blk: the block from which to free the profile ID
3316  * @prof_id: the profile ID to free
3317  *
3318  * This function frees a profile ID, which also corresponds to a Field Vector.
3319  */
3320 static enum ice_status
3321 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3322 {
3323 	u16 tmp_prof_id = (u16)prof_id;
3324 	u16 res_type;
3325 
3326 	if (!ice_prof_id_rsrc_type(blk, &res_type))
3327 		return ICE_ERR_PARAM;
3328 
3329 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3330 }
3331 
3332 /**
3333  * ice_prof_inc_ref - increment reference count for profile
3334  * @hw: pointer to the HW struct
3335  * @blk: the block from which to free the profile ID
3336  * @prof_id: the profile ID for which to increment the reference count
3337  */
3338 static enum ice_status
3339 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3340 {
3341 	if (prof_id >= hw->blk[blk].es.count)
3342 		return ICE_ERR_PARAM;
3343 
3344 	hw->blk[blk].es.ref_count[prof_id]++;
3345 
3346 	return ICE_SUCCESS;
3347 }
3348 
3349 /**
3350  * ice_write_es - write an extraction sequence to hardware
3351  * @hw: pointer to the HW struct
3352  * @blk: the block in which to write the extraction sequence
3353  * @prof_id: the profile ID to write
3354  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3355  */
3356 static void
3357 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3358 	     struct ice_fv_word *fv)
3359 {
3360 	u16 off;
3361 
3362 	off = prof_id * hw->blk[blk].es.fvw;
3363 	if (!fv) {
3364 		ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3365 			   sizeof(*fv), ICE_NONDMA_MEM);
3366 		hw->blk[blk].es.written[prof_id] = false;
3367 	} else {
3368 		ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3369 			   sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3370 	}
3371 }
3372 
3373 /**
3374  * ice_prof_dec_ref - decrement reference count for profile
3375  * @hw: pointer to the HW struct
3376  * @blk: the block from which to free the profile ID
3377  * @prof_id: the profile ID for which to decrement the reference count
3378  */
3379 static enum ice_status
3380 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3381 {
3382 	if (prof_id >= hw->blk[blk].es.count)
3383 		return ICE_ERR_PARAM;
3384 
3385 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3386 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
3387 			ice_write_es(hw, blk, prof_id, NULL);
3388 			return ice_free_prof_id(hw, blk, prof_id);
3389 		}
3390 	}
3391 
3392 	return ICE_SUCCESS;
3393 }
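
/*
 * Reference counting sketch: each user of a profile takes a reference,
 * and dropping the last one clears the extraction sequence and frees
 * the profile ID back to its resource pool.
 *
 *	ice_prof_inc_ref(hw, blk, prof_id);
 *	...
 *	ice_prof_dec_ref(hw, blk, prof_id);
 */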
3394 
3395 /* Block / table section IDs */
3396 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3397 	/* SWITCH */
3398 	{	ICE_SID_XLT1_SW,
3399 		ICE_SID_XLT2_SW,
3400 		ICE_SID_PROFID_TCAM_SW,
3401 		ICE_SID_PROFID_REDIR_SW,
3402 		ICE_SID_FLD_VEC_SW
3403 	},
3404 
3405 	/* ACL */
3406 	{	ICE_SID_XLT1_ACL,
3407 		ICE_SID_XLT2_ACL,
3408 		ICE_SID_PROFID_TCAM_ACL,
3409 		ICE_SID_PROFID_REDIR_ACL,
3410 		ICE_SID_FLD_VEC_ACL
3411 	},
3412 
3413 	/* FD */
3414 	{	ICE_SID_XLT1_FD,
3415 		ICE_SID_XLT2_FD,
3416 		ICE_SID_PROFID_TCAM_FD,
3417 		ICE_SID_PROFID_REDIR_FD,
3418 		ICE_SID_FLD_VEC_FD
3419 	},
3420 
3421 	/* RSS */
3422 	{	ICE_SID_XLT1_RSS,
3423 		ICE_SID_XLT2_RSS,
3424 		ICE_SID_PROFID_TCAM_RSS,
3425 		ICE_SID_PROFID_REDIR_RSS,
3426 		ICE_SID_FLD_VEC_RSS
3427 	},
3428 
3429 	/* PE */
3430 	{	ICE_SID_XLT1_PE,
3431 		ICE_SID_XLT2_PE,
3432 		ICE_SID_PROFID_TCAM_PE,
3433 		ICE_SID_PROFID_REDIR_PE,
3434 		ICE_SID_FLD_VEC_PE
3435 	}
3436 };
3437 
3438 /**
3439  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3440  * @hw: pointer to the hardware structure
3441  * @blk: the HW block to initialize
3442  */
3443 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3444 {
3445 	u16 pt;
3446 
3447 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3448 		u8 ptg;
3449 
3450 		ptg = hw->blk[blk].xlt1.t[pt];
3451 		if (ptg != ICE_DEFAULT_PTG) {
3452 			ice_ptg_alloc_val(hw, blk, ptg);
3453 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3454 		}
3455 	}
3456 }
3457 
3458 /**
3459  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3460  * @hw: pointer to the hardware structure
3461  * @blk: the HW block to initialize
3462  */
3463 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3464 {
3465 	u16 vsi;
3466 
3467 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3468 		u16 vsig;
3469 
3470 		vsig = hw->blk[blk].xlt2.t[vsi];
3471 		if (vsig) {
3472 			ice_vsig_alloc_val(hw, blk, vsig);
3473 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3474 			/* no changes at this time, since this has been
3475 			 * initialized from the original package
3476 			 */
3477 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3478 		}
3479 	}
3480 }
3481 
3482 /**
3483  * ice_init_sw_db - init software database from HW tables
3484  * @hw: pointer to the hardware structure
3485  */
3486 static void ice_init_sw_db(struct ice_hw *hw)
3487 {
3488 	u16 i;
3489 
3490 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3491 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3492 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3493 	}
3494 }
3495 
3496 /**
3497  * ice_fill_tbl - Reads content of a single table type into database
3498  * @hw: pointer to the hardware structure
3499  * @block_id: Block ID of the table to copy
3500  * @sid: Section ID of the table to copy
3501  *
3502  * Will attempt to read the entire content of a given table of a single block
3503  * into the driver database. We assume that the destination tables will
3504  * always be as large as or larger than the data contained in the package. If
3505  * this condition is not met, there is most likely an error in the package
3506  * contents.
3507  */
3508 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3509 {
3510 	u32 dst_len, sect_len, offset = 0;
3511 	struct ice_prof_redir_section *pr;
3512 	struct ice_prof_id_section *pid;
3513 	struct ice_xlt1_section *xlt1;
3514 	struct ice_xlt2_section *xlt2;
3515 	struct ice_sw_fv_section *es;
3516 	struct ice_pkg_enum state;
3517 	u8 *src, *dst;
3518 	void *sect;
3519 
3520 	/* if the HW segment pointer is null then the first iteration of
3521 	 * ice_pkg_enum_section() will fail. In this case the HW tables will
3522 	 * not be filled and the function simply returns.
3523 	 */
3524 	if (!hw->seg) {
3525 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3526 		return;
3527 	}
3528 
3529 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3530 
3531 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
3532 
3533 	while (sect) {
3534 		switch (sid) {
3535 		case ICE_SID_XLT1_SW:
3536 		case ICE_SID_XLT1_FD:
3537 		case ICE_SID_XLT1_RSS:
3538 		case ICE_SID_XLT1_ACL:
3539 		case ICE_SID_XLT1_PE:
3540 			xlt1 = (struct ice_xlt1_section *)sect;
3541 			src = xlt1->value;
3542 			sect_len = LE16_TO_CPU(xlt1->count) *
3543 				sizeof(*hw->blk[block_id].xlt1.t);
3544 			dst = hw->blk[block_id].xlt1.t;
3545 			dst_len = hw->blk[block_id].xlt1.count *
3546 				sizeof(*hw->blk[block_id].xlt1.t);
3547 			break;
3548 		case ICE_SID_XLT2_SW:
3549 		case ICE_SID_XLT2_FD:
3550 		case ICE_SID_XLT2_RSS:
3551 		case ICE_SID_XLT2_ACL:
3552 		case ICE_SID_XLT2_PE:
3553 			xlt2 = (struct ice_xlt2_section *)sect;
3554 			src = (_FORCE_ u8 *)xlt2->value;
3555 			sect_len = LE16_TO_CPU(xlt2->count) *
3556 				sizeof(*hw->blk[block_id].xlt2.t);
3557 			dst = (u8 *)hw->blk[block_id].xlt2.t;
3558 			dst_len = hw->blk[block_id].xlt2.count *
3559 				sizeof(*hw->blk[block_id].xlt2.t);
3560 			break;
3561 		case ICE_SID_PROFID_TCAM_SW:
3562 		case ICE_SID_PROFID_TCAM_FD:
3563 		case ICE_SID_PROFID_TCAM_RSS:
3564 		case ICE_SID_PROFID_TCAM_ACL:
3565 		case ICE_SID_PROFID_TCAM_PE:
3566 			pid = (struct ice_prof_id_section *)sect;
3567 			src = (u8 *)pid->entry;
3568 			sect_len = LE16_TO_CPU(pid->count) *
3569 				sizeof(*hw->blk[block_id].prof.t);
3570 			dst = (u8 *)hw->blk[block_id].prof.t;
3571 			dst_len = hw->blk[block_id].prof.count *
3572 				sizeof(*hw->blk[block_id].prof.t);
3573 			break;
3574 		case ICE_SID_PROFID_REDIR_SW:
3575 		case ICE_SID_PROFID_REDIR_FD:
3576 		case ICE_SID_PROFID_REDIR_RSS:
3577 		case ICE_SID_PROFID_REDIR_ACL:
3578 		case ICE_SID_PROFID_REDIR_PE:
3579 			pr = (struct ice_prof_redir_section *)sect;
3580 			src = pr->redir_value;
3581 			sect_len = LE16_TO_CPU(pr->count) *
3582 				sizeof(*hw->blk[block_id].prof_redir.t);
3583 			dst = hw->blk[block_id].prof_redir.t;
3584 			dst_len = hw->blk[block_id].prof_redir.count *
3585 				sizeof(*hw->blk[block_id].prof_redir.t);
3586 			break;
3587 		case ICE_SID_FLD_VEC_SW:
3588 		case ICE_SID_FLD_VEC_FD:
3589 		case ICE_SID_FLD_VEC_RSS:
3590 		case ICE_SID_FLD_VEC_ACL:
3591 		case ICE_SID_FLD_VEC_PE:
3592 			es = (struct ice_sw_fv_section *)sect;
3593 			src = (u8 *)es->fv;
3594 			sect_len = (u32)(LE16_TO_CPU(es->count) *
3595 					 hw->blk[block_id].es.fvw) *
3596 				sizeof(*hw->blk[block_id].es.t);
3597 			dst = (u8 *)hw->blk[block_id].es.t;
3598 			dst_len = (u32)(hw->blk[block_id].es.count *
3599 					hw->blk[block_id].es.fvw) *
3600 				sizeof(*hw->blk[block_id].es.t);
3601 			break;
3602 		default:
3603 			return;
3604 		}
3605 
3606 		/* if the section offset exceeds destination length, terminate
3607 		 * table fill.
3608 		 */
3609 		if (offset > dst_len)
3610 			return;
3611 
3612 		/* if the sum of section size and offset exceeds the destination
3613 		 * size, then we are out of bounds of the HW table size for that
3614 		 * PF. Clamp the section length so the copy fills only the
3615 		 * remaining table space of that PF.
3616 		 */
3617 		if ((offset + sect_len) > dst_len)
3618 			sect_len = dst_len - offset;
3619 
3620 		ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3621 		offset += sect_len;
3622 		sect = ice_pkg_enum_section(NULL, &state, sid);
3623 	}
3624 }
3625 
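/* Illustrative sketch (documentation only, compiled out): the bounded-copy
 * rule that ice_fill_tbl() applies to each package section, shown in
 * isolation. The helper name and standalone form are hypothetical; the
 * driver performs this inline above.
 */
#if 0
static u32
ice_example_bounded_copy(u8 *dst, u32 dst_len, u32 offset, u8 *src,
			 u32 sect_len)
{
	/* a section that starts past the end of the table is ignored */
	if (offset > dst_len)
		return offset;

	/* a section that would overrun the table is truncated so that the
	 * copy fills only the remaining space for this PF
	 */
	if ((offset + sect_len) > dst_len)
		sect_len = dst_len - offset;

	ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
	return offset + sect_len;
}
#endif
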
3626 /**
3627  * ice_fill_blk_tbls - Read package content for tables
3628  * @hw: pointer to the hardware structure
3629  *
3630  * Reads the current package contents and populates the driver
3631  * database with the data iteratively for all advanced feature
3632  * blocks. Assume that the HW tables have been allocated.
3633  */
3634 void ice_fill_blk_tbls(struct ice_hw *hw)
3635 {
3636 	u8 i;
3637 
3638 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3639 		enum ice_block blk_id = (enum ice_block)i;
3640 
3641 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3642 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3643 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3644 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3645 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3646 	}
3647 
3648 	ice_init_sw_db(hw);
3649 }
3650 
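/* Illustrative sketch (documentation only, compiled out): the typical
 * bring-up order for these table routines during driver init. Error
 * handling is elided and the helper name is hypothetical.
 */
#if 0
static void
ice_example_table_bringup(struct ice_hw *hw)
{
	/* allocate the shadow tables, sized from blk_sizes[] */
	if (ice_init_hw_tbls(hw))
		return;

	/* then populate them from the downloaded package sections */
	ice_fill_blk_tbls(hw);
}
#endif
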
3651 /**
3652  * ice_free_prof_map - free profile map
3653  * @hw: pointer to the hardware structure
3654  * @blk_idx: HW block index
3655  */
3656 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3657 {
3658 	struct ice_es *es = &hw->blk[blk_idx].es;
3659 	struct ice_prof_map *del, *tmp;
3660 
3661 	ice_acquire_lock(&es->prof_map_lock);
3662 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3663 				 ice_prof_map, list) {
3664 		LIST_DEL(&del->list);
3665 		ice_free(hw, del);
3666 	}
3667 	INIT_LIST_HEAD(&es->prof_map);
3668 	ice_release_lock(&es->prof_map_lock);
3669 }
3670 
3671 /**
3672  * ice_free_flow_profs - free flow profile entries
3673  * @hw: pointer to the hardware structure
3674  * @blk_idx: HW block index
3675  */
3676 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3677 {
3678 	struct ice_flow_prof *p, *tmp;
3679 
3680 	ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3681 	LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3682 				 ice_flow_prof, l_entry) {
3683 		LIST_DEL(&p->l_entry);
3684 
3685 		ice_free(hw, p);
3686 	}
3687 	ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3688 
3689 	/* if the driver is in reset and the tables are being cleared,
3690 	 * re-initialize the flow profile list heads
3691 	 */
3692 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3693 }
3694 
3695 /**
3696  * ice_free_vsig_tbl - free complete VSIG table entries
3697  * @hw: pointer to the hardware structure
3698  * @blk: the HW block on which to free the VSIG table entries
3699  */
3700 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3701 {
3702 	u16 i;
3703 
3704 	if (!hw->blk[blk].xlt2.vsig_tbl)
3705 		return;
3706 
3707 	for (i = 1; i < ICE_MAX_VSIGS; i++)
3708 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3709 			ice_vsig_free(hw, blk, i);
3710 }
3711 
3712 /**
3713  * ice_free_hw_tbls - free hardware table memory
3714  * @hw: pointer to the hardware structure
3715  */
3716 void ice_free_hw_tbls(struct ice_hw *hw)
3717 {
3718 	struct ice_rss_cfg *r, *rt;
3719 	u8 i;
3720 
3721 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3722 		if (hw->blk[i].is_list_init) {
3723 			struct ice_es *es = &hw->blk[i].es;
3724 
3725 			ice_free_prof_map(hw, i);
3726 			ice_destroy_lock(&es->prof_map_lock);
3727 
3728 			ice_free_flow_profs(hw, i);
3729 			ice_destroy_lock(&hw->fl_profs_locks[i]);
3730 
3731 			hw->blk[i].is_list_init = false;
3732 		}
3733 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3734 		ice_free(hw, hw->blk[i].xlt1.ptypes);
3735 		ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3736 		ice_free(hw, hw->blk[i].xlt1.t);
3737 		ice_free(hw, hw->blk[i].xlt2.t);
3738 		ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3739 		ice_free(hw, hw->blk[i].xlt2.vsis);
3740 		ice_free(hw, hw->blk[i].prof.t);
3741 		ice_free(hw, hw->blk[i].prof_redir.t);
3742 		ice_free(hw, hw->blk[i].es.t);
3743 		ice_free(hw, hw->blk[i].es.ref_count);
3744 		ice_free(hw, hw->blk[i].es.written);
3745 	}
3746 
3747 	LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3748 				 ice_rss_cfg, l_entry) {
3749 		LIST_DEL(&r->l_entry);
3750 		ice_free(hw, r);
3751 	}
3752 	ice_destroy_lock(&hw->rss_locks);
3753 	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3754 }
3755 
3756 /**
3757  * ice_init_flow_profs - init flow profile locks and list heads
3758  * @hw: pointer to the hardware structure
3759  * @blk_idx: HW block index
3760  */
3761 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3762 {
3763 	ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3764 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3765 }
3766 
3767 /**
3768  * ice_clear_hw_tbls - clear HW tables and flow profiles
3769  * @hw: pointer to the hardware structure
3770  */
3771 void ice_clear_hw_tbls(struct ice_hw *hw)
3772 {
3773 	u8 i;
3774 
3775 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3776 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3777 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3778 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3779 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3780 		struct ice_es *es = &hw->blk[i].es;
3781 
3782 		if (hw->blk[i].is_list_init) {
3783 			ice_free_prof_map(hw, i);
3784 			ice_free_flow_profs(hw, i);
3785 		}
3786 
3787 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3788 
3789 		ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
3790 			   ICE_NONDMA_MEM);
3791 		ice_memset(xlt1->ptg_tbl, 0,
3792 			   ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
3793 			   ICE_NONDMA_MEM);
3794 		ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
3795 			   ICE_NONDMA_MEM);
3796 
3797 		ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
3798 			   ICE_NONDMA_MEM);
3799 		ice_memset(xlt2->vsig_tbl, 0,
3800 			   xlt2->count * sizeof(*xlt2->vsig_tbl),
3801 			   ICE_NONDMA_MEM);
3802 		ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
3803 			   ICE_NONDMA_MEM);
3804 
3805 		ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
3806 			   ICE_NONDMA_MEM);
3807 		ice_memset(prof_redir->t, 0,
3808 			   prof_redir->count * sizeof(*prof_redir->t),
3809 			   ICE_NONDMA_MEM);
3810 
3811 		ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
3812 			   ICE_NONDMA_MEM);
3813 		ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
3814 			   ICE_NONDMA_MEM);
3815 		ice_memset(es->written, 0, es->count * sizeof(*es->written),
3816 			   ICE_NONDMA_MEM);
3817 	}
3818 }
3819 
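/* Usage note (documentation only): ice_clear_hw_tbls() zeroes the table
 * contents but keeps the allocations, which suits a reset path where
 * ice_fill_blk_tbls() will repopulate them; ice_free_hw_tbls() releases the
 * memory entirely and suits driver teardown. A hypothetical, compiled-out
 * sketch:
 */
#if 0
static void
ice_example_reset_or_teardown(struct ice_hw *hw, bool unloading)
{
	if (unloading)
		ice_free_hw_tbls(hw);	/* detach: release the memory */
	else
		ice_clear_hw_tbls(hw);	/* reset: keep memory, zero contents */
}
#endif
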
3820 /**
3821  * ice_init_hw_tbls - init hardware table memory
3822  * @hw: pointer to the hardware structure
3823  */
3824 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3825 {
3826 	u8 i;
3827 
3828 	ice_init_lock(&hw->rss_locks);
3829 	INIT_LIST_HEAD(&hw->rss_list_head);
3830 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3831 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3832 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3833 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3834 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3835 		struct ice_es *es = &hw->blk[i].es;
3836 		u16 j;
3837 
3838 		if (hw->blk[i].is_list_init)
3839 			continue;
3840 
3841 		ice_init_flow_profs(hw, i);
3842 		ice_init_lock(&es->prof_map_lock);
3843 		INIT_LIST_HEAD(&es->prof_map);
3844 		hw->blk[i].is_list_init = true;
3845 
3846 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3847 		es->reverse = blk_sizes[i].reverse;
3848 
3849 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3850 		xlt1->count = blk_sizes[i].xlt1;
3851 
3852 		xlt1->ptypes = (struct ice_ptg_ptype *)
3853 			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
3854 
3855 		if (!xlt1->ptypes)
3856 			goto err;
3857 
3858 		xlt1->ptg_tbl = (struct ice_ptg_entry *)
3859 			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
3860 
3861 		if (!xlt1->ptg_tbl)
3862 			goto err;
3863 
3864 		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
3865 		if (!xlt1->t)
3866 			goto err;
3867 
3868 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3869 		xlt2->count = blk_sizes[i].xlt2;
3870 
3871 		xlt2->vsis = (struct ice_vsig_vsi *)
3872 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
3873 
3874 		if (!xlt2->vsis)
3875 			goto err;
3876 
3877 		xlt2->vsig_tbl = (struct ice_vsig_entry *)
3878 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
3879 		if (!xlt2->vsig_tbl)
3880 			goto err;
3881 
3882 		for (j = 0; j < xlt2->count; j++)
3883 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3884 
3885 		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
3886 		if (!xlt2->t)
3887 			goto err;
3888 
3889 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3890 		prof->count = blk_sizes[i].prof_tcam;
3891 		prof->max_prof_id = blk_sizes[i].prof_id;
3892 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3893 		prof->t = (struct ice_prof_tcam_entry *)
3894 			ice_calloc(hw, prof->count, sizeof(*prof->t));
3895 
3896 		if (!prof->t)
3897 			goto err;
3898 
3899 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3900 		prof_redir->count = blk_sizes[i].prof_redir;
3901 		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
3902 						 sizeof(*prof_redir->t));
3903 
3904 		if (!prof_redir->t)
3905 			goto err;
3906 
3907 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3908 		es->count = blk_sizes[i].es;
3909 		es->fvw = blk_sizes[i].fvw;
3910 		es->t = (struct ice_fv_word *)
3911 			ice_calloc(hw, (u32)(es->count * es->fvw),
3912 				   sizeof(*es->t));
3913 		if (!es->t)
3914 			goto err;
3915 
3916 		es->ref_count = (u16 *)
3917 			ice_calloc(hw, es->count, sizeof(*es->ref_count));
3918 
3919 		if (!es->ref_count)
3920 			goto err;
3921 
3922 		es->written = (u8 *)
3923 			ice_calloc(hw, es->count, sizeof(*es->written));
3924 
3925 		if (!es->written)
3926 			goto err;
3927 
3928 	}
3929 	return ICE_SUCCESS;
3930 
3931 err:
3932 	ice_free_hw_tbls(hw);
3933 	return ICE_ERR_NO_MEMORY;
3934 }
3935 
3936 /**
3937  * ice_prof_gen_key - generate profile ID key
3938  * @hw: pointer to the HW struct
3939  * @blk: the block in which to write the profile ID
3940  * @ptg: packet type group (PTG) portion of key
3941  * @vsig: VSIG portion of key
3942  * @cdid: CDID portion of key
3943  * @flags: flag portion of key
3944  * @vl_msk: valid mask
3945  * @dc_msk: don't care mask
3946  * @nm_msk: never match mask
3947  * @key: output of profile ID key
3948  */
3949 static enum ice_status
3950 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3951 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3952 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3953 		 u8 key[ICE_TCAM_KEY_SZ])
3954 {
3955 	struct ice_prof_id_key inkey;
3956 
3957 	inkey.xlt1 = ptg;
3958 	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
3959 	inkey.flags = CPU_TO_LE16(flags);
3960 
3961 	switch (hw->blk[blk].prof.cdid_bits) {
3962 	case 0:
3963 		break;
3964 	case 2:
3965 #define ICE_CD_2_M 0xC000U
3966 #define ICE_CD_2_S 14
3967 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
3968 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
3969 		break;
3970 	case 4:
3971 #define ICE_CD_4_M 0xF000U
3972 #define ICE_CD_4_S 12
3973 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
3974 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
3975 		break;
3976 	case 8:
3977 #define ICE_CD_8_M 0xFF00U
3978 #define ICE_CD_8_S 8	/* bits 15:8; a shift of 16 would vanish in the 16-bit field */
3979 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
3980 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
3981 		break;
3982 	default:
3983 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3984 		break;
3985 	}
3986 
3987 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3988 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3989 }
3990 
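/* Worked example (documentation only): with cdid_bits == 2, the 16-bit
 * xlt2_cdid field keeps the VSIG in its low bits and carries a one-hot CDID
 * in bits 15:14. For cdid = 1 and vsig = 0x10 the field becomes
 * (BIT(1) << ICE_CD_2_S) | 0x10 = 0x8000 | 0x10 = 0x8010.
 */
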
3991 /**
3992  * ice_tcam_write_entry - write TCAM entry
3993  * @hw: pointer to the HW struct
3994  * @blk: the block in which to write profile ID to
3995  * @idx: the entry index to write to
3996  * @prof_id: profile ID
3997  * @ptg: packet type group (PTG) portion of key
3998  * @vsig: VSIG portion of key
3999  * @cdid: CDID portion of key
4000  * @flags: flag portion of key
4001  * @vl_msk: valid mask
4002  * @dc_msk: don't care mask
4003  * @nm_msk: never match mask
4004  */
4005 static enum ice_status
4006 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4007 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4008 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4009 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4010 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4011 {
4013 	enum ice_status status;
4014 
4015 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4016 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4017 	if (!status) {
4018 		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4019 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
4020 	}
4021 
4022 	return status;
4023 }
4024 
4025 /**
4026  * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
4027  * @hw: pointer to the hardware structure
4028  * @blk: HW block
4029  * @vsig: VSIG to query
4030  * @refs: pointer to variable to receive the reference count
4031  */
4032 static enum ice_status
4033 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4034 {
4035 	u16 idx = vsig & ICE_VSIG_IDX_M;
4036 	struct ice_vsig_vsi *ptr;
4037 
4038 	*refs = 0;
4039 
4040 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4041 		return ICE_ERR_DOES_NOT_EXIST;
4042 
4043 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4044 	while (ptr) {
4045 		(*refs)++;
4046 		ptr = ptr->next_vsi;
4047 	}
4048 
4049 	return ICE_SUCCESS;
4050 }
4051 
4052 /**
4053  * ice_has_prof_vsig - check to see if VSIG has a specific profile
4054  * @hw: pointer to the hardware structure
4055  * @blk: HW block
4056  * @vsig: VSIG to check against
4057  * @hdl: profile handle
4058  */
4059 static bool
4060 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4061 {
4062 	u16 idx = vsig & ICE_VSIG_IDX_M;
4063 	struct ice_vsig_prof *ent;
4064 
4065 	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4066 			    ice_vsig_prof, list)
4067 		if (ent->profile_cookie == hdl)
4068 			return true;
4069 
4070 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4071 		  vsig);
4072 	return false;
4073 }
4074 
4075 /**
4076  * ice_prof_bld_es - build profile ID extraction sequence changes
4077  * @hw: pointer to the HW struct
4078  * @blk: hardware block
4079  * @bld: the update package buffer build to add to
4080  * @chgs: the list of changes to make in hardware
4081  */
4082 static enum ice_status
4083 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4084 		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4085 {
4086 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4087 	struct ice_chs_chg *tmp;
4088 
4089 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4090 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4091 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4092 			struct ice_pkg_es *p;
4093 			u32 id;
4094 
4095 			id = ice_sect_id(blk, ICE_VEC_TBL);
4096 			p = (struct ice_pkg_es *)
4097 				ice_pkg_buf_alloc_section(bld, id,
4098 							  ice_struct_size(p, es,
4099 									  1) +
4100 							  vec_size -
4101 							  sizeof(p->es[0]));
4102 
4103 			if (!p)
4104 				return ICE_ERR_MAX_LIMIT;
4105 
4106 			p->count = CPU_TO_LE16(1);
4107 			p->offset = CPU_TO_LE16(tmp->prof_id);
4108 
4109 			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4110 				   ICE_NONDMA_TO_NONDMA);
4111 		}
4112 
4113 	return ICE_SUCCESS;
4114 }
4115 
4116 /**
4117  * ice_prof_bld_tcam - build profile ID TCAM changes
4118  * @hw: pointer to the HW struct
4119  * @blk: hardware block
4120  * @bld: the update package buffer build to add to
4121  * @chgs: the list of changes to make in hardware
4122  */
4123 static enum ice_status
4124 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4125 		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4126 {
4127 	struct ice_chs_chg *tmp;
4128 
4129 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4130 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4131 			struct ice_prof_id_section *p;
4132 			u32 id;
4133 
4134 			id = ice_sect_id(blk, ICE_PROF_TCAM);
4135 			p = (struct ice_prof_id_section *)
4136 				ice_pkg_buf_alloc_section(bld, id,
4137 							  ice_struct_size(p,
4138 									  entry,
4139 									  1));
4140 
4141 			if (!p)
4142 				return ICE_ERR_MAX_LIMIT;
4143 
4144 			p->count = CPU_TO_LE16(1);
4145 			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4146 			p->entry[0].prof_id = tmp->prof_id;
4147 
4148 			ice_memcpy(p->entry[0].key,
4149 				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4150 				   sizeof(hw->blk[blk].prof.t->key),
4151 				   ICE_NONDMA_TO_NONDMA);
4152 		}
4153 
4154 	return ICE_SUCCESS;
4155 }
4156 
4157 /**
4158  * ice_prof_bld_xlt1 - build XLT1 changes
4159  * @blk: hardware block
4160  * @bld: the update package buffer build to add to
4161  * @chgs: the list of changes to make in hardware
4162  */
4163 static enum ice_status
4164 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4165 		  struct LIST_HEAD_TYPE *chgs)
4166 {
4167 	struct ice_chs_chg *tmp;
4168 
4169 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4170 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4171 			struct ice_xlt1_section *p;
4172 			u32 id;
4173 
4174 			id = ice_sect_id(blk, ICE_XLT1);
4175 			p = (struct ice_xlt1_section *)
4176 				ice_pkg_buf_alloc_section(bld, id,
4177 							  ice_struct_size(p,
4178 									  value,
4179 									  1));
4180 
4181 			if (!p)
4182 				return ICE_ERR_MAX_LIMIT;
4183 
4184 			p->count = CPU_TO_LE16(1);
4185 			p->offset = CPU_TO_LE16(tmp->ptype);
4186 			p->value[0] = tmp->ptg;
4187 		}
4188 
4189 	return ICE_SUCCESS;
4190 }
4191 
4192 /**
4193  * ice_prof_bld_xlt2 - build XLT2 changes
4194  * @blk: hardware block
4195  * @bld: the update package buffer build to add to
4196  * @chgs: the list of changes to make in hardware
4197  */
4198 static enum ice_status
4199 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4200 		  struct LIST_HEAD_TYPE *chgs)
4201 {
4202 	struct ice_chs_chg *tmp;
4203 
4204 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4205 		struct ice_xlt2_section *p;
4206 		u32 id;
4207 
4208 		switch (tmp->type) {
4209 		case ICE_VSIG_ADD:
4210 		case ICE_VSI_MOVE:
4211 		case ICE_VSIG_REM:
4212 			id = ice_sect_id(blk, ICE_XLT2);
4213 			p = (struct ice_xlt2_section *)
4214 				ice_pkg_buf_alloc_section(bld, id,
4215 							  ice_struct_size(p,
4216 									  value,
4217 									  1));
4218 
4219 			if (!p)
4220 				return ICE_ERR_MAX_LIMIT;
4221 
4222 			p->count = CPU_TO_LE16(1);
4223 			p->offset = CPU_TO_LE16(tmp->vsi);
4224 			p->value[0] = CPU_TO_LE16(tmp->vsig);
4225 			break;
4226 		default:
4227 			break;
4228 		}
4229 	}
4230 
4231 	return ICE_SUCCESS;
4232 }
4233 
4234 /**
4235  * ice_upd_prof_hw - update hardware using the change list
4236  * @hw: pointer to the HW struct
4237  * @blk: hardware block
4238  * @chgs: the list of changes to make in hardware
4239  */
4240 static enum ice_status
4241 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4242 		struct LIST_HEAD_TYPE *chgs)
4243 {
4244 	struct ice_buf_build *b;
4245 	struct ice_chs_chg *tmp;
4246 	enum ice_status status;
4247 	u16 pkg_sects;
4248 	u16 xlt1 = 0;
4249 	u16 xlt2 = 0;
4250 	u16 tcam = 0;
4251 	u16 es = 0;
4252 	u16 sects;
4253 
4254 	/* count number of sections we need */
4255 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4256 		switch (tmp->type) {
4257 		case ICE_PTG_ES_ADD:
4258 			if (tmp->add_ptg)
4259 				xlt1++;
4260 			if (tmp->add_prof)
4261 				es++;
4262 			break;
4263 		case ICE_TCAM_ADD:
4264 			tcam++;
4265 			break;
4266 		case ICE_VSIG_ADD:
4267 		case ICE_VSI_MOVE:
4268 		case ICE_VSIG_REM:
4269 			xlt2++;
4270 			break;
4271 		default:
4272 			break;
4273 		}
4274 	}
4275 	sects = xlt1 + xlt2 + tcam + es;
4276 
4277 	if (!sects)
4278 		return ICE_SUCCESS;
4279 
4280 	/* Build update package buffer */
4281 	b = ice_pkg_buf_alloc(hw);
4282 	if (!b)
4283 		return ICE_ERR_NO_MEMORY;
4284 
4285 	status = ice_pkg_buf_reserve_section(b, sects);
4286 	if (status)
4287 		goto error_tmp;
4288 
4289 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
4290 	if (es) {
4291 		status = ice_prof_bld_es(hw, blk, b, chgs);
4292 		if (status)
4293 			goto error_tmp;
4294 	}
4295 
4296 	if (tcam) {
4297 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
4298 		if (status)
4299 			goto error_tmp;
4300 	}
4301 
4302 	if (xlt1) {
4303 		status = ice_prof_bld_xlt1(blk, b, chgs);
4304 		if (status)
4305 			goto error_tmp;
4306 	}
4307 
4308 	if (xlt2) {
4309 		status = ice_prof_bld_xlt2(blk, b, chgs);
4310 		if (status)
4311 			goto error_tmp;
4312 	}
4313 
4314 	/* After the package buffer build, check that the section count in the
4315 	 * buffer is non-zero and matches the number of sections detected for
4316 	 * the package update.
4317 	 */
4318 	pkg_sects = ice_pkg_buf_get_active_sections(b);
4319 	if (!pkg_sects || pkg_sects != sects) {
4320 		status = ICE_ERR_INVAL_SIZE;
4321 		goto error_tmp;
4322 	}
4323 
4324 	/* update package */
4325 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4326 	if (status == ICE_ERR_AQ_ERROR)
4327 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4328 
4329 error_tmp:
4330 	ice_pkg_buf_free(hw, b);
4331 	return status;
4332 }
4333 
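/* Illustrative sketch (documentation only, compiled out): the general
 * change-list pattern used by the flow code below -- accumulate ice_chs_chg
 * records on a local list, flush them to hardware in one update package via
 * ice_upd_prof_hw(), then free the records whether or not the update
 * succeeded. The helper name is hypothetical.
 */
#if 0
static enum ice_status
ice_example_apply_changes(struct ice_hw *hw, enum ice_block blk)
{
	struct ice_chs_chg *del, *tmp;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;

	INIT_LIST_HEAD(&chg);

	/* ... calls such as ice_move_vsi() or ice_add_prof_id_vsig() would
	 * append change records to &chg here ...
	 */

	status = ice_upd_prof_hw(hw, blk, &chg);

	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}
#endif
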
4334 /**
4335  * ice_add_prof - add profile
4336  * @hw: pointer to the HW struct
4337  * @blk: hardware block
4338  * @id: profile tracking ID
4339  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4340  * @es: extraction sequence (length of array is determined by the block)
4341  *
4342  * This function registers a profile, which matches a set of PTGs with a
4343  * particular extraction sequence. While the hardware profile is allocated,
4344  * it will not be written until the first call to ice_add_flow that specifies
4345  * the ID value used here.
4346  */
4347 enum ice_status
4348 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4349 	     struct ice_fv_word *es)
4350 {
4351 	u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4352 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4353 	struct ice_prof_map *prof;
4354 	enum ice_status status;
4355 	u8 byte = 0;
4356 	u8 prof_id;
4357 
4358 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4359 
4360 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4361 
4362 	/* search for existing profile */
4363 	status = ice_find_prof_id(hw, blk, es, &prof_id);
4364 	if (status) {
4365 		/* allocate profile ID */
4366 		status = ice_alloc_prof_id(hw, blk, &prof_id);
4367 		if (status)
4368 			goto err_ice_add_prof;
4369 
4370 		/* and write new es */
4371 		ice_write_es(hw, blk, prof_id, es);
4372 	}
4373 
4374 	ice_prof_inc_ref(hw, blk, prof_id);
4375 
4376 	/* add profile info */
4377 
	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
	if (!prof) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}
4381 
4382 	prof->profile_cookie = id;
4383 	prof->prof_id = prof_id;
4384 	prof->ptg_cnt = 0;
4385 	prof->context = 0;
4386 
4387 	/* build list of ptgs */
4388 	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4389 		u8 bit;
4390 
4391 		if (!ptypes[byte]) {
4392 			bytes--;
4393 			byte++;
4394 			continue;
4395 		}
4396 
4397 		/* Examine 8 bits per byte */
4398 		ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
4399 				     BITS_PER_BYTE) {
4400 			u16 ptype;
4401 			u8 ptg;
4402 
4403 			ptype = byte * BITS_PER_BYTE + bit;
4404 
4405 			/* The package should place all ptypes in a non-zero
4406 			 * PTG, so the following call should never fail.
4407 			 */
4408 			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4409 				continue;
4410 
4411 			/* If PTG is already added, skip and continue */
4412 			if (ice_is_bit_set(ptgs_used, ptg))
4413 				continue;
4414 
4415 			ice_set_bit(ptg, ptgs_used);
4416 			prof->ptg[prof->ptg_cnt] = ptg;
4417 
4418 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4419 				break;
4420 		}
4421 
4422 		bytes--;
4423 		byte++;
4424 	}
4425 
4426 	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4427 	status = ICE_SUCCESS;
4428 
4429 err_ice_add_prof:
4430 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4431 	return status;
4432 }
4433 
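/* Illustrative usage sketch (documentation only, compiled out): register a
 * profile that matches a single packet type against this block's extraction
 * sequence 'es'. The ptype value and the 0x1234 tracking ID are hypothetical,
 * caller-chosen values.
 */
#if 0
static enum ice_status
ice_example_add_prof(struct ice_hw *hw, struct ice_fv_word *es)
{
	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
	u16 ptype = 10;		/* hypothetical packet type */

	/* set the bit for the ptype in the bitmap */
	ptypes[ptype / BITS_PER_BYTE] |= BIT(ptype % BITS_PER_BYTE);

	return ice_add_prof(hw, ICE_BLK_RSS, 0x1234, ptypes, es);
}
#endif
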
4434 /**
4435  * ice_search_prof_id - Search for a profile tracking ID
4436  * @hw: pointer to the HW struct
4437  * @blk: hardware block
4438  * @id: profile tracking ID
4439  *
4440  * This will search for a profile tracking ID which was previously added.
4441  * The profile map lock should be held before calling this function.
4442  */
4443 struct ice_prof_map *
4444 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4445 {
4446 	struct ice_prof_map *entry = NULL;
4447 	struct ice_prof_map *map;
4448 
4449 	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
4450 		if (map->profile_cookie == id) {
4451 			entry = map;
4452 			break;
4453 		}
4454 
4455 	return entry;
4456 }
4457 
4458 /**
4459  * ice_set_prof_context - Set context for a given profile
4460  * @hw: pointer to the HW struct
4461  * @blk: hardware block
4462  * @id: profile tracking ID
4463  * @cntxt: context
4464  */
4465 enum ice_status
4466 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
4467 {
4468 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4469 	struct ice_prof_map *entry;
4470 
4471 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4472 	entry = ice_search_prof_id(hw, blk, id);
4473 	if (entry) {
4474 		entry->context = cntxt;
4475 		status = ICE_SUCCESS;
4476 	}
4477 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4478 	return status;
4479 }
4480 
4481 /**
4482  * ice_get_prof_context - Get context for a given profile
4483  * @hw: pointer to the HW struct
4484  * @blk: hardware block
4485  * @id: profile tracking ID
4486  * @cntxt: pointer to variable to receive the context
4487  */
4488 enum ice_status
4489 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
4490 {
4491 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4492 	struct ice_prof_map *entry;
4493 
4494 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4495 	entry = ice_search_prof_id(hw, blk, id);
4496 	if (entry) {
4497 		*cntxt = entry->context;
4498 		status = ICE_SUCCESS;
4499 	}
4500 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4501 	return status;
4502 }
4503 
4504 /**
4505  * ice_vsig_prof_id_count - count profiles in a VSIG
4506  * @hw: pointer to the HW struct
4507  * @blk: hardware block
4508  * @vsig: VSIG whose profiles are to be counted
4509  */
4510 static u16
4511 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4512 {
4513 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4514 	struct ice_vsig_prof *p;
4515 
4516 	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4517 			    ice_vsig_prof, list)
4518 		count++;
4519 
4520 	return count;
4521 }
4522 
4523 /**
4524  * ice_rel_tcam_idx - release a TCAM index
4525  * @hw: pointer to the HW struct
4526  * @blk: hardware block
4527  * @idx: the index to release
4528  */
4529 static enum ice_status
4530 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4531 {
4532 	/* Masks to invoke a never match entry */
4533 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4534 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4535 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4536 	enum ice_status status;
4537 
4538 	/* write the TCAM entry */
4539 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4540 				      dc_msk, nm_msk);
4541 	if (status)
4542 		return status;
4543 
4544 	/* release the TCAM entry */
4545 	status = ice_free_tcam_ent(hw, blk, idx);
4546 
4547 	return status;
4548 }
4549 
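/* Note on the masks above (documentation only): vl_msk marks every bit of
 * the key valid, dc_msk 0xFE leaves only bit 0 of byte 0 significant (all
 * other bytes are fully don't-care), and nm_msk 0x01 places that one
 * significant bit in the never-match portion of the key, so the rewritten
 * entry can never be hit by a lookup before the index is freed.
 */
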
4550 /**
4551  * ice_rem_prof_id - remove one profile from a VSIG
4552  * @hw: pointer to the HW struct
4553  * @blk: hardware block
4554  * @prof: pointer to profile structure to remove
4555  */
4556 static enum ice_status
4557 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4558 		struct ice_vsig_prof *prof)
4559 {
4560 	enum ice_status status;
4561 	u16 i;
4562 
4563 	for (i = 0; i < prof->tcam_count; i++)
4564 		if (prof->tcam[i].in_use) {
4565 			prof->tcam[i].in_use = false;
4566 			status = ice_rel_tcam_idx(hw, blk,
4567 						  prof->tcam[i].tcam_idx);
4568 			if (status)
4569 				return ICE_ERR_HW_TABLE;
4570 		}
4571 
4572 	return ICE_SUCCESS;
4573 }
4574 
4575 /**
4576  * ice_rem_vsig - remove VSIG
4577  * @hw: pointer to the HW struct
4578  * @blk: hardware block
4579  * @vsig: the VSIG to remove
4580  * @chg: the change list
4581  */
4582 static enum ice_status
4583 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4584 	     struct LIST_HEAD_TYPE *chg)
4585 {
4586 	u16 idx = vsig & ICE_VSIG_IDX_M;
4587 	struct ice_vsig_vsi *vsi_cur;
4588 	struct ice_vsig_prof *d, *t;
4589 	enum ice_status status;
4590 
4591 	/* remove TCAM entries */
4592 	LIST_FOR_EACH_ENTRY_SAFE(d, t,
4593 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4594 				 ice_vsig_prof, list) {
4595 		status = ice_rem_prof_id(hw, blk, d);
4596 		if (status)
4597 			return status;
4598 
4599 		LIST_DEL(&d->list);
4600 		ice_free(hw, d);
4601 	}
4602 
4603 	/* Move all VSIs associated with this VSIG to the default VSIG */
4604 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4605 	/* If the VSIG has at least 1 VSI then iterate through the list
4606 	 * and remove the VSIs before deleting the group.
4607 	 */
4608 	if (vsi_cur)
4609 		do {
4610 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4611 			struct ice_chs_chg *p;
4612 
4613 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4614 			if (!p)
4615 				return ICE_ERR_NO_MEMORY;
4616 
4617 			p->type = ICE_VSIG_REM;
4618 			p->orig_vsig = vsig;
4619 			p->vsig = ICE_DEFAULT_VSIG;
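			/* derive the VSI index from its offset in the xlt2 VSI table */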
4620 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4621 
4622 			LIST_ADD(&p->list_entry, chg);
4623 
4624 			vsi_cur = tmp;
4625 		} while (vsi_cur);
4626 
4627 	return ice_vsig_free(hw, blk, vsig);
4628 }
4629 
4630 /**
4631  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4632  * @hw: pointer to the HW struct
4633  * @blk: hardware block
4634  * @vsig: VSIG to remove the profile from
4635  * @hdl: profile handle indicating which profile to remove
4636  * @chg: list to receive a record of changes
4637  */
4638 static enum ice_status
4639 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4640 		     struct LIST_HEAD_TYPE *chg)
4641 {
4642 	u16 idx = vsig & ICE_VSIG_IDX_M;
4643 	struct ice_vsig_prof *p, *t;
4644 	enum ice_status status;
4645 
4646 	LIST_FOR_EACH_ENTRY_SAFE(p, t,
4647 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4648 				 ice_vsig_prof, list)
4649 		if (p->profile_cookie == hdl) {
4650 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4651 				/* this is the last profile, remove the VSIG */
4652 				return ice_rem_vsig(hw, blk, vsig, chg);
4653 
4654 			status = ice_rem_prof_id(hw, blk, p);
4655 			if (!status) {
4656 				LIST_DEL(&p->list);
4657 				ice_free(hw, p);
4658 			}
4659 			return status;
4660 		}
4661 
4662 	return ICE_ERR_DOES_NOT_EXIST;
4663 }
4664 
4665 /**
4666  * ice_rem_flow_all - remove all flows with a particular profile
4667  * @hw: pointer to the HW struct
4668  * @blk: hardware block
4669  * @id: profile tracking ID
4670  */
4671 static enum ice_status
4672 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4673 {
4674 	struct ice_chs_chg *del, *tmp;
4675 	enum ice_status status;
4676 	struct LIST_HEAD_TYPE chg;
4677 	u16 i;
4678 
4679 	INIT_LIST_HEAD(&chg);
4680 
4681 	for (i = 1; i < ICE_MAX_VSIGS; i++)
4682 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4683 			if (ice_has_prof_vsig(hw, blk, i, id)) {
4684 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4685 							      &chg);
4686 				if (status)
4687 					goto err_ice_rem_flow_all;
4688 			}
4689 		}
4690 
4691 	status = ice_upd_prof_hw(hw, blk, &chg);
4692 
4693 err_ice_rem_flow_all:
4694 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
4695 		LIST_DEL(&del->list_entry);
4696 		ice_free(hw, del);
4697 	}
4698 
4699 	return status;
4700 }
4701 
4702 /**
4703  * ice_rem_prof - remove profile
4704  * @hw: pointer to the HW struct
4705  * @blk: hardware block
4706  * @id: profile tracking ID
4707  *
4708  * This will remove the profile specified by the ID parameter, which was
4709  * previously created through ice_add_prof. If any existing entries
4710  * are associated with this profile, they will be removed as well.
4711  */
4712 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4713 {
4714 	struct ice_prof_map *pmap;
4715 	enum ice_status status;
4716 
4717 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4718 
4719 	pmap = ice_search_prof_id(hw, blk, id);
4720 	if (!pmap) {
4721 		status = ICE_ERR_DOES_NOT_EXIST;
4722 		goto err_ice_rem_prof;
4723 	}
4724 
4725 	/* remove all flows with this profile */
4726 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4727 	if (status)
4728 		goto err_ice_rem_prof;
4729 
4730 	/* dereference profile, and possibly remove */
4731 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4732 
4733 	LIST_DEL(&pmap->list);
4734 	ice_free(hw, pmap);
4735 
4736 err_ice_rem_prof:
4737 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4738 	return status;
4739 }
4740 
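/* Usage note (documentation only): the 'id' here is the same caller-chosen
 * tracking cookie that was passed to ice_add_prof(), so a profile registered
 * as, e.g., ice_add_prof(hw, ICE_BLK_RSS, 0x1234, ...) is torn down with
 * ice_rem_prof(hw, ICE_BLK_RSS, 0x1234).
 */
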
4741 /**
4742  * ice_get_prof - get profile
4743  * @hw: pointer to the HW struct
4744  * @blk: hardware block
4745  * @hdl: profile handle
4746  * @chg: change list
4747  */
4748 static enum ice_status
4749 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4750 	     struct LIST_HEAD_TYPE *chg)
4751 {
4752 	enum ice_status status = ICE_SUCCESS;
4753 	struct ice_prof_map *map;
4754 	struct ice_chs_chg *p;
4755 	u16 i;
4756 
4757 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4758 	/* Get the details on the profile specified by the handle ID */
4759 	map = ice_search_prof_id(hw, blk, hdl);
4760 	if (!map) {
4761 		status = ICE_ERR_DOES_NOT_EXIST;
4762 		goto err_ice_get_prof;
4763 	}
4764 
4765 	for (i = 0; i < map->ptg_cnt; i++)
4766 		if (!hw->blk[blk].es.written[map->prof_id]) {
4767 			/* add ES to change list */
4768 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4769 			if (!p) {
4770 				status = ICE_ERR_NO_MEMORY;
4771 				goto err_ice_get_prof;
4772 			}
4773 
4774 			p->type = ICE_PTG_ES_ADD;
4775 			p->ptype = 0;
4776 			p->ptg = map->ptg[i];
4777 			p->add_ptg = 0;
4778 
4779 			p->add_prof = 1;
4780 			p->prof_id = map->prof_id;
4781 
4782 			hw->blk[blk].es.written[map->prof_id] = true;
4783 
4784 			LIST_ADD(&p->list_entry, chg);
4785 		}
4786 
4787 err_ice_get_prof:
4788 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4789 	/* let caller clean up the change list */
4790 	return status;
4791 }
4792 
4793 /**
4794  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4795  * @hw: pointer to the HW struct
4796  * @blk: hardware block
4797  * @vsig: VSIG from which to copy the list
4798  * @lst: output list
4799  *
4800  * This routine makes a copy of the list of profiles in the specified VSIG.
4801  */
4802 static enum ice_status
4803 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4804 		   struct LIST_HEAD_TYPE *lst)
4805 {
4806 	struct ice_vsig_prof *ent1, *ent2;
4807 	u16 idx = vsig & ICE_VSIG_IDX_M;
4808 
4809 	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4810 			    ice_vsig_prof, list) {
4811 		struct ice_vsig_prof *p;
4812 
4813 		/* copy to the input list */
4814 		p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
4815 						       ICE_NONDMA_TO_NONDMA);
4816 		if (!p)
4817 			goto err_ice_get_profs_vsig;
4818 
4819 		LIST_ADD_TAIL(&p->list, lst);
4820 	}
4821 
4822 	return ICE_SUCCESS;
4823 
4824 err_ice_get_profs_vsig:
4825 	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
4826 		LIST_DEL(&ent1->list);
4827 		ice_free(hw, ent1);
4828 	}
4829 
4830 	return ICE_ERR_NO_MEMORY;
4831 }
4832 
4833 /**
4834  * ice_add_prof_to_lst - add profile entry to a list
4835  * @hw: pointer to the HW struct
4836  * @blk: hardware block
4837  * @lst: the list to be added to
4838  * @hdl: profile handle of entry to add
4839  */
4840 static enum ice_status
4841 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4842 		    struct LIST_HEAD_TYPE *lst, u64 hdl)
4843 {
4844 	enum ice_status status = ICE_SUCCESS;
4845 	struct ice_prof_map *map;
4846 	struct ice_vsig_prof *p;
4847 	u16 i;
4848 
4849 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4850 	map = ice_search_prof_id(hw, blk, hdl);
4851 	if (!map) {
4852 		status = ICE_ERR_DOES_NOT_EXIST;
4853 		goto err_ice_add_prof_to_lst;
4854 	}
4855 
4856 	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
4857 	if (!p) {
4858 		status = ICE_ERR_NO_MEMORY;
4859 		goto err_ice_add_prof_to_lst;
4860 	}
4861 
4862 	p->profile_cookie = map->profile_cookie;
4863 	p->prof_id = map->prof_id;
4864 	p->tcam_count = map->ptg_cnt;
4865 
4866 	for (i = 0; i < map->ptg_cnt; i++) {
4867 		p->tcam[i].prof_id = map->prof_id;
4868 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4869 		p->tcam[i].ptg = map->ptg[i];
4870 	}
4871 
4872 	LIST_ADD(&p->list, lst);
4873 
4874 err_ice_add_prof_to_lst:
4875 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4876 	return status;
4877 }
4878 
4879 /**
4880  * ice_move_vsi - move VSI to another VSIG
4881  * @hw: pointer to the HW struct
4882  * @blk: hardware block
4883  * @vsi: the VSI to move
4884  * @vsig: the VSIG to move the VSI to
4885  * @chg: the change list
4886  */
4887 static enum ice_status
4888 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4889 	     struct LIST_HEAD_TYPE *chg)
4890 {
4891 	enum ice_status status;
4892 	struct ice_chs_chg *p;
4893 	u16 orig_vsig;
4894 
4895 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4896 	if (!p)
4897 		return ICE_ERR_NO_MEMORY;
4898 
4899 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4900 	if (!status)
4901 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4902 
4903 	if (status) {
4904 		ice_free(hw, p);
4905 		return status;
4906 	}
4907 
4908 	p->type = ICE_VSI_MOVE;
4909 	p->vsi = vsi;
4910 	p->orig_vsig = orig_vsig;
4911 	p->vsig = vsig;
4912 
4913 	LIST_ADD(&p->list_entry, chg);
4914 
4915 	return ICE_SUCCESS;
4916 }
4917 
4918 /**
4919  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
4920  * @hw: pointer to the HW struct
4921  * @idx: the index of the TCAM entry to remove
4922  * @chg: the list of change structures to search
4923  */
4924 static void
4925 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
4926 {
4927 	struct ice_chs_chg *pos, *tmp;
4928 
4929 	LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
4930 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4931 			LIST_DEL(&tmp->list_entry);
4932 			ice_free(hw, tmp);
4933 		}
4934 }
4935 
4936 /**
4937  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4938  * @hw: pointer to the HW struct
4939  * @blk: hardware block
4940  * @enable: true to enable, false to disable
4941  * @vsig: the VSIG of the TCAM entry
4942  * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
4943  * @chg: the change list
4944  *
4945  * This function appends an enable or disable TCAM entry in the change log
4946  */
4947 static enum ice_status
4948 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4949 		      u16 vsig, struct ice_tcam_inf *tcam,
4950 		      struct LIST_HEAD_TYPE *chg)
4951 {
4952 	enum ice_status status;
4953 	struct ice_chs_chg *p;
4954 
4955 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4956 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4957 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4958 
4959 	/* if disabling, free the TCAM */
4960 	if (!enable) {
4961 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4962 
4963 		/* if we have already created a change for this TCAM entry, then
4964 		 * we need to remove that entry, in order to prevent writing to
4965 		 * a TCAM entry we will no longer have ownership of.
4966 		 */
4967 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4968 		tcam->tcam_idx = 0;
4969 		tcam->in_use = 0;
4970 		return status;
4971 	}
4972 
4973 	/* for re-enabling, reallocate a TCAM */
4974 	status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
4975 	if (status)
4976 		return status;
4977 
4978 	/* add TCAM to change list */
4979 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4980 	if (!p)
4981 		return ICE_ERR_NO_MEMORY;
4982 
4983 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4984 				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
4985 				      nm_msk);
4986 	if (status)
4987 		goto err_ice_prof_tcam_ena_dis;
4988 
4989 	tcam->in_use = 1;
4990 
4991 	p->type = ICE_TCAM_ADD;
4992 	p->add_tcam_idx = true;
4993 	p->prof_id = tcam->prof_id;
4994 	p->ptg = tcam->ptg;
4995 	p->vsig = 0;
4996 	p->tcam_idx = tcam->tcam_idx;
4997 
4998 	/* log change */
4999 	LIST_ADD(&p->list_entry, chg);
5000 
5001 	return ICE_SUCCESS;
5002 
5003 err_ice_prof_tcam_ena_dis:
5004 	ice_free(hw, p);
5005 	return status;
5006 }
5007 
5008 /**
5009  * ice_adj_prof_priorities - adjust profile based on priorities
5010  * @hw: pointer to the HW struct
5011  * @blk: hardware block
5012  * @vsig: the VSIG for which to adjust profile priorities
5013  * @chg: the change list
5014  */
5015 static enum ice_status
5016 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5017 			struct LIST_HEAD_TYPE *chg)
5018 {
5019 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5020 	enum ice_status status = ICE_SUCCESS;
5021 	struct ice_vsig_prof *t;
5022 	u16 idx;
5023 
5024 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5025 	idx = vsig & ICE_VSIG_IDX_M;
5026 
5027 	/* Priority is based on the order in which the profiles are added. The
5028 	 * newest added profile has highest priority and the oldest added
5029 	 * profile has the lowest priority. Since the profile property list for
5030 	 * a VSIG is sorted from newest to oldest, this code traverses the list
5031 	 * in order and enables the first of each PTG that it finds (that is not
5032 	 * already enabled); it also disables any duplicate PTGs that it finds
5033 	 * in the older profiles (that are currently enabled).
5034 	 */
5035 
5036 	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5037 			    ice_vsig_prof, list) {
5038 		u16 i;
5039 
5040 		for (i = 0; i < t->tcam_count; i++) {
5041 			bool used;
5042 
5043 			/* Scan the priorities from newest to oldest.
5044 			 * Make sure that the newest profiles take priority.
5045 			 */
5046 			used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);
5047 
5048 			if (used && t->tcam[i].in_use) {
5049 				/* need to mark this PTG as never match, as it
5050 				 * was already in use and therefore duplicate
5051 				 * (and lower priority)
5052 				 */
5053 				status = ice_prof_tcam_ena_dis(hw, blk, false,
5054 							       vsig,
5055 							       &t->tcam[i],
5056 							       chg);
5057 				if (status)
5058 					return status;
5059 			} else if (!used && !t->tcam[i].in_use) {
5060 				/* need to enable this PTG, as it is not in use
5061 				 * and not enabled (highest priority)
5062 				 */
5063 				status = ice_prof_tcam_ena_dis(hw, blk, true,
5064 							       vsig,
5065 							       &t->tcam[i],
5066 							       chg);
5067 				if (status)
5068 					return status;
5069 			}
5070 
5071 			/* keep track of used ptgs */
5072 			ice_set_bit(t->tcam[i].ptg, ptgs_used);
5073 		}
5074 	}
5075 
5076 	return status;
5077 }
5078 
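/* Worked example (documentation only): suppose a VSIG's property list is
 * [B, A], with B added after A, and both profiles carry a TCAM entry for
 * PTG 5. The walk above visits B first, sees PTG 5 unused, and leaves B's
 * entry enabled; by the time it reaches A, PTG 5 is already marked used, so
 * A's still-enabled entry is rewritten as never-match. The newest profile
 * wins.
 */
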
5079 /**
5080  * ice_add_prof_id_vsig - add profile to VSIG
5081  * @hw: pointer to the HW struct
5082  * @blk: hardware block
5083  * @vsig: the VSIG to which this profile is to be added
5084  * @hdl: the profile handle indicating the profile to add
5085  * @rev: true to add entries to the end of the list
5086  * @chg: the change list
5087  */
5088 static enum ice_status
5089 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5090 		     bool rev, struct LIST_HEAD_TYPE *chg)
5091 {
5092 	/* Masks that ignore flags */
5093 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5094 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5095 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5096 	enum ice_status status = ICE_SUCCESS;
5097 	struct ice_prof_map *map;
5098 	struct ice_vsig_prof *t;
5099 	struct ice_chs_chg *p;
5100 	u16 vsig_idx, i;
5101 
5102 	/* it is an error if this VSIG already has this profile */
5103 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5104 		return ICE_ERR_ALREADY_EXISTS;
5105 
5106 	/* new VSIG profile structure */
5107 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5108 	if (!t)
5109 		return ICE_ERR_NO_MEMORY;
5110 
5111 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5112 	/* Get the details on the profile specified by the handle ID */
5113 	map = ice_search_prof_id(hw, blk, hdl);
5114 	if (!map) {
5115 		status = ICE_ERR_DOES_NOT_EXIST;
5116 		goto err_ice_add_prof_id_vsig;
5117 	}
5118 
5119 	t->profile_cookie = map->profile_cookie;
5120 	t->prof_id = map->prof_id;
5121 	t->tcam_count = map->ptg_cnt;
5122 
5123 	/* create TCAM entries */
5124 	for (i = 0; i < map->ptg_cnt; i++) {
5125 		u16 tcam_idx;
5126 
5127 		/* add TCAM to change list */
5128 		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5129 		if (!p) {
5130 			status = ICE_ERR_NO_MEMORY;
5131 			goto err_ice_add_prof_id_vsig;
5132 		}
5133 
5134 		/* allocate the TCAM entry index */
5135 		status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
5136 		if (status) {
5137 			ice_free(hw, p);
5138 			goto err_ice_add_prof_id_vsig;
5139 		}
5140 
5141 		t->tcam[i].ptg = map->ptg[i];
5142 		t->tcam[i].prof_id = map->prof_id;
5143 		t->tcam[i].tcam_idx = tcam_idx;
5144 		t->tcam[i].in_use = true;
5145 
5146 		p->type = ICE_TCAM_ADD;
5147 		p->add_tcam_idx = true;
5148 		p->prof_id = t->tcam[i].prof_id;
5149 		p->ptg = t->tcam[i].ptg;
5150 		p->vsig = vsig;
5151 		p->tcam_idx = t->tcam[i].tcam_idx;
5152 
5153 		/* write the TCAM entry */
5154 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5155 					      t->tcam[i].prof_id,
5156 					      t->tcam[i].ptg, vsig, 0, 0,
5157 					      vl_msk, dc_msk, nm_msk);
5158 		if (status) {
5159 			ice_free(hw, p);
5160 			goto err_ice_add_prof_id_vsig;
5161 		}
5162 
5163 		/* log change */
5164 		LIST_ADD(&p->list_entry, chg);
5165 	}
5166 
5167 	/* add profile to VSIG */
5168 	vsig_idx = vsig & ICE_VSIG_IDX_M;
5169 	if (rev)
5170 		LIST_ADD_TAIL(&t->list,
5171 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5172 	else
5173 		LIST_ADD(&t->list,
5174 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5175 
5176 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5177 	return status;
5178 
5179 err_ice_add_prof_id_vsig:
5180 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5181 	/* let caller clean up the change list */
5182 	ice_free(hw, t);
5183 	return status;
5184 }
5185 
5186 /**
5187  * ice_create_prof_id_vsig - add a new VSIG with a single profile
5188  * @hw: pointer to the HW struct
5189  * @blk: hardware block
5190  * @vsi: the initial VSI that will be in VSIG
5191  * @hdl: the profile handle of the profile that will be added to the VSIG
5192  * @chg: the change list
5193  */
5194 static enum ice_status
5195 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5196 			struct LIST_HEAD_TYPE *chg)
5197 {
5198 	enum ice_status status;
5199 	struct ice_chs_chg *p;
5200 	u16 new_vsig;
5201 
5202 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5203 	if (!p)
5204 		return ICE_ERR_NO_MEMORY;
5205 
5206 	new_vsig = ice_vsig_alloc(hw, blk);
5207 	if (!new_vsig) {
5208 		status = ICE_ERR_HW_TABLE;
5209 		goto err_ice_create_prof_id_vsig;
5210 	}
5211 
5212 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5213 	if (status)
5214 		goto err_ice_create_prof_id_vsig;
5215 
5216 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5217 	if (status)
5218 		goto err_ice_create_prof_id_vsig;
5219 
5220 	p->type = ICE_VSIG_ADD;
5221 	p->vsi = vsi;
5222 	p->orig_vsig = ICE_DEFAULT_VSIG;
5223 	p->vsig = new_vsig;
5224 
5225 	LIST_ADD(&p->list_entry, chg);
5226 
5227 	return ICE_SUCCESS;
5228 
5229 err_ice_create_prof_id_vsig:
5230 	/* let caller clean up the change list */
5231 	ice_free(hw, p);
5232 	return status;
5233 }
5234 
5235 /**
5236  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5237  * @hw: pointer to the HW struct
5238  * @blk: hardware block
5239  * @vsi: the initial VSI that will be in VSIG
5240  * @lst: the list of profiles that will be added to the VSIG
5241  * @new_vsig: return of new VSIG
5242  * @chg: the change list
5243  */
5244 static enum ice_status
5245 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5246 			 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
5247 			 struct LIST_HEAD_TYPE *chg)
5248 {
5249 	struct ice_vsig_prof *t;
5250 	enum ice_status status;
5251 	u16 vsig;
5252 
5253 	vsig = ice_vsig_alloc(hw, blk);
5254 	if (!vsig)
5255 		return ICE_ERR_HW_TABLE;
5256 
5257 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5258 	if (status)
5259 		return status;
5260 
5261 	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5262 		/* Reverse the order here since we are copying the list */
5263 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5264 					      true, chg);
5265 		if (status)
5266 			return status;
5267 	}
5268 
5269 	*new_vsig = vsig;
5270 
5271 	return ICE_SUCCESS;
5272 }
5273 
5274 /**
5275  * ice_find_prof_vsig - find a VSIG with a specific profile handle
5276  * @hw: pointer to the HW struct
5277  * @blk: hardware block
5278  * @hdl: the profile handle of the profile to search for
5279  * @vsig: returns the VSIG with the matching profile
5280  */
5281 static bool
5282 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5283 {
5284 	struct ice_vsig_prof *t;
5285 	enum ice_status status;
5286 	struct LIST_HEAD_TYPE lst;
5287 
5288 	INIT_LIST_HEAD(&lst);
5289 
5290 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5291 	if (!t)
5292 		return false;
5293 
5294 	t->profile_cookie = hdl;
5295 	LIST_ADD(&t->list, &lst);
5296 
5297 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5298 
5299 	LIST_DEL(&t->list);
5300 	ice_free(hw, t);
5301 
5302 	return status == ICE_SUCCESS;
5303 }
5304 
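/* Design note (documentation only): rather than duplicating match logic,
 * ice_find_prof_vsig() wraps the single handle in a temporary one-entry
 * list so that the general ice_find_dup_props_vsig() comparator can be
 * reused for the single-profile case.
 */
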
5305 /**
5306  * ice_add_vsi_flow - add VSI flow
5307  * @hw: pointer to the HW struct
5308  * @blk: hardware block
5309  * @vsi: input VSI
5310  * @vsig: target VSIG to include the input VSI
5311  *
5312  * Calling this function will add the VSI to a given VSIG and
5313  * update the HW tables accordingly. This call can be used to
5314  * add multiple VSIs to a VSIG if we know beforehand that those
5315  * VSIs have the same characteristics as the VSIG. This avoids
5316  * the cost of generating a new VSIG and TCAM entries and the
5317  * subsequent rollback once a matching VSIG is found.
5318  */
5319 enum ice_status
5320 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
5321 {
5322 	struct ice_chs_chg *tmp, *del;
5323 	struct LIST_HEAD_TYPE chg;
5324 	enum ice_status status;
5325 
5326 	/* if target VSIG is default the move is invalid */
5327 	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
5328 		return ICE_ERR_PARAM;
5329 
5330 	INIT_LIST_HEAD(&chg);
5331 
5332 	/* move VSI to the VSIG that matches */
5333 	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5334 	/* update hardware if success */
5335 	if (!status)
5336 		status = ice_upd_prof_hw(hw, blk, &chg);
5337 
5338 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5339 		LIST_DEL(&del->list_entry);
5340 		ice_free(hw, del);
5341 	}
5342 
5343 	return status;
5344 }
5345 
5346 /**
5347  * ice_add_prof_id_flow - add profile flow
5348  * @hw: pointer to the HW struct
5349  * @blk: hardware block
5350  * @vsi: the VSI to enable with the profile specified by ID
5351  * @hdl: profile handle
5352  *
5353  * Calling this function will update the hardware tables to enable the
5354  * profile indicated by the ID parameter for the specified VSI. Once
5355  * successfully called, the flow will be enabled.
5356  */
5357 enum ice_status
5358 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5359 {
5360 	struct ice_vsig_prof *tmp1, *del1;
5361 	struct ice_chs_chg *tmp, *del;
5362 	struct LIST_HEAD_TYPE union_lst;
5363 	enum ice_status status;
5364 	struct LIST_HEAD_TYPE chg;
5365 	u16 vsig;
5366 
5367 	INIT_LIST_HEAD(&union_lst);
5368 	INIT_LIST_HEAD(&chg);
5369 
5370 	/* Get profile */
5371 	status = ice_get_prof(hw, blk, hdl, &chg);
5372 	if (status)
5373 		return status;
5374 
5375 	/* determine if VSI is already part of a VSIG */
5376 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5377 	if (!status && vsig) {
5378 		bool only_vsi;
5379 		u16 or_vsig;
5380 		u16 ref;
5381 
5382 		/* found in VSIG */
5383 		or_vsig = vsig;
5384 
5385 		/* make sure that there is no overlap/conflict between the new
5386 		 * characteristics and the existing ones; we don't support that
5387 		 * scenario
5388 		 */
5389 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5390 			status = ICE_ERR_ALREADY_EXISTS;
5391 			goto err_ice_add_prof_id_flow;
5392 		}
5393 
5394 		/* last VSI in the VSIG? */
5395 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5396 		if (status)
5397 			goto err_ice_add_prof_id_flow;
5398 		only_vsi = (ref == 1);
5399 
5400 		/* create a union of the current profiles and the one being
5401 		 * added
5402 		 */
5403 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5404 		if (status)
5405 			goto err_ice_add_prof_id_flow;
5406 
5407 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5408 		if (status)
5409 			goto err_ice_add_prof_id_flow;
5410 
5411 		/* search for a VSIG with an exact characteristics match */
5412 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5413 		if (!status) {
5414 			/* move VSI to the VSIG that matches */
5415 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5416 			if (status)
5417 				goto err_ice_add_prof_id_flow;
5418 
5419 			/* VSI has been moved out of or_vsig. If the or_vsig had
5420 			 * only that VSI, it is now empty and can be removed.
5421 			 */
5422 			if (only_vsi) {
5423 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5424 				if (status)
5425 					goto err_ice_add_prof_id_flow;
5426 			}
5427 		} else if (only_vsi) {
5428 			/* If the original VSIG only contains one VSI, then it
5429 			 * will be the requesting VSI. In this case the VSI is
5430 			 * not sharing entries and we can simply add the new
5431 			 * profile to the VSIG.
5432 			 */
5433 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5434 						      &chg);
5435 			if (status)
5436 				goto err_ice_add_prof_id_flow;
5437 
5438 			/* Adjust priorities */
5439 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5440 			if (status)
5441 				goto err_ice_add_prof_id_flow;
5442 		} else {
5443 			/* No match, so we need a new VSIG */
5444 			status = ice_create_vsig_from_lst(hw, blk, vsi,
5445 							  &union_lst, &vsig,
5446 							  &chg);
5447 			if (status)
5448 				goto err_ice_add_prof_id_flow;
5449 
5450 			/* Adjust priorities */
5451 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5452 			if (status)
5453 				goto err_ice_add_prof_id_flow;
5454 		}
5455 	} else {
5456 		/* need to find or add a VSIG */
5457 		/* search for a VSIG with an exact characteristics match */
5458 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5459 			/* found an exact match; add or move the VSI to
5460 			 * the VSIG that matches */
5461 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5462 			if (status)
5463 				goto err_ice_add_prof_id_flow;
5464 		} else {
5465 			/* we did not find an exact match, so we need
5466 			 * to add a new VSIG */
5467 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5468 							 &chg);
5469 			if (status)
5470 				goto err_ice_add_prof_id_flow;
5471 		}
5472 	}
5473 
5474 	/* update hardware */
5475 	if (!status)
5476 		status = ice_upd_prof_hw(hw, blk, &chg);
5477 
5478 err_ice_add_prof_id_flow:
5479 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5480 		LIST_DEL(&del->list_entry);
5481 		ice_free(hw, del);
5482 	}
5483 
5484 	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
5485 		LIST_DEL(&del1->list);
5486 		ice_free(hw, del1);
5487 	}
5488 
5489 	return status;
5490 }
5491 
5492 /**
5493  * ice_add_flow - add flow
5494  * @hw: pointer to the HW struct
5495  * @blk: hardware block
5496  * @vsi: array of VSIs to enable with the profile specified by ID
5497  * @count: number of elements in the VSI array
5498  * @id: profile tracking ID
5499  *
5500  * Calling this function will update the hardware tables to enable the
5501  * profile indicated by the ID parameter for the VSIs specified in the VSI
5502  * array. Once successfully called, the flow will be enabled.
5503  */
5504 enum ice_status
5505 ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
5506 	     u64 id)
5507 {
5508 	enum ice_status status;
5509 	u16 i;
5510 
5511 	for (i = 0; i < count; i++) {
5512 		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
5513 		if (status)
5514 			return status;
5515 	}
5516 
5517 	return ICE_SUCCESS;
5518 }
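
/* Example caller of ice_add_flow() (a sketch only: the block, the VSI
 * numbers and the tracking ID are hypothetical, and the ID must match
 * a profile previously added for that block):
 *
 *	u16 vsi_list[] = { 0, 1 };
 *	u64 prof_id = 0x1234;
 *	enum ice_status status;
 *
 *	status = ice_add_flow(hw, ICE_BLK_RSS, vsi_list,
 *			      ARRAY_SIZE(vsi_list), prof_id);
 *
 * Note that the loop above stops at the first failure, so on a nonzero
 * return the profile may already be enabled for VSIs earlier in the
 * array; ice_rem_flow() can be used to undo a partial enable.
 */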
5519 
5520 /**
5521  * ice_rem_prof_from_list - remove a profile from list
5522  * @hw: pointer to the HW struct
5523  * @lst: list to remove the profile from
5524  * @hdl: the profile handle indicating the profile to remove
5525  */
5526 static enum ice_status
5527 ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
5528 {
5529 	struct ice_vsig_prof *ent, *tmp;
5530 
5531 	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
5532 		if (ent->profile_cookie == hdl) {
5533 			LIST_DEL(&ent->list);
5534 			ice_free(hw, ent);
5535 			return ICE_SUCCESS;
5536 		}
5537 
5538 	return ICE_ERR_DOES_NOT_EXIST;
5539 }
5540 
5541 /**
5542  * ice_rem_prof_id_flow - remove flow
5543  * @hw: pointer to the HW struct
5544  * @blk: hardware block
5545  * @vsi: the VSI from which to remove the profile specified by ID
5546  * @hdl: profile tracking handle
5547  *
5548  * Calling this function will update the hardware tables to remove the
5549  * profile indicated by the hdl parameter for the specified VSI. Once
5550  * this function succeeds, the flow will be disabled for that VSI.
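 *
 * ice_rem_flow() below applies this routine to each VSI in an array.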
5551  */
5552 enum ice_status
5553 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5554 {
5555 	struct ice_vsig_prof *tmp1, *del1;
5556 	struct ice_chs_chg *tmp, *del;
5557 	struct LIST_HEAD_TYPE chg, copy;
5558 	enum ice_status status;
5559 	u16 vsig;
5560 
5561 	INIT_LIST_HEAD(&copy);
5562 	INIT_LIST_HEAD(&chg);
5563 
5564 	/* determine if VSI is already part of a VSIG */
5565 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5566 	if (!status && vsig) {
5567 		bool last_profile;
5568 		bool only_vsi;
5569 		u16 ref;
5570 
5571 		/* found in VSIG */
5572 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
5573 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5574 		if (status)
5575 			goto err_ice_rem_prof_id_flow;
5576 		only_vsi = (ref == 1);
5577 
5578 		if (only_vsi) {
5579 			/* If the original VSIG only contains one reference,
5580 			 * which will be the requesting VSI, then the VSI is not
5581 			 * sharing entries and we can simply remove the specific
5582 			 * characteristics from the VSIG.
5583 			 */
5584 
5585 			if (last_profile) {
5586 				/* If there are no profiles left for this VSIG,
5587 				 * then simply remove the VSIG.
5588 				 */
5589 				status = ice_rem_vsig(hw, blk, vsig, &chg);
5590 				if (status)
5591 					goto err_ice_rem_prof_id_flow;
5592 			} else {
5593 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
5594 							      hdl, &chg);
5595 				if (status)
5596 					goto err_ice_rem_prof_id_flow;
5597 
5598 				/* Adjust priorities */
5599 				status = ice_adj_prof_priorities(hw, blk, vsig,
5600 								 &chg);
5601 				if (status)
5602 					goto err_ice_rem_prof_id_flow;
5603 			}
5604 
5605 		} else {
5606 			/* Make a copy of the VSIG's list of Profiles */
5607 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
5608 			if (status)
5609 				goto err_ice_rem_prof_id_flow;
5610 
5611 			/* Remove specified profile entry from the list */
5612 			status = ice_rem_prof_from_list(hw, &copy, hdl);
5613 			if (status)
5614 				goto err_ice_rem_prof_id_flow;
5615 
5616 			if (LIST_EMPTY(&copy)) {
5617 				status = ice_move_vsi(hw, blk, vsi,
5618 						      ICE_DEFAULT_VSIG, &chg);
5619 				if (status)
5620 					goto err_ice_rem_prof_id_flow;
5621 
5622 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
5623 							    &vsig)) {
5624 				/* Found a VSIG whose profile list exactly
5625 				 * matches the remaining characteristics;
5626 				 * move the VSI to that VSIG.
5627 				 */
5631 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5632 				if (status)
5633 					goto err_ice_rem_prof_id_flow;
5634 			} else {
5635 				/* since no existing VSIG supports this
5636 				 * characteristic pattern, we need to create a
5637 				 * new VSIG and TCAM entries
5638 				 */
5639 				status = ice_create_vsig_from_lst(hw, blk, vsi,
5640 								  &copy, &vsig,
5641 								  &chg);
5642 				if (status)
5643 					goto err_ice_rem_prof_id_flow;
5644 
5645 				/* Adjust priorities */
5646 				status = ice_adj_prof_priorities(hw, blk, vsig,
5647 								 &chg);
5648 				if (status)
5649 					goto err_ice_rem_prof_id_flow;
5650 			}
5651 		}
5652 	} else {
5653 		status = ICE_ERR_DOES_NOT_EXIST;
5654 	}
5655 
5656 	/* update hardware tables */
5657 	if (!status)
5658 		status = ice_upd_prof_hw(hw, blk, &chg);
5659 
5660 err_ice_rem_prof_id_flow:
5661 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5662 		LIST_DEL(&del->list_entry);
5663 		ice_free(hw, del);
5664 	}
5665 
5666 	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
5667 		LIST_DEL(&del1->list);
5668 		ice_free(hw, del1);
5669 	}
5670 
5671 	return status;
5672 }
5673 
5674 /**
5675  * ice_rem_flow - remove flow
5676  * @hw: pointer to the HW struct
5677  * @blk: hardware block
5678  * @vsi: array of VSIs from which to remove the profile specified by ID
5679  * @count: number of elements in the VSI array
5680  * @id: profile tracking ID
5681  *
5682  * The function will remove flows from the specified VSIs that were enabled
5683  * using ice_add_flow. The ID value will indicated which profile will be
5684  * using ice_add_flow. The ID value indicates which profile will be
5685  */
5686 enum ice_status
5687 ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
5688 	     u64 id)
5689 {
5690 	enum ice_status status;
5691 	u16 i;
5692 
5693 	for (i = 0; i < count; i++) {
5694 		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
5695 		if (status)
5696 			return status;
5697 	}
5698 
5699 	return ICE_SUCCESS;
5700 }
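
/* Example teardown mirroring the ice_add_flow() sketch above (same
 * hypothetical block, VSI list and tracking ID):
 *
 *	status = ice_rem_flow(hw, ICE_BLK_RSS, vsi_list,
 *			      ARRAY_SIZE(vsi_list), prof_id);
 *
 * As with ice_add_flow(), iteration stops at the first failure, so a
 * nonzero status can leave the profile removed from only a prefix of
 * the VSI array.
 */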
5701