/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"

/* To support per-PF tunneling entries, the package appends the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
#define ICE_TNL_PRE	"TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN,		"TNL_VXLAN_PF" },
	{ TNL_GENEVE,		"TNL_GENEVE_PF" },
	{ TNL_LAST,		"" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}
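
/*
 * Illustrative example (not part of the driver): profile-building code passes
 * enum ice_sect values when constructing update buffers; assuming the enum
 * ice_sect names from ice_flex_type.h (e.g. ICE_PROF_TCAM), a lookup would
 * read:
 *
 *	u32 sid = ice_sect_id(ICE_BLK_SW, ICE_PROF_TCAM);
 *
 * which resolves to ICE_SID_PROFID_TCAM_SW via ice_sect_lkup above.
 */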

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = LE16_TO_CPU(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = LE16_TO_CPU(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 LE32_TO_CPU(ice_seg->device_table_count));

	return (_FORCE_ struct ice_buf_table *)
		(nvms->vers + LE32_TO_CPU(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries of the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base offset in a section's header
 * is 10 and the index of the entry is 2, then the section handler function
 * should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}
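
/*
 * Typical call pattern (illustrative sketch, not driver code): enumeration
 * follows the first-call/continuation-call convention described above, e.g.
 * walking every boost TCAM entry with the handler defined just below:
 *
 *	struct ice_pkg_enum state;
 *	void *ent;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	for (ent = ice_pkg_enum_entry(ice_seg, &state,
 *				      ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *				      ice_boost_tcam_handler);
 *	     ent;
 *	     ent = ice_pkg_enum_entry(NULL, &state,
 *				      ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *				      ice_boost_tcam_handler))
 *		(process one entry per iteration)
 *
 * ice_find_boost_entry() below uses exactly this pattern.
 */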

/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = (struct ice_boost_tcam_section *)section;
	if (index >= LE16_TO_CPU(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = (struct ice_boost_tcam_entry *)
		       ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
			*entry = tcam;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = (struct ice_label_section *)section;
	if (index >= LE16_TO_CPU(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
 * the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
						       NULL,
						       ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = LE16_TO_CPU(label->value);
	return label->name;
}

/**
 * ice_add_tunnel_hint
 * @hw: pointer to the HW structure
 * @label_name: label text
 * @val: value of the tunnel port boost entry
 */
static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		u16 i;

		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for a matching label prefix before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the PF
			 * character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].in_use = false;
				hw->tnl.tbl[hw->tnl.count].marked = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}
	}
}
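
/*
 * Worked example (illustrative): on a function with hw->pf_id == 2, the label
 * "TNL_VXLAN_PF2" matches the "TNL_VXLAN_PF" prefix, label_name[len] is '2',
 * and ('2' - '0') == 2 equals the PF ID, so a TNL_VXLAN hint is recorded with
 * boost_addr = val; "TNL_VXLAN_PF0" would be skipped on that PF.
 */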

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name) {
		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
			/* check for a tunnel entry */
			ice_add_tunnel_hint(hw, label_name, val);

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers for tunnels */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16 bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array where the resulting key portion will be stored
 * @key_inv: pointer to an array where the resulting key invert portion will
 *           be stored
 *
 * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
 * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
 * of key and 8 bits of key invert.
 *
 *     '0' =    b01, always match a 0 bit
 *     '1' =    b10, always match a 1 bit
 *     '?' =    b11, don't care bit (always matches)
 *     '~' =    b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return ICE_SUCCESS;
}
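
/*
 * Worked example (illustrative): encoding only the low bit, with val = 1,
 * valid = 0x01 and no don't care or never match bits:
 *
 *	u8 key = 0, key_inv = 0;
 *
 *	ice_gen_key_word(0x01, 0x01, 0x00, 0x00, &key, &key_inv);
 *
 * Bit 0 is an exact '1' match and is encoded as key bit 0, key invert bit 1
 * (ICE_1_KEY/ICE_1_KEYINV); the other seven bits are outside the valid mask
 * and keep their original zero values, leaving key = 0x00, key_inv = 0x01.
 */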

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += ice_hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}
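
/*
 * For example (illustrative): a two-byte mask { 0x00, 0x10 } has one bit set,
 * so ice_bits_max_set(mask, 2, 1) returns true; { 0x03, 0x00 } has two bits
 * set in the first byte, so ice_bits_max_set(mask, 2, 1) returns false.
 */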

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;
	half_size = size / 2;

	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause the hardware to consume
	 * excessive power; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return ICE_SUCCESS;
}
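
/*
 * Usage sketch (illustrative, not taken from the driver): build an 8-byte
 * key (4 bytes of key followed by 4 bytes of key invert) that matches 0x12
 * in byte 0 and treats all of byte 1 as don't care:
 *
 *	u8 key[8] = { 0 };
 *	u8 val[2] = { 0x12, 0x00 };
 *	u8 dc[2] = { 0x00, 0xff };
 *
 *	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, sizeof(val)))
 *		(handle ICE_ERR_CFG)
 */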

/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}
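
/*
 * Caller sketch (illustrative): a writer must treat ICE_ERR_AQ_NO_WORK as
 * "package already written by another PF" rather than as a failure:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return ICE_SUCCESS; (nothing left to download)
 *	if (status)
 *		return status;
 *	(... write the package, then ...)
 *	ice_release_global_cfg_lock(hw);
 *
 * ice_dwnld_cfg_bufs() below follows this pattern.
 */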

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_upload_section
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer which will receive the section
 * @buf_size: the size of the package buffer
 * @cd: pointer to command details structure or NULL
 *
 * Upload Section (0x0C41)
 */
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));

		if (LE32_TO_CPU(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 */
static enum ice_status
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status = ICE_SUCCESS;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	status = ice_update_pkg_no_lock(hw, bufs, count);

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_SUCCESS;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (LE16_TO_CPU(bh->section_count))
				if (LE32_TO_CPU(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	if (!status) {
		status = ice_set_vlan_mode(hw);
		if (status)
			ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
				  status);
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  LE32_TO_CPU(ice_seg->hdr.seg_type),
		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  LE32_TO_CPU(ice_buf_tbl->buf_count));

	status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				    LE32_TO_CPU(ice_buf_tbl->buf_count));

	ice_post_pkg_dwnld_vlan_mode_cfg(hw);

	return status;
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		/* Get package information from the Metadata Section */
		meta = (struct ice_meta_sect *)
			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					     ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_ERR_CFG;
		}

		hw->pkg_ver = meta->ver;
		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
			   ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(pkg_info->pkg_info[i].name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return status;
}

/**
 * ice_find_label_value
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @name: name of the label to search for
 * @type: the section type that will contain the label
 * @value: pointer to a value that will return the label's value if found
 *
 * Finds a label's value given the label name and the section type to search.
 * The ice_seg parameter must not be NULL since the first call to
 * ice_enum_labels requires a pointer to an actual ice_seg structure.
 */
enum ice_status
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
		     u16 *value)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		label_name = ice_enum_labels(ice_seg, type, &state, &val);
		if (label_name && !strcmp(label_name, name)) {
			*value = val;
			return ICE_SUCCESS;
		}

		ice_seg = NULL;
	} while (label_name);

	return ICE_ERR_CFG;
}

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < ice_struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = LE32_TO_CPU(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < ice_struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + LE32_TO_CPU(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return ICE_SUCCESS;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		ice_free(hw, hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return ICE_SUCCESS;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
			pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	ice_free(hw, pkg);
	return status;
}

/**
 * ice_sw_fv_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the field vector entry to be returned
 * @offset: ptr to variable that receives the offset in the field vector table
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * This function treats the given section as being of type ice_sw_fv_section
 * and enumerates the offset field. "offset" is an index into the field vector
 * table.
 */
static void *
ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_sw_fv_section *fv_section =
		(struct ice_sw_fv_section *)section;

	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
		return NULL;
	if (index >= LE16_TO_CPU(fv_section->count))
		return NULL;
	if (offset)
		/* "index" passed in to this function is relative to a given
		 * 4k block. To get to the true index into the field vector
		 * table, we need to add the relative index to the base_offset
		 * field of this section.
		 */
		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
	return fv_section->fv + index;
}
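
/*
 * For example (illustrative): if a switch field vector section carries
 * base_offset = 256 and the handler is called with index = 3, *offset is set
 * to 256 + 3 = 259, the absolute index of that field vector within the
 * switch block's field vector table.
 */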

/**
 * ice_get_prof_index_max - get the max index of any profile in use
 * @hw: pointer to the HW struct
 *
 * Calling this function will determine the max profile index among the
 * profiles in use and store that index number in struct ice_switch_info
 * *switch_info in hw for later use.
 */
static int ice_get_prof_index_max(struct ice_hw *hw)
{
	u16 prof_index = 0, j, max_prof_index = 0;
	struct ice_pkg_enum state;
	struct ice_seg *ice_seg;
	bool flag = false;
	struct ice_fv *fv;
	u32 offset;

	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!hw->seg)
		return ICE_ERR_PARAM;

	ice_seg = hw->seg;

	do {
		fv = (struct ice_fv *)
			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
					   &offset, ice_sw_fv_handler);
		if (!fv)
			break;
		ice_seg = NULL;

		/* in a profile that is not used, the prot_id is set to 0xff
		 * and the off is set to 0x1ff for all the field vectors.
		 */
		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
				flag = true;
		if (flag && prof_index > max_prof_index)
			max_prof_index = prof_index;

		prof_index++;
		flag = false;
	} while (fv);

	hw->switch_info->max_used_prof_index = max_prof_index;

	return ICE_SUCCESS;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = ICE_SUCCESS;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
		ice_get_prof_index_max(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		ice_free(hw, buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}
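
/*
 * Typical call site (illustrative sketch, assuming the DDP image is fetched
 * through the FreeBSD firmware(9) interface, whose backing memory is released
 * once the driver is done with it):
 *
 *	const struct firmware *fw = firmware_get("ice_ddp");
 *
 *	if (fw != NULL) {
 *		status = ice_copy_and_init_pkg(hw, (const u8 *)fw->data,
 *					       fw->datasize);
 *		firmware_put(fw, FIRMWARE_UNLOAD);
 *	}
 */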
1706 
1707 /**
1708  * ice_pkg_buf_alloc
1709  * @hw: pointer to the HW structure
1710  *
1711  * Allocates a package buffer and returns a pointer to the buffer header.
1712  * Note: all package contents must be in Little Endian form.
1713  */
1714 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1715 {
1716 	struct ice_buf_build *bld;
1717 	struct ice_buf_hdr *buf;
1718 
1719 	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1720 	if (!bld)
1721 		return NULL;
1722 
1723 	buf = (struct ice_buf_hdr *)bld;
1724 	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1725 					     section_entry));
1726 	return bld;
1727 }
1728 
1729 /**
1730  * ice_get_sw_prof_type - determine switch profile type
1731  * @hw: pointer to the HW structure
1732  * @fv: pointer to the switch field vector
1733  */
1734 static enum ice_prof_type
1735 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1736 {
1737 	u16 i;
1738 
1739 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1740 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1741 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1742 		    fv->ew[i].off == ICE_VNI_OFFSET)
1743 			return ICE_PROF_TUN_UDP;
1744 
1745 		/* GRE tunnel will have GRE protocol */
1746 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1747 			return ICE_PROF_TUN_GRE;
1748 	}
1749 
1750 	return ICE_PROF_NON_TUN;
1751 }
1752 
1753 /**
1754  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1755  * @hw: pointer to hardware structure
1756  * @req_profs: type of profiles requested
1757  * @bm: pointer to memory for returning the bitmap of field vectors
1758  */
1759 void
1760 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1761 		     ice_bitmap_t *bm)
1762 {
1763 	struct ice_pkg_enum state;
1764 	struct ice_seg *ice_seg;
1765 	struct ice_fv *fv;
1766 
1767 	if (req_profs == ICE_PROF_ALL) {
1768 		ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1769 		return;
1770 	}
1771 
1772 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1773 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1774 	ice_seg = hw->seg;
1775 	do {
1776 		enum ice_prof_type prof_type;
1777 		u32 offset;
1778 
1779 		fv = (struct ice_fv *)
1780 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1781 					   &offset, ice_sw_fv_handler);
1782 		ice_seg = NULL;
1783 
1784 		if (fv) {
1785 			/* Determine field vector type */
1786 			prof_type = ice_get_sw_prof_type(hw, fv);
1787 
1788 			if (req_profs & prof_type)
1789 				ice_set_bit((u16)offset, bm);
1790 		}
1791 	} while (fv);
1792 }
1793 
1794 /**
1795  * ice_get_sw_fv_list
1796  * @hw: pointer to the HW structure
1797  * @prot_ids: array of protocol IDs that the field vectors must contain
1798  * @ids_cnt: lookup/protocol count
1799  * @bm: bitmap of field vectors to consider
1800  * @fv_list: Head of a list
1801  *
1802  * Finds all the field vector entries from switch block that contain
1803  * a given protocol ID and returns a list of structures of type
1804  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1805  * definition and profile ID information
1806  * NOTE: The caller of the function is responsible for freeing the memory
1807  * allocated for every list entry.
1808  */
1809 enum ice_status
1810 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1811 		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1812 {
1813 	struct ice_sw_fv_list_entry *fvl;
1814 	struct ice_sw_fv_list_entry *tmp;
1815 	struct ice_pkg_enum state;
1816 	struct ice_seg *ice_seg;
1817 	struct ice_fv *fv;
1818 	u32 offset;
1819 
1820 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1821 
1822 	if (!ids_cnt || !hw->seg)
1823 		return ICE_ERR_PARAM;
1824 
1825 	ice_seg = hw->seg;
1826 	do {
1827 		u16 i;
1828 
1829 		fv = (struct ice_fv *)
1830 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1831 					   &offset, ice_sw_fv_handler);
1832 		if (!fv)
1833 			break;
1834 		ice_seg = NULL;
1835 
1836 		/* If field vector is not in the bitmap list, then skip this
1837 		 * profile.
1838 		 */
1839 		if (!ice_is_bit_set(bm, (u16)offset))
1840 			continue;
1841 
1842 		for (i = 0; i < ids_cnt; i++) {
1843 			int j;
1844 
1845 			/* This code assumes that if a switch field vector line
1846 			 * has a matching protocol, then this line will contain
1847 			 * the entries necessary to represent every field in
1848 			 * that protocol header.
1849 			 */
1850 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1851 				if (fv->ew[j].prot_id == prot_ids[i])
1852 					break;
1853 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1854 				break;
1855 			if (i + 1 == ids_cnt) {
1856 				fvl = (struct ice_sw_fv_list_entry *)
1857 					ice_malloc(hw, sizeof(*fvl));
1858 				if (!fvl)
1859 					goto err;
1860 				fvl->fv_ptr = fv;
1861 				fvl->profile_id = offset;
1862 				LIST_ADD(&fvl->list_entry, fv_list);
1863 				break;
1864 			}
1865 		}
1866 	} while (fv);
1867 	if (LIST_EMPTY(fv_list))
1868 		return ICE_ERR_CFG;
1869 	return ICE_SUCCESS;
1870 
1871 err:
1872 	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1873 				 list_entry) {
1874 		LIST_DEL(&fvl->list_entry);
1875 		ice_free(hw, fvl);
1876 	}
1877 
1878 	return ICE_ERR_NO_MEMORY;
1879 }
1880 
1881 /**
1882  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1883  * @hw: pointer to hardware structure
1884  */
1885 void ice_init_prof_result_bm(struct ice_hw *hw)
1886 {
1887 	struct ice_pkg_enum state;
1888 	struct ice_seg *ice_seg;
1889 	struct ice_fv *fv;
1890 
1891 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1892 
1893 	if (!hw->seg)
1894 		return;
1895 
1896 	ice_seg = hw->seg;
1897 	do {
1898 		u32 off;
1899 		u16 i;
1900 
1901 		fv = (struct ice_fv *)
1902 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1903 					   &off, ice_sw_fv_handler);
1904 		ice_seg = NULL;
1905 		if (!fv)
1906 			break;
1907 
1908 		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1909 				ICE_MAX_FV_WORDS);
1910 
1911 		/* Determine empty field vector indices, these can be
1912 		 * used for recipe results. Skip index 0, since it is
1913 		 * always used for Switch ID.
1914 		 */
1915 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1916 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1917 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1918 				ice_set_bit(i,
1919 					    hw->switch_info->prof_res_bm[off]);
1920 	} while (fv);
1921 }
1922 
1923 /**
1924  * ice_pkg_buf_free
1925  * @hw: pointer to the HW structure
1926  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1927  *
1928  * Frees a package buffer
1929  */
1930 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1931 {
1932 	ice_free(hw, bld);
1933 }
1934 
1935 /**
1936  * ice_pkg_buf_reserve_section
1937  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1938  * @count: the number of sections to reserve
1939  *
1940  * Reserves one or more section table entries in a package buffer. This routine
1941  * can be called multiple times, as long as every call is made before the
1942  * first call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1943  * has been called, the number of sections that can be allocated can no
1944  * longer be increased; not using all reserved sections is fine, but this
1945  * will result in some wasted space in the buffer.
1946  * Note: all package contents must be in Little Endian form.
1947  */
1948 static enum ice_status
1949 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1950 {
1951 	struct ice_buf_hdr *buf;
1952 	u16 section_count;
1953 	u16 data_end;
1954 
1955 	if (!bld)
1956 		return ICE_ERR_PARAM;
1957 
1958 	buf = (struct ice_buf_hdr *)&bld->buf;
1959 
1960 	/* already an active section, can't increase table size */
1961 	section_count = LE16_TO_CPU(buf->section_count);
1962 	if (section_count > 0)
1963 		return ICE_ERR_CFG;
1964 
1965 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1966 		return ICE_ERR_CFG;
1967 	bld->reserved_section_table_entries += count;
1968 
1969 	data_end = LE16_TO_CPU(buf->data_end) +
1970 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1971 	buf->data_end = CPU_TO_LE16(data_end);
1972 
1973 	return ICE_SUCCESS;
1974 }
1975 
1976 /**
1977  * ice_pkg_buf_alloc_section
1978  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1979  * @type: the section type value
1980  * @size: the size of the section to reserve (in bytes)
1981  *
1982  * Reserves memory in the buffer for a section's content and updates the
1983  * buffer's status accordingly. This routine returns a pointer to the first
1984  * byte of the section start within the buffer, which is used to fill in the
1985  * section contents.
1986  * Note: all package contents must be in Little Endian form.
1987  */
1988 static void *
1989 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1990 {
1991 	struct ice_buf_hdr *buf;
1992 	u16 sect_count;
1993 	u16 data_end;
1994 
1995 	if (!bld || !type || !size)
1996 		return NULL;
1997 
1998 	buf = (struct ice_buf_hdr *)&bld->buf;
1999 
2000 	/* check for enough space left in buffer */
2001 	data_end = LE16_TO_CPU(buf->data_end);
2002 
2003 	/* section start must align on 4 byte boundary */
2004 	data_end = ICE_ALIGN(data_end, 4);
2005 
2006 	if ((data_end + size) > ICE_MAX_S_DATA_END)
2007 		return NULL;
2008 
2009 	/* check for more available section table entries */
2010 	sect_count = LE16_TO_CPU(buf->section_count);
2011 	if (sect_count < bld->reserved_section_table_entries) {
2012 		void *section_ptr = ((u8 *)buf) + data_end;
2013 
2014 		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
2015 		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
2016 		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
2017 
2018 		data_end += size;
2019 		buf->data_end = CPU_TO_LE16(data_end);
2020 
2021 		buf->section_count = CPU_TO_LE16(sect_count + 1);
2022 		return section_ptr;
2023 	}
2024 
2025 	/* no free section table entries */
2026 	return NULL;
2027 }
2028 
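/*
 * Illustrative sketch only: the typical build flow using the helpers above.
 * Reserve all section table entries first, then carve out each section's
 * data area, fill it (in Little Endian form), and send the update. The
 * sect_type and sect_size values are placeholders.
 *
 *	struct ice_buf_build *bld;
 *	enum ice_status status;
 *	void *sect;
 *
 *	bld = ice_pkg_buf_alloc(hw);
 *	if (!bld)
 *		return ICE_ERR_NO_MEMORY;
 *	if (ice_pkg_buf_reserve_section(bld, 1))
 *		goto err;
 *	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
 *	if (!sect)
 *		goto err;
 *	... fill *sect in Little Endian form ...
 *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
 * err:
 *	ice_pkg_buf_free(hw, bld);
 */
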
2029 /**
2030  * ice_pkg_buf_alloc_single_section
2031  * @hw: pointer to the HW structure
2032  * @type: the section type value
2033  * @size: the size of the section to reserve (in bytes)
2034  * @section: returns pointer to the section
2035  *
2036  * Allocates a package buffer with a single section.
2037  * Note: all package contents must be in Little Endian form.
2038  */
2039 struct ice_buf_build *
2040 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2041 				 void **section)
2042 {
2043 	struct ice_buf_build *buf;
2044 
2045 	if (!section)
2046 		return NULL;
2047 
2048 	buf = ice_pkg_buf_alloc(hw);
2049 	if (!buf)
2050 		return NULL;
2051 
2052 	if (ice_pkg_buf_reserve_section(buf, 1))
2053 		goto ice_pkg_buf_alloc_single_section_err;
2054 
2055 	*section = ice_pkg_buf_alloc_section(buf, type, size);
2056 	if (!*section)
2057 		goto ice_pkg_buf_alloc_single_section_err;
2058 
2059 	return buf;
2060 
2061 ice_pkg_buf_alloc_single_section_err:
2062 	ice_pkg_buf_free(hw, buf);
2063 	return NULL;
2064 }
2065 
2066 /**
2067  * ice_pkg_buf_unreserve_section
2068  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2069  * @count: the number of sections to unreserve
2070  *
2071  * Unreserves one or more section table entries in a package buffer, releasing
2072  * space that can be used for section data. This routine can be called
2073  * multiple times, as long as every call is made before the first call to
2074  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
2075  * has been called, the reservation can no longer be changed; not using
2076  * all reserved sections is fine, but this will
2077  * result in some wasted space in the buffer.
2078  * Note: all package contents must be in Little Endian form.
2079  */
2080 enum ice_status
2081 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
2082 {
2083 	struct ice_buf_hdr *buf;
2084 	u16 section_count;
2085 	u16 data_end;
2086 
2087 	if (!bld)
2088 		return ICE_ERR_PARAM;
2089 
2090 	buf = (struct ice_buf_hdr *)&bld->buf;
2091 
2092 	/* already an active section, can't decrease table size */
2093 	section_count = LE16_TO_CPU(buf->section_count);
2094 	if (section_count > 0)
2095 		return ICE_ERR_CFG;
2096 
2097 	if (count > bld->reserved_section_table_entries)
2098 		return ICE_ERR_CFG;
2099 	bld->reserved_section_table_entries -= count;
2100 
2101 	data_end = LE16_TO_CPU(buf->data_end) -
2102 		FLEX_ARRAY_SIZE(buf, section_entry, count);
2103 	buf->data_end = CPU_TO_LE16(data_end);
2104 
2105 	return ICE_SUCCESS;
2106 }
2107 
2108 /**
2109  * ice_pkg_buf_get_free_space
2110  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2111  *
2112  * Returns the number of free bytes remaining in the buffer.
2113  * Note: all package contents must be in Little Endian form.
2114  */
2115 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
2116 {
2117 	struct ice_buf_hdr *buf;
2118 
2119 	if (!bld)
2120 		return 0;
2121 
2122 	buf = (struct ice_buf_hdr *)&bld->buf;
2123 	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
2124 }
2125 
2126 /**
2127  * ice_pkg_buf_get_active_sections
2128  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2129  *
2130  * Returns the number of active sections. Before using the package buffer
2131  * in an update package command, the caller should make sure that there is at
2132  * least one active section - otherwise, the buffer is not legal and should
2133  * not be used.
2134  * Note: all package contents must be in Little Endian form.
2135  */
2136 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2137 {
2138 	struct ice_buf_hdr *buf;
2139 
2140 	if (!bld)
2141 		return 0;
2142 
2143 	buf = (struct ice_buf_hdr *)&bld->buf;
2144 	return LE16_TO_CPU(buf->section_count);
2145 }
2146 
2147 /**
2148  * ice_pkg_buf
2149  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2150  *
2151  * Return a pointer to the buffer's header
2152  */
2153 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2154 {
2155 	if (!bld)
2156 		return NULL;
2157 
2158 	return &bld->buf;
2159 }
2160 
2161 /**
2162  * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
2163  * @hw: pointer to the HW structure
2164  * @port: port to search for
2165  * @index: optionally returns index
2166  *
2167  * Returns whether a port is already in use as a tunnel, and optionally its
2168  * index
2169  */
2170 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2171 {
2172 	u16 i;
2173 
2174 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2175 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2176 			if (index)
2177 				*index = i;
2178 			return true;
2179 		}
2180 
2181 	return false;
2182 }
2183 
2184 /**
2185  * ice_tunnel_port_in_use
2186  * @hw: pointer to the HW structure
2187  * @port: port to search for
2188  * @index: optionally returns index
2189  *
2190  * Returns whether a port is already in use as a tunnel, and optionally its
2191  * index
2192  */
2193 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2194 {
2195 	bool res;
2196 
2197 	ice_acquire_lock(&hw->tnl_lock);
2198 	res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2199 	ice_release_lock(&hw->tnl_lock);
2200 
2201 	return res;
2202 }
2203 
2204 /**
2205  * ice_tunnel_get_type
2206  * @hw: pointer to the HW structure
2207  * @port: port to search for
2208  * @type: returns tunnel type
2209  *
2210  * For a given port number, returns the type of the tunnel occupying it.
2211  */
2212 bool
2213 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2214 {
2215 	bool res = false;
2216 	u16 i;
2217 
2218 	ice_acquire_lock(&hw->tnl_lock);
2219 
2220 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2221 		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2222 			*type = hw->tnl.tbl[i].type;
2223 			res = true;
2224 			break;
2225 		}
2226 
2227 	ice_release_lock(&hw->tnl_lock);
2228 
2229 	return res;
2230 }
2231 
2232 /**
2233  * ice_find_free_tunnel_entry
2234  * @hw: pointer to the HW structure
2235  * @type: tunnel type
2236  * @index: optionally returns index
2237  *
2238  * Returns whether there is a free tunnel entry, and optionally its index
2239  */
2240 static bool
2241 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2242 			   u16 *index)
2243 {
2244 	u16 i;
2245 
2246 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2247 		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2248 		    hw->tnl.tbl[i].type == type) {
2249 			if (index)
2250 				*index = i;
2251 			return true;
2252 		}
2253 
2254 	return false;
2255 }
2256 
2257 /**
2258  * ice_get_open_tunnel_port - retrieve an open tunnel port
2259  * @hw: pointer to the HW structure
2260  * @type: tunnel type (TNL_ALL will return any open port)
2261  * @port: returns open port
2262  */
2263 bool
2264 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2265 			 u16 *port)
2266 {
2267 	bool res = false;
2268 	u16 i;
2269 
2270 	ice_acquire_lock(&hw->tnl_lock);
2271 
2272 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2273 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2274 		    (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2275 			*port = hw->tnl.tbl[i].port;
2276 			res = true;
2277 			break;
2278 		}
2279 
2280 	ice_release_lock(&hw->tnl_lock);
2281 
2282 	return res;
2283 }
2284 
2285 /**
2286  * ice_create_tunnel
2287  * @hw: pointer to the HW structure
2288  * @type: type of tunnel
2289  * @port: port of tunnel to create
2290  *
2291  * Create a tunnel by updating the parse graph in the parser. We do that by
2292  * creating a package buffer with the tunnel info and issuing an update package
2293  * command.
2294  */
2295 enum ice_status
2296 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2297 {
2298 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2299 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2300 	struct ice_buf_build *bld;
2301 	u16 index;
2302 
2303 	ice_acquire_lock(&hw->tnl_lock);
2304 
2305 	if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2306 		hw->tnl.tbl[index].ref++;
2307 		status = ICE_SUCCESS;
2308 		goto ice_create_tunnel_end;
2309 	}
2310 
2311 	if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2312 		status = ICE_ERR_OUT_OF_RANGE;
2313 		goto ice_create_tunnel_end;
2314 	}
2315 
2316 	bld = ice_pkg_buf_alloc(hw);
2317 	if (!bld) {
2318 		status = ICE_ERR_NO_MEMORY;
2319 		goto ice_create_tunnel_end;
2320 	}
2321 
2322 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2323 	if (ice_pkg_buf_reserve_section(bld, 2))
2324 		goto ice_create_tunnel_err;
2325 
2326 	sect_rx = (struct ice_boost_tcam_section *)
2327 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2328 					  ice_struct_size(sect_rx, tcam, 1));
2329 	if (!sect_rx)
2330 		goto ice_create_tunnel_err;
2331 	sect_rx->count = CPU_TO_LE16(1);
2332 
2333 	sect_tx = (struct ice_boost_tcam_section *)
2334 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2335 					  ice_struct_size(sect_tx, tcam, 1));
2336 	if (!sect_tx)
2337 		goto ice_create_tunnel_err;
2338 	sect_tx->count = CPU_TO_LE16(1);
2339 
2340 	/* copy original boost entry to update package buffer */
2341 	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2342 		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2343 
2344 	/* overwrite the never-match dest port key bits with the encoded port
2345 	 * bits
2346 	 */
2347 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2348 		    (u8 *)&port, NULL, NULL, NULL,
2349 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2350 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2351 
2352 	/* exact copy of entry to Tx section entry */
2353 	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2354 		   ICE_NONDMA_TO_NONDMA);
2355 
2356 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2357 	if (!status) {
2358 		hw->tnl.tbl[index].port = port;
2359 		hw->tnl.tbl[index].in_use = true;
2360 		hw->tnl.tbl[index].ref = 1;
2361 	}
2362 
2363 ice_create_tunnel_err:
2364 	ice_pkg_buf_free(hw, bld);
2365 
2366 ice_create_tunnel_end:
2367 	ice_release_lock(&hw->tnl_lock);
2368 
2369 	return status;
2370 }
2371 
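/*
 * Illustrative sketch only: opening and later closing a VXLAN tunnel port.
 * The port value 4789 (the IANA-assigned VXLAN UDP port) is a placeholder.
 * Repeated creates of the same port only bump the reference count, so each
 * successful ice_create_tunnel() must be balanced by an ice_destroy_tunnel()
 * (or by a single call with all == true).
 *
 *	enum ice_status status;
 *
 *	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);
 *	...
 *	if (!status)
 *		status = ice_destroy_tunnel(hw, 4789, false);
 */
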
2372 /**
2373  * ice_destroy_tunnel
2374  * @hw: pointer to the HW structure
2375  * @port: port of tunnel to destroy (ignored if the all parameter is true)
2376  * @all: flag that states to destroy all tunnels
2377  *
2378  * Destroys a tunnel or all tunnels by creating an update package buffer
2379  * targeting the specific updates requested and then performing an update
2380  * package.
2381  */
2382 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2383 {
2384 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
2385 	enum ice_status status = ICE_ERR_MAX_LIMIT;
2386 	struct ice_buf_build *bld;
2387 	u16 count = 0;
2388 	u16 index;
2389 	u16 size;
2390 	u16 i, j;
2391 
2392 	ice_acquire_lock(&hw->tnl_lock);
2393 
2394 	if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2395 		if (hw->tnl.tbl[index].ref > 1) {
2396 			hw->tnl.tbl[index].ref--;
2397 			status = ICE_SUCCESS;
2398 			goto ice_destroy_tunnel_end;
2399 		}
2400 
2401 	/* determine count */
2402 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2403 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2404 		    (all || hw->tnl.tbl[i].port == port))
2405 			count++;
2406 
2407 	if (!count) {
2408 		status = ICE_ERR_PARAM;
2409 		goto ice_destroy_tunnel_end;
2410 	}
2411 
2412 	/* size of section - there is at least one entry */
2413 	size = ice_struct_size(sect_rx, tcam, count);
2414 
2415 	bld = ice_pkg_buf_alloc(hw);
2416 	if (!bld) {
2417 		status = ICE_ERR_NO_MEMORY;
2418 		goto ice_destroy_tunnel_end;
2419 	}
2420 
2421 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
2422 	if (ice_pkg_buf_reserve_section(bld, 2))
2423 		goto ice_destroy_tunnel_err;
2424 
2425 	sect_rx = (struct ice_boost_tcam_section *)
2426 		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2427 					  size);
2428 	if (!sect_rx)
2429 		goto ice_destroy_tunnel_err;
2430 	sect_rx->count = CPU_TO_LE16(count);
2431 
2432 	sect_tx = (struct ice_boost_tcam_section *)
2433 		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2434 					  size);
2435 	if (!sect_tx)
2436 		goto ice_destroy_tunnel_err;
2437 	sect_tx->count = CPU_TO_LE16(count);
2438 
2439 	/* copy original boost entry to update package buffer, one copy to Rx
2440 	 * section, another copy to the Tx section
2441 	 */
2442 	for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2443 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2444 		    (all || hw->tnl.tbl[i].port == port)) {
2445 			ice_memcpy(sect_rx->tcam + j,
2446 				   hw->tnl.tbl[i].boost_entry,
2447 				   sizeof(*sect_rx->tcam),
2448 				   ICE_NONDMA_TO_NONDMA);
2449 			ice_memcpy(sect_tx->tcam + j,
2450 				   hw->tnl.tbl[i].boost_entry,
2451 				   sizeof(*sect_tx->tcam),
2452 				   ICE_NONDMA_TO_NONDMA);
2453 			hw->tnl.tbl[i].marked = true;
2454 			j++;
2455 		}
2456 
2457 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2458 	if (!status)
2459 		for (i = 0; i < hw->tnl.count &&
2460 		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
2461 			if (hw->tnl.tbl[i].marked) {
2462 				hw->tnl.tbl[i].ref = 0;
2463 				hw->tnl.tbl[i].port = 0;
2464 				hw->tnl.tbl[i].in_use = false;
2465 				hw->tnl.tbl[i].marked = false;
2466 			}
2467 
2468 ice_destroy_tunnel_err:
2469 	ice_pkg_buf_free(hw, bld);
2470 
2471 ice_destroy_tunnel_end:
2472 	ice_release_lock(&hw->tnl_lock);
2473 
2474 	return status;
2475 }
2476 
2477 /**
2478  * ice_replay_tunnels
2479  * @hw: pointer to the HW structure
2480  *
2481  * Replays all tunnels
2482  */
2483 enum ice_status ice_replay_tunnels(struct ice_hw *hw)
2484 {
2485 	enum ice_status status = ICE_SUCCESS;
2486 	u16 i;
2487 
2488 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2489 
2490 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
2491 		enum ice_tunnel_type type = hw->tnl.tbl[i].type;
2492 		u16 refs = hw->tnl.tbl[i].ref;
2493 		u16 port = hw->tnl.tbl[i].port;
2494 
2495 		if (!hw->tnl.tbl[i].in_use)
2496 			continue;
2497 
2498 		/* Replay tunnels one at a time by destroying them, then
2499 		 * recreating them
2500 		 */
2501 		hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
2502 		status = ice_destroy_tunnel(hw, port, false);
2503 		if (status) {
2504 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
2505 				  status, port);
2506 			break;
2507 		}
2508 
2509 		status = ice_create_tunnel(hw, type, port);
2510 		if (status) {
2511 			ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n",
2512 				  status, port);
2513 			break;
2514 		}
2515 
2516 		/* reset to original ref count */
2517 		hw->tnl.tbl[i].ref = refs;
2518 	}
2519 
2520 	return status;
2521 }
2522 
2523 /**
2524  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2525  * @hw: pointer to the hardware structure
2526  * @blk: hardware block
2527  * @prof: profile ID
2528  * @fv_idx: field vector word index
2529  * @prot: variable to receive the protocol ID
2530  * @off: variable to receive the protocol offset
2531  */
2532 enum ice_status
2533 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2534 		  u8 *prot, u16 *off)
2535 {
2536 	struct ice_fv_word *fv_ext;
2537 
2538 	if (prof >= hw->blk[blk].es.count)
2539 		return ICE_ERR_PARAM;
2540 
2541 	if (fv_idx >= hw->blk[blk].es.fvw)
2542 		return ICE_ERR_PARAM;
2543 
2544 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2545 
2546 	*prot = fv_ext[fv_idx].prot_id;
2547 	*off = fv_ext[fv_idx].off;
2548 
2549 	return ICE_SUCCESS;
2550 }
2551 
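/*
 * Illustrative sketch only: translating a field vector word of a switch
 * profile into its protocol ID/offset pair. The profile ID (5) and word
 * index (2) are placeholders.
 *
 *	u8 prot;
 *	u16 off;
 *
 *	if (!ice_find_prot_off(hw, ICE_BLK_SW, 5, 2, &prot, &off))
 *		ice_debug(hw, ICE_DBG_PKG, "prot %u off %u\n", prot, off);
 */
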
2552 /* PTG Management */
2553 
2554 /**
2555  * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
2556  * @hw: pointer to the hardware structure
2557  * @blk: HW block
2558  *
2559  * This function will update the XLT1 hardware table to reflect the new
2560  * packet type group configuration.
2561  */
2562 enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
2563 {
2564 	struct ice_xlt1_section *sect;
2565 	struct ice_buf_build *bld;
2566 	enum ice_status status;
2567 	u16 index;
2568 
2569 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
2570 					       ice_struct_size(sect, value,
2571 							       ICE_XLT1_CNT),
2572 					       (void **)&sect);
2573 	if (!bld)
2574 		return ICE_ERR_NO_MEMORY;
2575 
2576 	sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
2577 	sect->offset = CPU_TO_LE16(0);
2578 	for (index = 0; index < ICE_XLT1_CNT; index++)
2579 		sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
2580 
2581 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2582 
2583 	ice_pkg_buf_free(hw, bld);
2584 
2585 	return status;
2586 }
2587 
2588 /**
2589  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2590  * @hw: pointer to the hardware structure
2591  * @blk: HW block
2592  * @ptype: the ptype to search for
2593  * @ptg: pointer to variable that receives the PTG
2594  *
2595  * This function will search the PTGs for a particular ptype, returning the
2596  * PTG ID that contains it through the PTG parameter, with the value of
2597  * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2598  */
2599 static enum ice_status
2600 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2601 {
2602 	if (ptype >= ICE_XLT1_CNT || !ptg)
2603 		return ICE_ERR_PARAM;
2604 
2605 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2606 	return ICE_SUCCESS;
2607 }
2608 
2609 /**
2610  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2611  * @hw: pointer to the hardware structure
2612  * @blk: HW block
2613  * @ptg: the PTG to allocate
2614  *
2615  * This function allocates the specific packet type group ID given by the
2616  * PTG parameter, marking it as in use.
2617  */
2618 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2619 {
2620 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2621 }
2622 
2623 /**
2624  * ice_ptg_free - Frees a packet type group
2625  * @hw: pointer to the hardware structure
2626  * @blk: HW block
2627  * @ptg: the PTG ID to free
2628  *
2629  * This function frees a packet type group, and returns all the current ptypes
2630  * within it to the default PTG.
2631  */
2632 void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2633 {
2634 	struct ice_ptg_ptype *p, *temp;
2635 
2636 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
2637 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2638 	while (p) {
2639 		p->ptg = ICE_DEFAULT_PTG;
2640 		temp = p->next_ptype;
2641 		p->next_ptype = NULL;
2642 		p = temp;
2643 	}
2644 
2645 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
2646 }
2647 
2648 /**
2649  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2650  * @hw: pointer to the hardware structure
2651  * @blk: HW block
2652  * @ptype: the ptype to remove
2653  * @ptg: the PTG to remove the ptype from
2654  *
2655  * This function will remove the ptype from the specific PTG, and move it to
2656  * the default PTG (ICE_DEFAULT_PTG).
2657  */
2658 static enum ice_status
2659 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2660 {
2661 	struct ice_ptg_ptype **ch;
2662 	struct ice_ptg_ptype *p;
2663 
2664 	if (ptype > ICE_XLT1_CNT - 1)
2665 		return ICE_ERR_PARAM;
2666 
2667 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2668 		return ICE_ERR_DOES_NOT_EXIST;
2669 
2670 	/* Should not happen if .in_use is set, bad config */
2671 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2672 		return ICE_ERR_CFG;
2673 
2674 	/* find the ptype within this PTG, and bypass the link over it */
2675 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2676 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2677 	while (p) {
2678 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2679 			*ch = p->next_ptype;
2680 			break;
2681 		}
2682 
2683 		ch = &p->next_ptype;
2684 		p = p->next_ptype;
2685 	}
2686 
2687 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2688 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2689 
2690 	return ICE_SUCCESS;
2691 }
2692 
2693 /**
2694  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2695  * @hw: pointer to the hardware structure
2696  * @blk: HW block
2697  * @ptype: the ptype to add or move
2698  * @ptg: the PTG to add or move the ptype to
2699  *
2700  * This function will either add or move a ptype to a particular PTG depending
2701  * on whether the ptype is already part of another group. Note that using a
2702  * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2703  * default PTG.
2704  */
2705 static enum ice_status
2706 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2707 {
2708 	enum ice_status status;
2709 	u8 original_ptg;
2710 
2711 	if (ptype > ICE_XLT1_CNT - 1)
2712 		return ICE_ERR_PARAM;
2713 
2714 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2715 		return ICE_ERR_DOES_NOT_EXIST;
2716 
2717 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2718 	if (status)
2719 		return status;
2720 
2721 	/* Is ptype already in the correct PTG? */
2722 	if (original_ptg == ptg)
2723 		return ICE_SUCCESS;
2724 
2725 	/* Remove from original PTG and move back to the default PTG */
2726 	if (original_ptg != ICE_DEFAULT_PTG)
2727 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2728 
2729 	/* Moving to default PTG? Then we're done with this request */
2730 	if (ptg == ICE_DEFAULT_PTG)
2731 		return ICE_SUCCESS;
2732 
2733 	/* Add ptype to PTG at beginning of list */
2734 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2735 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2736 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2737 		&hw->blk[blk].xlt1.ptypes[ptype];
2738 
2739 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2740 	hw->blk[blk].xlt1.t[ptype] = ptg;
2741 
2742 	return ICE_SUCCESS;
2743 }
2744 
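/*
 * Illustrative sketch only: moving a ptype into a non-default PTG with the
 * helpers above, then pushing the result to hardware via the XLT1 table.
 * The ptype (100) and PTG (5) values are placeholders.
 *
 *	enum ice_status status;
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 5);
 *	if (!ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 100, 5))
 *		status = ice_ptg_update_xlt1(hw, ICE_BLK_RSS);
 */
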
2745 /* Block / table size info */
2746 struct ice_blk_size_details {
2747 	u16 xlt1;			/* # XLT1 entries */
2748 	u16 xlt2;			/* # XLT2 entries */
2749 	u16 prof_tcam;			/* # profile ID TCAM entries */
2750 	u16 prof_id;			/* # profile IDs */
2751 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
2752 	u16 prof_redir;			/* # profile redirection entries */
2753 	u16 es;				/* # extraction sequence entries */
2754 	u16 fvw;			/* # field vector words */
2755 	u8 overwrite;			/* overwrite existing entries allowed */
2756 	u8 reverse;			/* reverse FV order */
2757 };
2758 
2759 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2760 	/**
2761 	 * Table Definitions
2762 	 * XLT1 - Number of entries in XLT1 table
2763 	 * XLT2 - Number of entries in XLT2 table
2764 	 * TCAM - Number of entries in the Profile ID TCAM table
2765 	 * CDID - Number of Control Domain ID (CDID) one-hot bits used in the key
2766 	 * PRED - Number of entries in the Profile Redirection Table
2767 	 * FV   - Number of entries in the Field Vector
2768 	 * FVW  - Width (in WORDs) of the Field Vector
2769 	 * OVR  - Overwrite existing table entries
2770 	 * REV  - Reverse FV
2771 	 */
2772 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
2773 	/*          Overwrite   , Reverse FV */
2774 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
2775 		    false, false },
2776 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
2777 		    false, false },
2778 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2779 		    false, true  },
2780 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
2781 		    true,  true  },
2782 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
2783 		    false, false },
2784 };
2785 
2786 enum ice_sid_all {
2787 	ICE_SID_XLT1_OFF = 0,
2788 	ICE_SID_XLT2_OFF,
2789 	ICE_SID_PR_OFF,
2790 	ICE_SID_PR_REDIR_OFF,
2791 	ICE_SID_ES_OFF,
2792 	ICE_SID_OFF_COUNT,
2793 };
2794 
2795 /* Characteristic handling */
2796 
2797 /**
2798  * ice_match_prop_lst - determine if properties of two lists match
2799  * @list1: first properties list
2800  * @list2: second properties list
2801  *
2802  * Count, cookies, and order must all match to be considered equivalent.
2803  */
2804 static bool
2805 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2806 {
2807 	struct ice_vsig_prof *tmp1;
2808 	struct ice_vsig_prof *tmp2;
2809 	u16 chk_count = 0;
2810 	u16 count = 0;
2811 
2812 	/* compare counts */
2813 	LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2814 		count++;
2815 	LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2816 		chk_count++;
2817 	if (!count || count != chk_count)
2818 		return false;
2819 
2820 	tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2821 	tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2822 
2823 	/* profile cookies must match, and in the exact same order, to take
2824 	 * priority into account
2825 	 */
2826 	while (count--) {
2827 		if (tmp2->profile_cookie != tmp1->profile_cookie)
2828 			return false;
2829 
2830 		tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2831 		tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2832 	}
2833 
2834 	return true;
2835 }
2836 
2837 /* VSIG Management */
2838 
2839 /**
2840  * ice_vsig_update_xlt2_sect - update one section of XLT2 table
2841  * @hw: pointer to the hardware structure
2842  * @blk: HW block
2843  * @vsi: HW VSI number to program
2844  * @vsig: VSIG for the VSI
2845  *
2846  * This function will update the XLT2 hardware table with the input VSI
2847  * group configuration.
2848  */
2849 static enum ice_status
2850 ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
2851 			  u16 vsig)
2852 {
2853 	struct ice_xlt2_section *sect;
2854 	struct ice_buf_build *bld;
2855 	enum ice_status status;
2856 
2857 	bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
2858 					       ice_struct_size(sect, value, 1),
2859 					       (void **)&sect);
2860 	if (!bld)
2861 		return ICE_ERR_NO_MEMORY;
2862 
2863 	sect->count = CPU_TO_LE16(1);
2864 	sect->offset = CPU_TO_LE16(vsi);
2865 	sect->value[0] = CPU_TO_LE16(vsig);
2866 
2867 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2868 
2869 	ice_pkg_buf_free(hw, bld);
2870 
2871 	return status;
2872 }
2873 
2874 /**
2875  * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
2876  * @hw: pointer to the hardware structure
2877  * @blk: HW block
2878  *
2879  * This function will update the XLT2 hardware table with the input VSI
2880  * group configuration for each VSI that has been marked as changed.
2881  */
2882 enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
2883 {
2884 	u16 vsi;
2885 
2886 	for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
2887 		/* update only vsis that have been changed */
2888 		if (hw->blk[blk].xlt2.vsis[vsi].changed) {
2889 			enum ice_status status;
2890 			u16 vsig;
2891 
2892 			vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2893 			status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
2894 			if (status)
2895 				return status;
2896 
2897 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2898 		}
2899 	}
2900 
2901 	return ICE_SUCCESS;
2902 }
2903 
2904 /**
2905  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2906  * @hw: pointer to the hardware structure
2907  * @blk: HW block
2908  * @vsi: VSI of interest
2909  * @vsig: pointer to receive the VSI group
2910  *
2911  * This function will look up the VSI entry in the XLT2 list and return
2912  * the VSI group it is associated with.
2913  */
2914 enum ice_status
2915 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2916 {
2917 	if (!vsig || vsi >= ICE_MAX_VSI)
2918 		return ICE_ERR_PARAM;
2919 
2920 	/* As long as there's a default or valid VSIG associated with the input
2921 	 * VSI, the function returns success. Any handling of the VSIG will be
2922 	 * done by the following add, update or remove functions.
2923 	 */
2924 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2925 
2926 	return ICE_SUCCESS;
2927 }
2928 
2929 /**
2930  * ice_vsig_alloc_val - allocate a new VSIG by value
2931  * @hw: pointer to the hardware structure
2932  * @blk: HW block
2933  * @vsig: the VSIG to allocate
2934  *
2935  * This function will allocate a given VSIG specified by the VSIG parameter.
2936  */
2937 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2938 {
2939 	u16 idx = vsig & ICE_VSIG_IDX_M;
2940 
2941 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2942 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2943 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2944 	}
2945 
2946 	return ICE_VSIG_VALUE(idx, hw->pf_id);
2947 }
2948 
2949 /**
2950  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2951  * @hw: pointer to the hardware structure
2952  * @blk: HW block
2953  *
2954  * This function will iterate through the VSIG list and mark the first
2955  * unused entry as in use for the new VSIG, returning that value.
2956  */
2957 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2958 {
2959 	u16 i;
2960 
2961 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2962 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2963 			return ice_vsig_alloc_val(hw, blk, i);
2964 
2965 	return ICE_DEFAULT_VSIG;
2966 }
2967 
2968 /**
2969  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2970  * @hw: pointer to the hardware structure
2971  * @blk: HW block
2972  * @chs: characteristic list
2973  * @vsig: returns the VSIG with the matching profiles, if found
2974  *
2975  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2976  * a group have the same characteristic set. To check if there exists a VSIG
2977  * which has the same characteristics as the input characteristics, this
2978  * function will iterate through the XLT2 list and return the VSIG that has a
2979  * matching configuration. In order to make sure that priorities are accounted
2980  * for, the list must match exactly, including the order in which the
2981  * characteristics are listed.
2982  */
2983 static enum ice_status
2984 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2985 			struct LIST_HEAD_TYPE *chs, u16 *vsig)
2986 {
2987 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2988 	u16 i;
2989 
2990 	for (i = 0; i < xlt2->count; i++)
2991 		if (xlt2->vsig_tbl[i].in_use &&
2992 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2993 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2994 			return ICE_SUCCESS;
2995 		}
2996 
2997 	return ICE_ERR_DOES_NOT_EXIST;
2998 }
2999 
3000 /**
3001  * ice_vsig_free - free VSI group
3002  * @hw: pointer to the hardware structure
3003  * @blk: HW block
3004  * @vsig: VSIG to remove
3005  *
3006  * The function will remove all VSIs associated with the input VSIG and move
3007  * them to the DEFAULT_VSIG, then mark the VSIG as available.
3008  */
3009 static enum ice_status
3010 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3011 {
3012 	struct ice_vsig_prof *dtmp, *del;
3013 	struct ice_vsig_vsi *vsi_cur;
3014 	u16 idx;
3015 
3016 	idx = vsig & ICE_VSIG_IDX_M;
3017 	if (idx >= ICE_MAX_VSIGS)
3018 		return ICE_ERR_PARAM;
3019 
3020 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3021 		return ICE_ERR_DOES_NOT_EXIST;
3022 
3023 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
3024 
3025 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3026 	/* If the VSIG has at least 1 VSI then iterate through the
3027 	 * list and remove the VSIs before deleting the group.
3028 	 */
3029 	if (vsi_cur) {
3030 		/* remove all vsis associated with this VSIG XLT2 entry */
3031 		do {
3032 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3033 
3034 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
3035 			vsi_cur->changed = 1;
3036 			vsi_cur->next_vsi = NULL;
3037 			vsi_cur = tmp;
3038 		} while (vsi_cur);
3039 
3040 		/* NULL terminate head of VSI list */
3041 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
3042 	}
3043 
3044 	/* free characteristic list */
3045 	LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
3046 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3047 				 ice_vsig_prof, list) {
3048 		LIST_DEL(&del->list);
3049 		ice_free(hw, del);
3050 	}
3051 
3052 	/* if VSIG characteristic list was cleared for reset
3053 	 * re-initialize the list head
3054 	 */
3055 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
3056 
3057 	return ICE_SUCCESS;
3058 }
3059 
3060 /**
3061  * ice_vsig_remove_vsi - remove VSI from VSIG
3062  * @hw: pointer to the hardware structure
3063  * @blk: HW block
3064  * @vsi: VSI to remove
3065  * @vsig: VSI group to remove from
3066  *
3067  * The function will remove the input VSI from its VSI group and move it
3068  * to the DEFAULT_VSIG.
3069  */
3070 static enum ice_status
3071 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3072 {
3073 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
3074 	u16 idx;
3075 
3076 	idx = vsig & ICE_VSIG_IDX_M;
3077 
3078 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3079 		return ICE_ERR_PARAM;
3080 
3081 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3082 		return ICE_ERR_DOES_NOT_EXIST;
3083 
3084 	/* entry already in default VSIG, don't have to remove */
3085 	if (idx == ICE_DEFAULT_VSIG)
3086 		return ICE_SUCCESS;
3087 
3088 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3089 	if (!(*vsi_head))
3090 		return ICE_ERR_CFG;
3091 
3092 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
3093 	vsi_cur = (*vsi_head);
3094 
3095 	/* iterate the VSI list, skip over the entry to be removed */
3096 	while (vsi_cur) {
3097 		if (vsi_tgt == vsi_cur) {
3098 			(*vsi_head) = vsi_cur->next_vsi;
3099 			break;
3100 		}
3101 		vsi_head = &vsi_cur->next_vsi;
3102 		vsi_cur = vsi_cur->next_vsi;
3103 	}
3104 
3105 	/* verify if VSI was removed from group list */
3106 	if (!vsi_cur)
3107 		return ICE_ERR_DOES_NOT_EXIST;
3108 
3109 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
3110 	vsi_cur->changed = 1;
3111 	vsi_cur->next_vsi = NULL;
3112 
3113 	return ICE_SUCCESS;
3114 }
3115 
3116 /**
3117  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
3118  * @hw: pointer to the hardware structure
3119  * @blk: HW block
3120  * @vsi: VSI to move
3121  * @vsig: destination VSI group
3122  *
3123  * This function will move or add the input VSI to the target VSIG.
3124  * The function will find the original VSIG the VSI belongs to and
3125  * move the entry to the DEFAULT_VSIG, update the original VSIG and
3126  * then move the entry to the new VSIG.
3127  */
3128 static enum ice_status
3129 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3130 {
3131 	struct ice_vsig_vsi *tmp;
3132 	enum ice_status status;
3133 	u16 orig_vsig, idx;
3134 
3135 	idx = vsig & ICE_VSIG_IDX_M;
3136 
3137 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3138 		return ICE_ERR_PARAM;
3139 
3140 	/* if the VSIG is not in use and is not the default VSIG, then it
3141 	 * doesn't exist.
3142 	 */
3143 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
3144 	    vsig != ICE_DEFAULT_VSIG)
3145 		return ICE_ERR_DOES_NOT_EXIST;
3146 
3147 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3148 	if (status)
3149 		return status;
3150 
3151 	/* no update required if VSIGs match */
3152 	if (orig_vsig == vsig)
3153 		return ICE_SUCCESS;
3154 
3155 	if (orig_vsig != ICE_DEFAULT_VSIG) {
3156 		/* remove entry from orig_vsig and add to default VSIG */
3157 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
3158 		if (status)
3159 			return status;
3160 	}
3161 
3162 	if (idx == ICE_DEFAULT_VSIG)
3163 		return ICE_SUCCESS;
3164 
3165 	/* Create VSI entry and add VSIG and prop_mask values */
3166 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
3167 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
3168 
3169 	/* Add new entry to the head of the VSIG list */
3170 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3171 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
3172 		&hw->blk[blk].xlt2.vsis[vsi];
3173 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
3174 	hw->blk[blk].xlt2.t[vsi] = vsig;
3175 
3176 	return ICE_SUCCESS;
3177 }
3178 
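/*
 * Illustrative sketch only: placing a VSI into a freshly allocated VSIG and
 * writing the result out through the XLT2 table. The vsi variable is a
 * placeholder for a valid HW VSI number.
 *
 *	enum ice_status status;
 *	u16 vsig = ice_vsig_alloc(hw, ICE_BLK_RSS);
 *
 *	if (vsig != ICE_DEFAULT_VSIG &&
 *	    !ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig))
 *		status = ice_vsig_update_xlt2(hw, ICE_BLK_RSS);
 */
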
3179 /**
3180  * ice_find_prof_id - find profile ID for a given field vector
3181  * @hw: pointer to the hardware structure
3182  * @blk: HW block
3183  * @fv: field vector to search for
3184  * @prof_id: receives the profile ID
3185  */
3186 static enum ice_status
3187 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
3188 		 struct ice_fv_word *fv, u8 *prof_id)
3189 {
3190 	struct ice_es *es = &hw->blk[blk].es;
3191 	u16 off;
3192 	u8 i;
3193 
3194 	for (i = 0; i < (u8)es->count; i++) {
3195 		off = i * es->fvw;
3196 
3197 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3198 			continue;
3199 
3200 		*prof_id = i;
3201 		return ICE_SUCCESS;
3202 	}
3203 
3204 	return ICE_ERR_DOES_NOT_EXIST;
3205 }
3206 
3207 /**
3208  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
3209  * @blk: the block type
3210  * @rsrc_type: pointer to variable to receive the resource type
3211  */
3212 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3213 {
3214 	switch (blk) {
3215 	case ICE_BLK_RSS:
3216 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3217 		break;
3218 	case ICE_BLK_PE:
3219 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
3220 		break;
3221 	default:
3222 		return false;
3223 	}
3224 	return true;
3225 }
3226 
3227 /**
3228  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
3229  * @blk: the block type
3230  * @rsrc_type: pointer to variable to receive the resource type
3231  */
3232 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3233 {
3234 	switch (blk) {
3235 	case ICE_BLK_RSS:
3236 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3237 		break;
3238 	case ICE_BLK_PE:
3239 		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3240 		break;
3241 	default:
3242 		return false;
3243 	}
3244 	return true;
3245 }
3246 
3247 /**
3248  * ice_alloc_tcam_ent - allocate hardware TCAM entry
3249  * @hw: pointer to the HW struct
3250  * @blk: the block to allocate the TCAM for
3251  * @btm: true to allocate from bottom of table, false to allocate from top
3252  * @tcam_idx: pointer to variable to receive the TCAM entry
3253  *
3254  * This function allocates a new entry in a Profile ID TCAM for a specific
3255  * block.
3256  */
3257 static enum ice_status
3258 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3259 		   u16 *tcam_idx)
3260 {
3261 	u16 res_type;
3262 
3263 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3264 		return ICE_ERR_PARAM;
3265 
3266 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3267 }
3268 
3269 /**
3270  * ice_free_tcam_ent - free hardware TCAM entry
3271  * @hw: pointer to the HW struct
3272  * @blk: the block from which to free the TCAM entry
3273  * @tcam_idx: the TCAM entry to free
3274  *
3275  * This function frees an entry in a Profile ID TCAM for a specific block.
3276  */
3277 static enum ice_status
3278 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3279 {
3280 	u16 res_type;
3281 
3282 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3283 		return ICE_ERR_PARAM;
3284 
3285 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3286 }
3287 
3288 /**
3289  * ice_alloc_prof_id - allocate profile ID
3290  * @hw: pointer to the HW struct
3291  * @blk: the block to allocate the profile ID for
3292  * @prof_id: pointer to variable to receive the profile ID
3293  *
3294  * This function allocates a new profile ID, which also corresponds to a Field
3295  * Vector (Extraction Sequence) entry.
3296  */
3297 static enum ice_status
3298 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3299 {
3300 	enum ice_status status;
3301 	u16 res_type;
3302 	u16 get_prof;
3303 
3304 	if (!ice_prof_id_rsrc_type(blk, &res_type))
3305 		return ICE_ERR_PARAM;
3306 
3307 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3308 	if (!status)
3309 		*prof_id = (u8)get_prof;
3310 
3311 	return status;
3312 }
3313 
3314 /**
3315  * ice_free_prof_id - free profile ID
3316  * @hw: pointer to the HW struct
3317  * @blk: the block from which to free the profile ID
3318  * @prof_id: the profile ID to free
3319  *
3320  * This function frees a profile ID, which also corresponds to a Field Vector.
3321  */
3322 static enum ice_status
3323 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3324 {
3325 	u16 tmp_prof_id = (u16)prof_id;
3326 	u16 res_type;
3327 
3328 	if (!ice_prof_id_rsrc_type(blk, &res_type))
3329 		return ICE_ERR_PARAM;
3330 
3331 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3332 }
3333 
3334 /**
3335  * ice_prof_inc_ref - increment reference count for profile
3336  * @hw: pointer to the HW struct
3337  * @blk: the block containing the profile ID
3338  * @prof_id: the profile ID for which to increment the reference count
3339  */
3340 static enum ice_status
3341 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3342 {
3343 	if (prof_id >= hw->blk[blk].es.count)
3344 		return ICE_ERR_PARAM;
3345 
3346 	hw->blk[blk].es.ref_count[prof_id]++;
3347 
3348 	return ICE_SUCCESS;
3349 }
3350 
3351 /**
3352  * ice_write_es - write an extraction sequence to hardware
3353  * @hw: pointer to the HW struct
3354  * @blk: the block in which to write the extraction sequence
3355  * @prof_id: the profile ID to write
3356  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3357  */
3358 static void
3359 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3360 	     struct ice_fv_word *fv)
3361 {
3362 	u16 off;
3363 
3364 	off = prof_id * hw->blk[blk].es.fvw;
3365 	if (!fv) {
3366 		ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3367 			   sizeof(*fv), ICE_NONDMA_MEM);
3368 		hw->blk[blk].es.written[prof_id] = false;
3369 	} else {
3370 		ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3371 			   sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3372 	}
3373 }
3374 
3375 /**
3376  * ice_prof_dec_ref - decrement reference count for profile
3377  * @hw: pointer to the HW struct
3378  * @blk: the block containing the profile ID
3379  * @prof_id: the profile ID for which to decrement the reference count
3380  */
3381 static enum ice_status
3382 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3383 {
3384 	if (prof_id >= hw->blk[blk].es.count)
3385 		return ICE_ERR_PARAM;
3386 
3387 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3388 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
3389 			ice_write_es(hw, blk, prof_id, NULL);
3390 			return ice_free_prof_id(hw, blk, prof_id);
3391 		}
3392 	}
3393 
3394 	return ICE_SUCCESS;
3395 }
3396 
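/*
 * Illustrative sketch only: the profile ID life cycle implied by the helpers
 * above - allocate an ID, write its extraction sequence, and take a
 * reference; the final ice_prof_dec_ref() clears the extraction sequence and
 * frees the ID. The fv pointer is a placeholder.
 *
 *	u8 prof_id;
 *
 *	if (!ice_alloc_prof_id(hw, ICE_BLK_RSS, &prof_id)) {
 *		ice_write_es(hw, ICE_BLK_RSS, prof_id, fv);
 *		ice_prof_inc_ref(hw, ICE_BLK_RSS, prof_id);
 *	}
 *	...
 *	ice_prof_dec_ref(hw, ICE_BLK_RSS, prof_id);
 */
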
3397 /* Block / table section IDs */
3398 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3399 	/* SWITCH */
3400 	{	ICE_SID_XLT1_SW,
3401 		ICE_SID_XLT2_SW,
3402 		ICE_SID_PROFID_TCAM_SW,
3403 		ICE_SID_PROFID_REDIR_SW,
3404 		ICE_SID_FLD_VEC_SW
3405 	},
3406 
3407 	/* ACL */
3408 	{	ICE_SID_XLT1_ACL,
3409 		ICE_SID_XLT2_ACL,
3410 		ICE_SID_PROFID_TCAM_ACL,
3411 		ICE_SID_PROFID_REDIR_ACL,
3412 		ICE_SID_FLD_VEC_ACL
3413 	},
3414 
3415 	/* FD */
3416 	{	ICE_SID_XLT1_FD,
3417 		ICE_SID_XLT2_FD,
3418 		ICE_SID_PROFID_TCAM_FD,
3419 		ICE_SID_PROFID_REDIR_FD,
3420 		ICE_SID_FLD_VEC_FD
3421 	},
3422 
3423 	/* RSS */
3424 	{	ICE_SID_XLT1_RSS,
3425 		ICE_SID_XLT2_RSS,
3426 		ICE_SID_PROFID_TCAM_RSS,
3427 		ICE_SID_PROFID_REDIR_RSS,
3428 		ICE_SID_FLD_VEC_RSS
3429 	},
3430 
3431 	/* PE */
3432 	{	ICE_SID_XLT1_PE,
3433 		ICE_SID_XLT2_PE,
3434 		ICE_SID_PROFID_TCAM_PE,
3435 		ICE_SID_PROFID_REDIR_PE,
3436 		ICE_SID_FLD_VEC_PE
3437 	}
3438 };
3439 
3440 /**
3441  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3442  * @hw: pointer to the hardware structure
3443  * @blk: the HW block to initialize
3444  */
3445 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3446 {
3447 	u16 pt;
3448 
3449 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3450 		u8 ptg;
3451 
3452 		ptg = hw->blk[blk].xlt1.t[pt];
3453 		if (ptg != ICE_DEFAULT_PTG) {
3454 			ice_ptg_alloc_val(hw, blk, ptg);
3455 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3456 		}
3457 	}
3458 }
3459 
3460 /**
3461  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3462  * @hw: pointer to the hardware structure
3463  * @blk: the HW block to initialize
3464  */
3465 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3466 {
3467 	u16 vsi;
3468 
3469 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3470 		u16 vsig;
3471 
3472 		vsig = hw->blk[blk].xlt2.t[vsi];
3473 		if (vsig) {
3474 			ice_vsig_alloc_val(hw, blk, vsig);
3475 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3476 			/* no changes at this time, since this has been
3477 			 * initialized from the original package
3478 			 */
3479 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3480 		}
3481 	}
3482 }
3483 
3484 /**
3485  * ice_init_sw_db - init software database from HW tables
3486  * @hw: pointer to the hardware structure
3487  */
3488 static void ice_init_sw_db(struct ice_hw *hw)
3489 {
3490 	u16 i;
3491 
3492 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3493 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3494 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3495 	}
3496 }
3497 
3498 /**
3499  * ice_fill_tbl - Reads content of a single table type into database
3500  * @hw: pointer to the hardware structure
3501  * @block_id: Block ID of the table to copy
3502  * @sid: Section ID of the table to copy
3503  *
3504  * Will attempt to read the entire content of a given table of a single block
3505  * into the driver database. We assume that the destination buffer will
3506  * always be at least as large as the data contained in the package. If
3507  * this condition is not met, there is most likely an error in the package
3508  * contents.
3509  */
3510 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3511 {
3512 	u32 dst_len, sect_len, offset = 0;
3513 	struct ice_prof_redir_section *pr;
3514 	struct ice_prof_id_section *pid;
3515 	struct ice_xlt1_section *xlt1;
3516 	struct ice_xlt2_section *xlt2;
3517 	struct ice_sw_fv_section *es;
3518 	struct ice_pkg_enum state;
3519 	u8 *src, *dst;
3520 	void *sect;
3521 
3522 	/* if the HW segment pointer is null then the first iteration of
3523 	 * ice_pkg_enum_section() will fail. In this case the HW tables are
3524 	 * left unfilled and the function simply returns.
3525 	 */
3526 	if (!hw->seg) {
3527 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3528 		return;
3529 	}
3530 
3531 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3532 
3533 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
3534 
3535 	while (sect) {
3536 		switch (sid) {
3537 		case ICE_SID_XLT1_SW:
3538 		case ICE_SID_XLT1_FD:
3539 		case ICE_SID_XLT1_RSS:
3540 		case ICE_SID_XLT1_ACL:
3541 		case ICE_SID_XLT1_PE:
3542 			xlt1 = (struct ice_xlt1_section *)sect;
3543 			src = xlt1->value;
3544 			sect_len = LE16_TO_CPU(xlt1->count) *
3545 				sizeof(*hw->blk[block_id].xlt1.t);
3546 			dst = hw->blk[block_id].xlt1.t;
3547 			dst_len = hw->blk[block_id].xlt1.count *
3548 				sizeof(*hw->blk[block_id].xlt1.t);
3549 			break;
3550 		case ICE_SID_XLT2_SW:
3551 		case ICE_SID_XLT2_FD:
3552 		case ICE_SID_XLT2_RSS:
3553 		case ICE_SID_XLT2_ACL:
3554 		case ICE_SID_XLT2_PE:
3555 			xlt2 = (struct ice_xlt2_section *)sect;
3556 			src = (_FORCE_ u8 *)xlt2->value;
3557 			sect_len = LE16_TO_CPU(xlt2->count) *
3558 				sizeof(*hw->blk[block_id].xlt2.t);
3559 			dst = (u8 *)hw->blk[block_id].xlt2.t;
3560 			dst_len = hw->blk[block_id].xlt2.count *
3561 				sizeof(*hw->blk[block_id].xlt2.t);
3562 			break;
3563 		case ICE_SID_PROFID_TCAM_SW:
3564 		case ICE_SID_PROFID_TCAM_FD:
3565 		case ICE_SID_PROFID_TCAM_RSS:
3566 		case ICE_SID_PROFID_TCAM_ACL:
3567 		case ICE_SID_PROFID_TCAM_PE:
3568 			pid = (struct ice_prof_id_section *)sect;
3569 			src = (u8 *)pid->entry;
3570 			sect_len = LE16_TO_CPU(pid->count) *
3571 				sizeof(*hw->blk[block_id].prof.t);
3572 			dst = (u8 *)hw->blk[block_id].prof.t;
3573 			dst_len = hw->blk[block_id].prof.count *
3574 				sizeof(*hw->blk[block_id].prof.t);
3575 			break;
3576 		case ICE_SID_PROFID_REDIR_SW:
3577 		case ICE_SID_PROFID_REDIR_FD:
3578 		case ICE_SID_PROFID_REDIR_RSS:
3579 		case ICE_SID_PROFID_REDIR_ACL:
3580 		case ICE_SID_PROFID_REDIR_PE:
3581 			pr = (struct ice_prof_redir_section *)sect;
3582 			src = pr->redir_value;
3583 			sect_len = LE16_TO_CPU(pr->count) *
3584 				sizeof(*hw->blk[block_id].prof_redir.t);
3585 			dst = hw->blk[block_id].prof_redir.t;
3586 			dst_len = hw->blk[block_id].prof_redir.count *
3587 				sizeof(*hw->blk[block_id].prof_redir.t);
3588 			break;
3589 		case ICE_SID_FLD_VEC_SW:
3590 		case ICE_SID_FLD_VEC_FD:
3591 		case ICE_SID_FLD_VEC_RSS:
3592 		case ICE_SID_FLD_VEC_ACL:
3593 		case ICE_SID_FLD_VEC_PE:
3594 			es = (struct ice_sw_fv_section *)sect;
3595 			src = (u8 *)es->fv;
3596 			sect_len = (u32)(LE16_TO_CPU(es->count) *
3597 					 hw->blk[block_id].es.fvw) *
3598 				sizeof(*hw->blk[block_id].es.t);
3599 			dst = (u8 *)hw->blk[block_id].es.t;
3600 			dst_len = (u32)(hw->blk[block_id].es.count *
3601 					hw->blk[block_id].es.fvw) *
3602 				sizeof(*hw->blk[block_id].es.t);
3603 			break;
3604 		default:
3605 			return;
3606 		}
3607 
3608 		/* if the section offset exceeds destination length, terminate
3609 		 * table fill.
3610 		 */
3611 		if (offset > dst_len)
3612 			return;
3613 
		/* if the sum of the section size and offset exceeds the
		 * destination size, then we are out of bounds of the HW
		 * table size for that PF. Clamp the section length to fill
		 * the remaining table space of that PF.
3618 		 */
3619 		if ((offset + sect_len) > dst_len)
3620 			sect_len = dst_len - offset;
3621 
3622 		ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3623 		offset += sect_len;
3624 		sect = ice_pkg_enum_section(NULL, &state, sid);
3625 	}
3626 }
3627 
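/*
 * Worked example of the bounds clamping above (all numbers are
 * illustrative assumptions, not taken from any real package): suppose
 * dst_len is 1024 bytes and two 768-byte sections are enumerated. The
 * first copy lands at offset 0 and advances offset to 768. For the
 * second section, offset + sect_len (768 + 768) exceeds dst_len, so
 * sect_len is clamped to 1024 - 768 = 256 and only the first 256 bytes
 * are copied; the rest of that section falls outside this PF's slice of
 * the table.
 */
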
3628 /**
 * ice_fill_blk_tbls - Read package contents for tables
 * @hw: pointer to the hardware structure
 *
 * Reads the current package contents and populates the driver
 * database with the data iteratively for all advanced feature
 * blocks. Assumes that the HW tables have been allocated.
3635  */
3636 void ice_fill_blk_tbls(struct ice_hw *hw)
3637 {
3638 	u8 i;
3639 
3640 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3641 		enum ice_block blk_id = (enum ice_block)i;
3642 
3643 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3644 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3645 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3646 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3647 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3648 	}
3649 
3650 	ice_init_sw_db(hw);
3651 }
3652 
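/*
 * Call-order sketch (illustrative; the caller shown here is an assumption,
 * as the package download path lives outside this file):
 *
 *	if (ice_init_hw_tbls(hw))
 *		return;
 *	...download the DDP package, which sets hw->seg...
 *	ice_fill_blk_tbls(hw);
 *
 * ice_fill_blk_tbls() copies the package sections into the allocated
 * tables and then builds the software database via ice_init_sw_db(); if
 * hw->seg is still NULL, the tables are simply left unfilled.
 */
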
3653 /**
3654  * ice_free_prof_map - free profile map
3655  * @hw: pointer to the hardware structure
3656  * @blk_idx: HW block index
3657  */
3658 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3659 {
3660 	struct ice_es *es = &hw->blk[blk_idx].es;
3661 	struct ice_prof_map *del, *tmp;
3662 
3663 	ice_acquire_lock(&es->prof_map_lock);
3664 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3665 				 ice_prof_map, list) {
3666 		LIST_DEL(&del->list);
3667 		ice_free(hw, del);
3668 	}
3669 	INIT_LIST_HEAD(&es->prof_map);
3670 	ice_release_lock(&es->prof_map_lock);
3671 }
3672 
3673 /**
3674  * ice_free_flow_profs - free flow profile entries
3675  * @hw: pointer to the hardware structure
3676  * @blk_idx: HW block index
3677  */
3678 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3679 {
3680 	struct ice_flow_prof *p, *tmp;
3681 
3682 	ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
3683 	LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
3684 				 ice_flow_prof, l_entry) {
3685 		LIST_DEL(&p->l_entry);
3686 
3687 		ice_free(hw, p);
3688 	}
3689 	ice_release_lock(&hw->fl_profs_locks[blk_idx]);
3690 
	/* if the driver is in reset and the tables are being cleared,
	 * re-initialize the flow profile list heads
3693 	 */
3694 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3695 }
3696 
3697 /**
3698  * ice_free_vsig_tbl - free complete VSIG table entries
3699  * @hw: pointer to the hardware structure
3700  * @blk: the HW block on which to free the VSIG table entries
3701  */
3702 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3703 {
3704 	u16 i;
3705 
3706 	if (!hw->blk[blk].xlt2.vsig_tbl)
3707 		return;
3708 
3709 	for (i = 1; i < ICE_MAX_VSIGS; i++)
3710 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3711 			ice_vsig_free(hw, blk, i);
3712 }
3713 
3714 /**
3715  * ice_free_hw_tbls - free hardware table memory
3716  * @hw: pointer to the hardware structure
3717  */
3718 void ice_free_hw_tbls(struct ice_hw *hw)
3719 {
3720 	struct ice_rss_cfg *r, *rt;
3721 	u8 i;
3722 
3723 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3724 		if (hw->blk[i].is_list_init) {
3725 			struct ice_es *es = &hw->blk[i].es;
3726 
3727 			ice_free_prof_map(hw, i);
3728 			ice_destroy_lock(&es->prof_map_lock);
3729 
3730 			ice_free_flow_profs(hw, i);
3731 			ice_destroy_lock(&hw->fl_profs_locks[i]);
3732 
3733 			hw->blk[i].is_list_init = false;
3734 		}
3735 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3736 		ice_free(hw, hw->blk[i].xlt1.ptypes);
3737 		ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
3738 		ice_free(hw, hw->blk[i].xlt1.t);
3739 		ice_free(hw, hw->blk[i].xlt2.t);
3740 		ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
3741 		ice_free(hw, hw->blk[i].xlt2.vsis);
3742 		ice_free(hw, hw->blk[i].prof.t);
3743 		ice_free(hw, hw->blk[i].prof_redir.t);
3744 		ice_free(hw, hw->blk[i].es.t);
3745 		ice_free(hw, hw->blk[i].es.ref_count);
3746 		ice_free(hw, hw->blk[i].es.written);
3747 	}
3748 
3749 	LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
3750 				 ice_rss_cfg, l_entry) {
3751 		LIST_DEL(&r->l_entry);
3752 		ice_free(hw, r);
3753 	}
3754 	ice_destroy_lock(&hw->rss_locks);
3755 	ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
3756 }
3757 
3758 /**
3759  * ice_init_flow_profs - init flow profile locks and list heads
3760  * @hw: pointer to the hardware structure
3761  * @blk_idx: HW block index
3762  */
3763 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
3764 {
3765 	ice_init_lock(&hw->fl_profs_locks[blk_idx]);
3766 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3767 }
3768 
3769 /**
3770  * ice_clear_hw_tbls - clear HW tables and flow profiles
3771  * @hw: pointer to the hardware structure
3772  */
3773 void ice_clear_hw_tbls(struct ice_hw *hw)
3774 {
3775 	u8 i;
3776 
3777 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3778 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3779 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3780 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3781 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3782 		struct ice_es *es = &hw->blk[i].es;
3783 
3784 		if (hw->blk[i].is_list_init) {
3785 			ice_free_prof_map(hw, i);
3786 			ice_free_flow_profs(hw, i);
3787 		}
3788 
3789 		ice_free_vsig_tbl(hw, (enum ice_block)i);
3790 
3791 		ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
3792 			   ICE_NONDMA_MEM);
3793 		ice_memset(xlt1->ptg_tbl, 0,
3794 			   ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
3795 			   ICE_NONDMA_MEM);
3796 		ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
3797 			   ICE_NONDMA_MEM);
3798 
3799 		ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
3800 			   ICE_NONDMA_MEM);
3801 		ice_memset(xlt2->vsig_tbl, 0,
3802 			   xlt2->count * sizeof(*xlt2->vsig_tbl),
3803 			   ICE_NONDMA_MEM);
3804 		ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
3805 			   ICE_NONDMA_MEM);
3806 
3807 		ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
3808 			   ICE_NONDMA_MEM);
3809 		ice_memset(prof_redir->t, 0,
3810 			   prof_redir->count * sizeof(*prof_redir->t),
3811 			   ICE_NONDMA_MEM);
3812 
3813 		ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
3814 			   ICE_NONDMA_MEM);
3815 		ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
3816 			   ICE_NONDMA_MEM);
3817 		ice_memset(es->written, 0, es->count * sizeof(*es->written),
3818 			   ICE_NONDMA_MEM);
3819 	}
3820 }
3821 
3822 /**
3823  * ice_init_hw_tbls - init hardware table memory
3824  * @hw: pointer to the hardware structure
3825  */
3826 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3827 {
3828 	u8 i;
3829 
3830 	ice_init_lock(&hw->rss_locks);
3831 	INIT_LIST_HEAD(&hw->rss_list_head);
3832 	for (i = 0; i < ICE_BLK_COUNT; i++) {
3833 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3834 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
3835 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3836 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3837 		struct ice_es *es = &hw->blk[i].es;
3838 		u16 j;
3839 
3840 		if (hw->blk[i].is_list_init)
3841 			continue;
3842 
3843 		ice_init_flow_profs(hw, i);
3844 		ice_init_lock(&es->prof_map_lock);
3845 		INIT_LIST_HEAD(&es->prof_map);
3846 		hw->blk[i].is_list_init = true;
3847 
3848 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
3849 		es->reverse = blk_sizes[i].reverse;
3850 
3851 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3852 		xlt1->count = blk_sizes[i].xlt1;
3853 
3854 		xlt1->ptypes = (struct ice_ptg_ptype *)
3855 			ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
3856 
3857 		if (!xlt1->ptypes)
3858 			goto err;
3859 
3860 		xlt1->ptg_tbl = (struct ice_ptg_entry *)
3861 			ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
3862 
3863 		if (!xlt1->ptg_tbl)
3864 			goto err;
3865 
3866 		xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
3867 		if (!xlt1->t)
3868 			goto err;
3869 
3870 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3871 		xlt2->count = blk_sizes[i].xlt2;
3872 
3873 		xlt2->vsis = (struct ice_vsig_vsi *)
3874 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
3875 
3876 		if (!xlt2->vsis)
3877 			goto err;
3878 
3879 		xlt2->vsig_tbl = (struct ice_vsig_entry *)
3880 			ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
3881 		if (!xlt2->vsig_tbl)
3882 			goto err;
3883 
3884 		for (j = 0; j < xlt2->count; j++)
3885 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3886 
3887 		xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
3888 		if (!xlt2->t)
3889 			goto err;
3890 
3891 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3892 		prof->count = blk_sizes[i].prof_tcam;
3893 		prof->max_prof_id = blk_sizes[i].prof_id;
3894 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3895 		prof->t = (struct ice_prof_tcam_entry *)
3896 			ice_calloc(hw, prof->count, sizeof(*prof->t));
3897 
3898 		if (!prof->t)
3899 			goto err;
3900 
3901 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3902 		prof_redir->count = blk_sizes[i].prof_redir;
3903 		prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
3904 						 sizeof(*prof_redir->t));
3905 
3906 		if (!prof_redir->t)
3907 			goto err;
3908 
3909 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3910 		es->count = blk_sizes[i].es;
3911 		es->fvw = blk_sizes[i].fvw;
3912 		es->t = (struct ice_fv_word *)
3913 			ice_calloc(hw, (u32)(es->count * es->fvw),
3914 				   sizeof(*es->t));
3915 		if (!es->t)
3916 			goto err;
3917 
3918 		es->ref_count = (u16 *)
3919 			ice_calloc(hw, es->count, sizeof(*es->ref_count));
3920 
3921 		if (!es->ref_count)
3922 			goto err;
3923 
3924 		es->written = (u8 *)
3925 			ice_calloc(hw, es->count, sizeof(*es->written));
3926 
3927 		if (!es->written)
3928 			goto err;
3929 
3930 	}
3931 	return ICE_SUCCESS;
3932 
3933 err:
3934 	ice_free_hw_tbls(hw);
3935 	return ICE_ERR_NO_MEMORY;
3936 }
3937 
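/*
 * Lifecycle sketch (illustrative): ice_init_hw_tbls() pairs with
 * ice_free_hw_tbls() for full teardown, while ice_clear_hw_tbls() only
 * zeroes the allocated tables (for example across a reset) so that
 * ice_fill_blk_tbls() can repopulate them without reallocating:
 *
 *	ice_init_hw_tbls(hw);	allocate tables, locks and list heads
 *	...
 *	ice_clear_hw_tbls(hw);	wipe contents, keep allocations
 *	ice_fill_blk_tbls(hw);	refill from the package
 *	...
 *	ice_free_hw_tbls(hw);	release everything
 */
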
3938 /**
3939  * ice_prof_gen_key - generate profile ID key
3940  * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
3942  * @ptg: packet type group (PTG) portion of key
3943  * @vsig: VSIG portion of key
3944  * @cdid: CDID portion of key
3945  * @flags: flag portion of key
3946  * @vl_msk: valid mask
3947  * @dc_msk: don't care mask
3948  * @nm_msk: never match mask
3949  * @key: output of profile ID key
3950  */
3951 static enum ice_status
3952 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3953 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3954 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3955 		 u8 key[ICE_TCAM_KEY_SZ])
3956 {
3957 	struct ice_prof_id_key inkey;
3958 
3959 	inkey.xlt1 = ptg;
3960 	inkey.xlt2_cdid = CPU_TO_LE16(vsig);
3961 	inkey.flags = CPU_TO_LE16(flags);
3962 
3963 	switch (hw->blk[blk].prof.cdid_bits) {
3964 	case 0:
3965 		break;
3966 	case 2:
3967 #define ICE_CD_2_M 0xC000U
3968 #define ICE_CD_2_S 14
3969 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
3970 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
3971 		break;
3972 	case 4:
3973 #define ICE_CD_4_M 0xF000U
3974 #define ICE_CD_4_S 12
3975 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
3976 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
3977 		break;
3978 	case 8:
3979 #define ICE_CD_8_M 0xFF00U
3980 #define ICE_CD_8_S 16
3981 		inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
3982 		inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
3983 		break;
3984 	default:
3985 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3986 		break;
3987 	}
3988 
3989 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3990 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3991 }
3992 
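/*
 * Example of the CDID packing above for a block with cdid_bits == 2 (the
 * cdid value is an illustrative assumption): the CDID occupies the top
 * two bits of the 16-bit xlt2_cdid field, so for cdid == 1
 *
 *	BIT(1) << ICE_CD_2_S == 0x2 << 14 == 0x8000
 *
 * which lands inside ICE_CD_2_M (0xC000), while the lower 14 bits still
 * carry the VSIG portion of the key.
 */
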
3993 /**
3994  * ice_tcam_write_entry - write TCAM entry
3995  * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
3997  * @idx: the entry index to write to
3998  * @prof_id: profile ID
3999  * @ptg: packet type group (PTG) portion of key
4000  * @vsig: VSIG portion of key
4001  * @cdid: CDID portion of key
4002  * @flags: flag portion of key
4003  * @vl_msk: valid mask
4004  * @dc_msk: don't care mask
4005  * @nm_msk: never match mask
4006  */
4007 static enum ice_status
4008 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4009 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4010 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4011 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4012 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4013 {
4015 	enum ice_status status;
4016 
4017 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4018 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4019 	if (!status) {
4020 		hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4021 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
4022 	}
4023 
4024 	return status;
4025 }
4026 
4027 /**
 * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
4029  * @hw: pointer to the hardware structure
4030  * @blk: HW block
4031  * @vsig: VSIG to query
4032  * @refs: pointer to variable to receive the reference count
4033  */
4034 static enum ice_status
4035 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4036 {
4037 	u16 idx = vsig & ICE_VSIG_IDX_M;
4038 	struct ice_vsig_vsi *ptr;
4039 
4040 	*refs = 0;
4041 
4042 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4043 		return ICE_ERR_DOES_NOT_EXIST;
4044 
4045 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4046 	while (ptr) {
4047 		(*refs)++;
4048 		ptr = ptr->next_vsi;
4049 	}
4050 
4051 	return ICE_SUCCESS;
4052 }
4053 
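/*
 * Usage sketch (illustrative): callers such as ice_add_prof_id_flow()
 * below use the reference count to detect a VSIG that contains only the
 * requesting VSI:
 *
 *	u16 ref;
 *
 *	if (!ice_vsig_get_ref(hw, blk, vsig, &ref) && ref == 1)
 *		...the VSIG holds a single VSI and may be removed once
 *		   that VSI moves elsewhere...
 */
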
4054 /**
4055  * ice_has_prof_vsig - check to see if VSIG has a specific profile
4056  * @hw: pointer to the hardware structure
4057  * @blk: HW block
4058  * @vsig: VSIG to check against
4059  * @hdl: profile handle
4060  */
4061 static bool
4062 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4063 {
4064 	u16 idx = vsig & ICE_VSIG_IDX_M;
4065 	struct ice_vsig_prof *ent;
4066 
4067 	LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4068 			    ice_vsig_prof, list)
4069 		if (ent->profile_cookie == hdl)
4070 			return true;
4071 
4072 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4073 		  vsig);
4074 	return false;
4075 }
4076 
4077 /**
4078  * ice_prof_bld_es - build profile ID extraction sequence changes
4079  * @hw: pointer to the HW struct
4080  * @blk: hardware block
4081  * @bld: the update package buffer build to add to
4082  * @chgs: the list of changes to make in hardware
4083  */
4084 static enum ice_status
4085 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4086 		struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4087 {
4088 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4089 	struct ice_chs_chg *tmp;
4090 
4091 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4092 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4093 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4094 			struct ice_pkg_es *p;
4095 			u32 id;
4096 
4097 			id = ice_sect_id(blk, ICE_VEC_TBL);
4098 			p = (struct ice_pkg_es *)
4099 				ice_pkg_buf_alloc_section(bld, id,
4100 							  ice_struct_size(p, es,
4101 									  1) +
4102 							  vec_size -
4103 							  sizeof(p->es[0]));
4104 
4105 			if (!p)
4106 				return ICE_ERR_MAX_LIMIT;
4107 
4108 			p->count = CPU_TO_LE16(1);
4109 			p->offset = CPU_TO_LE16(tmp->prof_id);
4110 
4111 			ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4112 				   ICE_NONDMA_TO_NONDMA);
4113 		}
4114 
4115 	return ICE_SUCCESS;
4116 }
4117 
4118 /**
4119  * ice_prof_bld_tcam - build profile ID TCAM changes
4120  * @hw: pointer to the HW struct
4121  * @blk: hardware block
4122  * @bld: the update package buffer build to add to
4123  * @chgs: the list of changes to make in hardware
4124  */
4125 static enum ice_status
4126 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4127 		  struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4128 {
4129 	struct ice_chs_chg *tmp;
4130 
4131 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4132 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4133 			struct ice_prof_id_section *p;
4134 			u32 id;
4135 
4136 			id = ice_sect_id(blk, ICE_PROF_TCAM);
4137 			p = (struct ice_prof_id_section *)
4138 				ice_pkg_buf_alloc_section(bld, id,
4139 							  ice_struct_size(p,
4140 									  entry,
4141 									  1));
4142 
4143 			if (!p)
4144 				return ICE_ERR_MAX_LIMIT;
4145 
4146 			p->count = CPU_TO_LE16(1);
4147 			p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4148 			p->entry[0].prof_id = tmp->prof_id;
4149 
4150 			ice_memcpy(p->entry[0].key,
4151 				   &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4152 				   sizeof(hw->blk[blk].prof.t->key),
4153 				   ICE_NONDMA_TO_NONDMA);
4154 		}
4155 
4156 	return ICE_SUCCESS;
4157 }
4158 
4159 /**
4160  * ice_prof_bld_xlt1 - build XLT1 changes
4161  * @blk: hardware block
4162  * @bld: the update package buffer build to add to
4163  * @chgs: the list of changes to make in hardware
4164  */
4165 static enum ice_status
4166 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4167 		  struct LIST_HEAD_TYPE *chgs)
4168 {
4169 	struct ice_chs_chg *tmp;
4170 
4171 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4172 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4173 			struct ice_xlt1_section *p;
4174 			u32 id;
4175 
4176 			id = ice_sect_id(blk, ICE_XLT1);
4177 			p = (struct ice_xlt1_section *)
4178 				ice_pkg_buf_alloc_section(bld, id,
4179 							  ice_struct_size(p,
4180 									  value,
4181 									  1));
4182 
4183 			if (!p)
4184 				return ICE_ERR_MAX_LIMIT;
4185 
4186 			p->count = CPU_TO_LE16(1);
4187 			p->offset = CPU_TO_LE16(tmp->ptype);
4188 			p->value[0] = tmp->ptg;
4189 		}
4190 
4191 	return ICE_SUCCESS;
4192 }
4193 
4194 /**
4195  * ice_prof_bld_xlt2 - build XLT2 changes
4196  * @blk: hardware block
4197  * @bld: the update package buffer build to add to
4198  * @chgs: the list of changes to make in hardware
4199  */
4200 static enum ice_status
4201 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4202 		  struct LIST_HEAD_TYPE *chgs)
4203 {
4204 	struct ice_chs_chg *tmp;
4205 
4206 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4207 		struct ice_xlt2_section *p;
4208 		u32 id;
4209 
4210 		switch (tmp->type) {
4211 		case ICE_VSIG_ADD:
4212 		case ICE_VSI_MOVE:
4213 		case ICE_VSIG_REM:
4214 			id = ice_sect_id(blk, ICE_XLT2);
4215 			p = (struct ice_xlt2_section *)
4216 				ice_pkg_buf_alloc_section(bld, id,
4217 							  ice_struct_size(p,
4218 									  value,
4219 									  1));
4220 
4221 			if (!p)
4222 				return ICE_ERR_MAX_LIMIT;
4223 
4224 			p->count = CPU_TO_LE16(1);
4225 			p->offset = CPU_TO_LE16(tmp->vsi);
4226 			p->value[0] = CPU_TO_LE16(tmp->vsig);
4227 			break;
4228 		default:
4229 			break;
4230 		}
4231 	}
4232 
4233 	return ICE_SUCCESS;
4234 }
4235 
4236 /**
4237  * ice_upd_prof_hw - update hardware using the change list
4238  * @hw: pointer to the HW struct
4239  * @blk: hardware block
4240  * @chgs: the list of changes to make in hardware
4241  */
4242 static enum ice_status
4243 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4244 		struct LIST_HEAD_TYPE *chgs)
4245 {
4246 	struct ice_buf_build *b;
4247 	struct ice_chs_chg *tmp;
4248 	enum ice_status status;
4249 	u16 pkg_sects;
4250 	u16 xlt1 = 0;
4251 	u16 xlt2 = 0;
4252 	u16 tcam = 0;
4253 	u16 es = 0;
4254 	u16 sects;
4255 
4256 	/* count number of sections we need */
4257 	LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4258 		switch (tmp->type) {
4259 		case ICE_PTG_ES_ADD:
4260 			if (tmp->add_ptg)
4261 				xlt1++;
4262 			if (tmp->add_prof)
4263 				es++;
4264 			break;
4265 		case ICE_TCAM_ADD:
4266 			tcam++;
4267 			break;
4268 		case ICE_VSIG_ADD:
4269 		case ICE_VSI_MOVE:
4270 		case ICE_VSIG_REM:
4271 			xlt2++;
4272 			break;
4273 		default:
4274 			break;
4275 		}
4276 	}
4277 	sects = xlt1 + xlt2 + tcam + es;
4278 
4279 	if (!sects)
4280 		return ICE_SUCCESS;
4281 
4282 	/* Build update package buffer */
4283 	b = ice_pkg_buf_alloc(hw);
4284 	if (!b)
4285 		return ICE_ERR_NO_MEMORY;
4286 
4287 	status = ice_pkg_buf_reserve_section(b, sects);
4288 	if (status)
4289 		goto error_tmp;
4290 
4291 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
4292 	if (es) {
4293 		status = ice_prof_bld_es(hw, blk, b, chgs);
4294 		if (status)
4295 			goto error_tmp;
4296 	}
4297 
4298 	if (tcam) {
4299 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
4300 		if (status)
4301 			goto error_tmp;
4302 	}
4303 
4304 	if (xlt1) {
4305 		status = ice_prof_bld_xlt1(blk, b, chgs);
4306 		if (status)
4307 			goto error_tmp;
4308 	}
4309 
4310 	if (xlt2) {
4311 		status = ice_prof_bld_xlt2(blk, b, chgs);
4312 		if (status)
4313 			goto error_tmp;
4314 	}
4315 
	/* After the package buffer is built, check that the section count in
	 * the buffer is non-zero and matches the number of sections expected
	 * for the package update.
4319 	 */
4320 	pkg_sects = ice_pkg_buf_get_active_sections(b);
4321 	if (!pkg_sects || pkg_sects != sects) {
4322 		status = ICE_ERR_INVAL_SIZE;
4323 		goto error_tmp;
4324 	}
4325 
4326 	/* update package */
4327 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4328 	if (status == ICE_ERR_AQ_ERROR)
4329 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4330 
4331 error_tmp:
4332 	ice_pkg_buf_free(hw, b);
4333 	return status;
4334 }
4335 
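/*
 * Change-list pattern (an illustrative sketch of how the callers below
 * drive this function; error handling is trimmed):
 *
 *	struct ice_chs_chg *del, *tmp;
 *	struct LIST_HEAD_TYPE chg;
 *
 *	INIT_LIST_HEAD(&chg);
 *	...queue ice_chs_chg records via ice_move_vsi(), ice_rem_vsig(),
 *	   ice_add_prof_id_vsig(), and friends...
 *	status = ice_upd_prof_hw(hw, blk, &chg);
 *	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
 *		LIST_DEL(&del->list_entry);
 *		ice_free(hw, del);
 *	}
 */
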
4336 /**
4337  * ice_add_prof - add profile
4338  * @hw: pointer to the HW struct
4339  * @blk: hardware block
4340  * @id: profile tracking ID
4341  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4342  * @es: extraction sequence (length of array is determined by the block)
4343  *
4344  * This function registers a profile, which matches a set of PTGs with a
 * particular extraction sequence. While the hardware profile is allocated,
4346  * it will not be written until the first call to ice_add_flow that specifies
4347  * the ID value used here.
4348  */
4349 enum ice_status
4350 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4351 	     struct ice_fv_word *es)
4352 {
4353 	u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4354 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
4355 	struct ice_prof_map *prof;
4356 	enum ice_status status;
4357 	u8 byte = 0;
4358 	u8 prof_id;
4359 
4360 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
4361 
4362 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4363 
4364 	/* search for existing profile */
4365 	status = ice_find_prof_id(hw, blk, es, &prof_id);
4366 	if (status) {
4367 		/* allocate profile ID */
4368 		status = ice_alloc_prof_id(hw, blk, &prof_id);
4369 		if (status)
4370 			goto err_ice_add_prof;
4371 
4372 		/* and write new es */
4373 		ice_write_es(hw, blk, prof_id, es);
4374 	}
4375 
4376 	ice_prof_inc_ref(hw, blk, prof_id);
4377 
	/* add profile info */
	prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
	if (!prof) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}
4383 
4384 	prof->profile_cookie = id;
4385 	prof->prof_id = prof_id;
4386 	prof->ptg_cnt = 0;
4387 	prof->context = 0;
4388 
4389 	/* build list of ptgs */
4390 	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4391 		u8 bit;
4392 
4393 		if (!ptypes[byte]) {
4394 			bytes--;
4395 			byte++;
4396 			continue;
4397 		}
4398 
4399 		/* Examine 8 bits per byte */
4400 		ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
4401 				     BITS_PER_BYTE) {
4402 			u16 ptype;
4403 			u8 ptg;
4404 
4405 			ptype = byte * BITS_PER_BYTE + bit;
4406 
4407 			/* The package should place all ptypes in a non-zero
4408 			 * PTG, so the following call should never fail.
4409 			 */
4410 			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4411 				continue;
4412 
4413 			/* If PTG is already added, skip and continue */
4414 			if (ice_is_bit_set(ptgs_used, ptg))
4415 				continue;
4416 
4417 			ice_set_bit(ptg, ptgs_used);
4418 			prof->ptg[prof->ptg_cnt] = ptg;
4419 
4420 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4421 				break;
4422 		}
4423 
4424 		bytes--;
4425 		byte++;
4426 	}
4427 
4428 	LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
4429 	status = ICE_SUCCESS;
4430 
4431 err_ice_add_prof:
4432 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4433 	return status;
4434 }
4435 
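/*
 * Usage sketch (illustrative; the ptype number, tracking ID, and field
 * vector width are assumptions): register a profile that covers a single
 * ptype with an extraction sequence sized for the block:
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)];
 *	struct ice_fv_word es[48];	assumed fvw for the block
 *	u64 id = 0x1234;		caller-chosen tracking ID
 *
 *	ice_memset(ptypes, 0, sizeof(ptypes), ICE_NONDMA_MEM);
 *	ptypes[10 / BITS_PER_BYTE] |= BIT(10 % BITS_PER_BYTE);
 *	...fill es[] for this block's field vector width...
 *	status = ice_add_prof(hw, ICE_BLK_RSS, id, ptypes, es);
 */
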
4436 /**
4437  * ice_search_prof_id - Search for a profile tracking ID
4438  * @hw: pointer to the HW struct
4439  * @blk: hardware block
4440  * @id: profile tracking ID
4441  *
4442  * This will search for a profile tracking ID which was previously added.
4443  * The profile map lock should be held before calling this function.
4444  */
4445 struct ice_prof_map *
4446 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
4447 {
4448 	struct ice_prof_map *entry = NULL;
4449 	struct ice_prof_map *map;
4450 
4451 	LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
4452 		if (map->profile_cookie == id) {
4453 			entry = map;
4454 			break;
4455 		}
4456 
4457 	return entry;
4458 }
4459 
4460 /**
4461  * ice_set_prof_context - Set context for a given profile
4462  * @hw: pointer to the HW struct
4463  * @blk: hardware block
4464  * @id: profile tracking ID
4465  * @cntxt: context
4466  */
4467 enum ice_status
4468 ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
4469 {
4470 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4471 	struct ice_prof_map *entry;
4472 
4473 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4474 	entry = ice_search_prof_id(hw, blk, id);
4475 	if (entry) {
4476 		entry->context = cntxt;
4477 		status = ICE_SUCCESS;
4478 	}
4479 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4480 	return status;
4481 }
4482 
4483 /**
4484  * ice_get_prof_context - Get context for a given profile
4485  * @hw: pointer to the HW struct
4486  * @blk: hardware block
4487  * @id: profile tracking ID
4488  * @cntxt: pointer to variable to receive the context
4489  */
4490 enum ice_status
4491 ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
4492 {
4493 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4494 	struct ice_prof_map *entry;
4495 
4496 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4497 	entry = ice_search_prof_id(hw, blk, id);
4498 	if (entry) {
4499 		*cntxt = entry->context;
4500 		status = ICE_SUCCESS;
4501 	}
4502 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4503 	return status;
4504 }
4505 
4506 /**
4507  * ice_vsig_prof_id_count - count profiles in a VSIG
4508  * @hw: pointer to the HW struct
4509  * @blk: hardware block
4510  * @vsig: VSIG to remove the profile from
4511  */
4512 static u16
4513 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
4514 {
4515 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
4516 	struct ice_vsig_prof *p;
4517 
4518 	LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4519 			    ice_vsig_prof, list)
4520 		count++;
4521 
4522 	return count;
4523 }
4524 
4525 /**
4526  * ice_rel_tcam_idx - release a TCAM index
4527  * @hw: pointer to the HW struct
4528  * @blk: hardware block
4529  * @idx: the index to release
4530  */
4531 static enum ice_status
4532 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
4533 {
4534 	/* Masks to invoke a never match entry */
4535 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4536 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
4537 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
4538 	enum ice_status status;
4539 
4540 	/* write the TCAM entry */
4541 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
4542 				      dc_msk, nm_msk);
4543 	if (status)
4544 		return status;
4545 
4546 	/* release the TCAM entry */
4547 	status = ice_free_tcam_ent(hw, blk, idx);
4548 
4549 	return status;
4550 }
4551 
4552 /**
4553  * ice_rem_prof_id - remove one profile from a VSIG
4554  * @hw: pointer to the HW struct
4555  * @blk: hardware block
4556  * @prof: pointer to profile structure to remove
4557  */
4558 static enum ice_status
4559 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
4560 		struct ice_vsig_prof *prof)
4561 {
4562 	enum ice_status status;
4563 	u16 i;
4564 
4565 	for (i = 0; i < prof->tcam_count; i++)
4566 		if (prof->tcam[i].in_use) {
4567 			prof->tcam[i].in_use = false;
4568 			status = ice_rel_tcam_idx(hw, blk,
4569 						  prof->tcam[i].tcam_idx);
4570 			if (status)
4571 				return ICE_ERR_HW_TABLE;
4572 		}
4573 
4574 	return ICE_SUCCESS;
4575 }
4576 
4577 /**
4578  * ice_rem_vsig - remove VSIG
4579  * @hw: pointer to the HW struct
4580  * @blk: hardware block
4581  * @vsig: the VSIG to remove
4582  * @chg: the change list
4583  */
4584 static enum ice_status
4585 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4586 	     struct LIST_HEAD_TYPE *chg)
4587 {
4588 	u16 idx = vsig & ICE_VSIG_IDX_M;
4589 	struct ice_vsig_vsi *vsi_cur;
4590 	struct ice_vsig_prof *d, *t;
4591 	enum ice_status status;
4592 
4593 	/* remove TCAM entries */
4594 	LIST_FOR_EACH_ENTRY_SAFE(d, t,
4595 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4596 				 ice_vsig_prof, list) {
4597 		status = ice_rem_prof_id(hw, blk, d);
4598 		if (status)
4599 			return status;
4600 
4601 		LIST_DEL(&d->list);
4602 		ice_free(hw, d);
4603 	}
4604 
	/* Move all VSIs associated with this VSIG to the default VSIG */
4606 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4607 	/* If the VSIG has at least 1 VSI then iterate through the list
4608 	 * and remove the VSIs before deleting the group.
4609 	 */
4610 	if (vsi_cur)
4611 		do {
4612 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
4613 			struct ice_chs_chg *p;
4614 
4615 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4616 			if (!p)
4617 				return ICE_ERR_NO_MEMORY;
4618 
4619 			p->type = ICE_VSIG_REM;
4620 			p->orig_vsig = vsig;
4621 			p->vsig = ICE_DEFAULT_VSIG;
4622 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
4623 
4624 			LIST_ADD(&p->list_entry, chg);
4625 
4626 			vsi_cur = tmp;
4627 		} while (vsi_cur);
4628 
4629 	return ice_vsig_free(hw, blk, vsig);
4630 }
4631 
4632 /**
4633  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
4634  * @hw: pointer to the HW struct
4635  * @blk: hardware block
4636  * @vsig: VSIG to remove the profile from
4637  * @hdl: profile handle indicating which profile to remove
4638  * @chg: list to receive a record of changes
4639  */
4640 static enum ice_status
4641 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4642 		     struct LIST_HEAD_TYPE *chg)
4643 {
4644 	u16 idx = vsig & ICE_VSIG_IDX_M;
4645 	struct ice_vsig_prof *p, *t;
4646 	enum ice_status status;
4647 
4648 	LIST_FOR_EACH_ENTRY_SAFE(p, t,
4649 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4650 				 ice_vsig_prof, list)
4651 		if (p->profile_cookie == hdl) {
4652 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4653 				/* this is the last profile, remove the VSIG */
4654 				return ice_rem_vsig(hw, blk, vsig, chg);
4655 
4656 			status = ice_rem_prof_id(hw, blk, p);
4657 			if (!status) {
4658 				LIST_DEL(&p->list);
4659 				ice_free(hw, p);
4660 			}
4661 			return status;
4662 		}
4663 
4664 	return ICE_ERR_DOES_NOT_EXIST;
4665 }
4666 
4667 /**
4668  * ice_rem_flow_all - remove all flows with a particular profile
4669  * @hw: pointer to the HW struct
4670  * @blk: hardware block
4671  * @id: profile tracking ID
4672  */
4673 static enum ice_status
4674 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4675 {
4676 	struct ice_chs_chg *del, *tmp;
4677 	enum ice_status status;
4678 	struct LIST_HEAD_TYPE chg;
4679 	u16 i;
4680 
4681 	INIT_LIST_HEAD(&chg);
4682 
4683 	for (i = 1; i < ICE_MAX_VSIGS; i++)
4684 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4685 			if (ice_has_prof_vsig(hw, blk, i, id)) {
4686 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
4687 							      &chg);
4688 				if (status)
4689 					goto err_ice_rem_flow_all;
4690 			}
4691 		}
4692 
4693 	status = ice_upd_prof_hw(hw, blk, &chg);
4694 
4695 err_ice_rem_flow_all:
4696 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
4697 		LIST_DEL(&del->list_entry);
4698 		ice_free(hw, del);
4699 	}
4700 
4701 	return status;
4702 }
4703 
4704 /**
4705  * ice_rem_prof - remove profile
4706  * @hw: pointer to the HW struct
4707  * @blk: hardware block
4708  * @id: profile tracking ID
4709  *
4710  * This will remove the profile specified by the ID parameter, which was
4711  * previously created through ice_add_prof. If any existing entries
4712  * are associated with this profile, they will be removed as well.
4713  */
4714 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4715 {
4716 	struct ice_prof_map *pmap;
4717 	enum ice_status status;
4718 
4719 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4720 
4721 	pmap = ice_search_prof_id(hw, blk, id);
4722 	if (!pmap) {
4723 		status = ICE_ERR_DOES_NOT_EXIST;
4724 		goto err_ice_rem_prof;
4725 	}
4726 
4727 	/* remove all flows with this profile */
4728 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4729 	if (status)
4730 		goto err_ice_rem_prof;
4731 
4732 	/* dereference profile, and possibly remove */
4733 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
4734 
4735 	LIST_DEL(&pmap->list);
4736 	ice_free(hw, pmap);
4737 
4738 err_ice_rem_prof:
4739 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4740 	return status;
4741 }
4742 
4743 /**
4744  * ice_get_prof - get profile
4745  * @hw: pointer to the HW struct
4746  * @blk: hardware block
4747  * @hdl: profile handle
4748  * @chg: change list
4749  */
4750 static enum ice_status
4751 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4752 	     struct LIST_HEAD_TYPE *chg)
4753 {
4754 	enum ice_status status = ICE_SUCCESS;
4755 	struct ice_prof_map *map;
4756 	struct ice_chs_chg *p;
4757 	u16 i;
4758 
4759 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4760 	/* Get the details on the profile specified by the handle ID */
4761 	map = ice_search_prof_id(hw, blk, hdl);
4762 	if (!map) {
4763 		status = ICE_ERR_DOES_NOT_EXIST;
4764 		goto err_ice_get_prof;
4765 	}
4766 
4767 	for (i = 0; i < map->ptg_cnt; i++)
4768 		if (!hw->blk[blk].es.written[map->prof_id]) {
4769 			/* add ES to change list */
4770 			p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4771 			if (!p) {
4772 				status = ICE_ERR_NO_MEMORY;
4773 				goto err_ice_get_prof;
4774 			}
4775 
4776 			p->type = ICE_PTG_ES_ADD;
4777 			p->ptype = 0;
4778 			p->ptg = map->ptg[i];
4779 			p->add_ptg = 0;
4780 
4781 			p->add_prof = 1;
4782 			p->prof_id = map->prof_id;
4783 
4784 			hw->blk[blk].es.written[map->prof_id] = true;
4785 
4786 			LIST_ADD(&p->list_entry, chg);
4787 		}
4788 
4789 err_ice_get_prof:
4790 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4791 	/* let caller clean up the change list */
4792 	return status;
4793 }
4794 
4795 /**
4796  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4797  * @hw: pointer to the HW struct
4798  * @blk: hardware block
4799  * @vsig: VSIG from which to copy the list
4800  * @lst: output list
4801  *
4802  * This routine makes a copy of the list of profiles in the specified VSIG.
4803  */
4804 static enum ice_status
4805 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4806 		   struct LIST_HEAD_TYPE *lst)
4807 {
4808 	struct ice_vsig_prof *ent1, *ent2;
4809 	u16 idx = vsig & ICE_VSIG_IDX_M;
4810 
4811 	LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4812 			    ice_vsig_prof, list) {
4813 		struct ice_vsig_prof *p;
4814 
4815 		/* copy to the input list */
4816 		p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
4817 						       ICE_NONDMA_TO_NONDMA);
4818 		if (!p)
4819 			goto err_ice_get_profs_vsig;
4820 
4821 		LIST_ADD_TAIL(&p->list, lst);
4822 	}
4823 
4824 	return ICE_SUCCESS;
4825 
4826 err_ice_get_profs_vsig:
4827 	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
4828 		LIST_DEL(&ent1->list);
4829 		ice_free(hw, ent1);
4830 	}
4831 
4832 	return ICE_ERR_NO_MEMORY;
4833 }
4834 
4835 /**
4836  * ice_add_prof_to_lst - add profile entry to a list
4837  * @hw: pointer to the HW struct
4838  * @blk: hardware block
4839  * @lst: the list to be added to
4840  * @hdl: profile handle of entry to add
4841  */
4842 static enum ice_status
4843 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4844 		    struct LIST_HEAD_TYPE *lst, u64 hdl)
4845 {
4846 	enum ice_status status = ICE_SUCCESS;
4847 	struct ice_prof_map *map;
4848 	struct ice_vsig_prof *p;
4849 	u16 i;
4850 
4851 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
4852 	map = ice_search_prof_id(hw, blk, hdl);
4853 	if (!map) {
4854 		status = ICE_ERR_DOES_NOT_EXIST;
4855 		goto err_ice_add_prof_to_lst;
4856 	}
4857 
4858 	p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
4859 	if (!p) {
4860 		status = ICE_ERR_NO_MEMORY;
4861 		goto err_ice_add_prof_to_lst;
4862 	}
4863 
4864 	p->profile_cookie = map->profile_cookie;
4865 	p->prof_id = map->prof_id;
4866 	p->tcam_count = map->ptg_cnt;
4867 
4868 	for (i = 0; i < map->ptg_cnt; i++) {
4869 		p->tcam[i].prof_id = map->prof_id;
4870 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4871 		p->tcam[i].ptg = map->ptg[i];
4872 	}
4873 
4874 	LIST_ADD(&p->list, lst);
4875 
4876 err_ice_add_prof_to_lst:
4877 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
4878 	return status;
4879 }
4880 
4881 /**
4882  * ice_move_vsi - move VSI to another VSIG
4883  * @hw: pointer to the HW struct
4884  * @blk: hardware block
4885  * @vsi: the VSI to move
4886  * @vsig: the VSIG to move the VSI to
4887  * @chg: the change list
4888  */
4889 static enum ice_status
4890 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4891 	     struct LIST_HEAD_TYPE *chg)
4892 {
4893 	enum ice_status status;
4894 	struct ice_chs_chg *p;
4895 	u16 orig_vsig;
4896 
4897 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4898 	if (!p)
4899 		return ICE_ERR_NO_MEMORY;
4900 
4901 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4902 	if (!status)
4903 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4904 
4905 	if (status) {
4906 		ice_free(hw, p);
4907 		return status;
4908 	}
4909 
4910 	p->type = ICE_VSI_MOVE;
4911 	p->vsi = vsi;
4912 	p->orig_vsig = orig_vsig;
4913 	p->vsig = vsig;
4914 
4915 	LIST_ADD(&p->list_entry, chg);
4916 
4917 	return ICE_SUCCESS;
4918 }
4919 
4920 /**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from the change list
4922  * @hw: pointer to the HW struct
4923  * @idx: the index of the TCAM entry to remove
4924  * @chg: the list of change structures to search
4925  */
4926 static void
4927 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
4928 {
4929 	struct ice_chs_chg *pos, *tmp;
4930 
4931 	LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
4932 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4933 			LIST_DEL(&tmp->list_entry);
4934 			ice_free(hw, tmp);
4935 		}
4936 }
4937 
4938 /**
4939  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4940  * @hw: pointer to the HW struct
4941  * @blk: hardware block
4942  * @enable: true to enable, false to disable
4943  * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry to the change list
4948  */
4949 static enum ice_status
4950 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4951 		      u16 vsig, struct ice_tcam_inf *tcam,
4952 		      struct LIST_HEAD_TYPE *chg)
4953 {
4954 	enum ice_status status;
4955 	struct ice_chs_chg *p;
4956 
4957 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4958 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4959 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4960 
4961 	/* if disabling, free the TCAM */
4962 	if (!enable) {
4963 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4964 
4965 		/* if we have already created a change for this TCAM entry, then
4966 		 * we need to remove that entry, in order to prevent writing to
		 * a TCAM entry that we will no longer own.
4968 		 */
4969 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4970 		tcam->tcam_idx = 0;
4971 		tcam->in_use = 0;
4972 		return status;
4973 	}
4974 
4975 	/* for re-enabling, reallocate a TCAM */
4976 	status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
4977 	if (status)
4978 		return status;
4979 
4980 	/* add TCAM to change list */
4981 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
4982 	if (!p)
4983 		return ICE_ERR_NO_MEMORY;
4984 
4985 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4986 				      tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
4987 				      nm_msk);
4988 	if (status)
4989 		goto err_ice_prof_tcam_ena_dis;
4990 
4991 	tcam->in_use = 1;
4992 
4993 	p->type = ICE_TCAM_ADD;
4994 	p->add_tcam_idx = true;
4995 	p->prof_id = tcam->prof_id;
4996 	p->ptg = tcam->ptg;
4997 	p->vsig = 0;
4998 	p->tcam_idx = tcam->tcam_idx;
4999 
5000 	/* log change */
5001 	LIST_ADD(&p->list_entry, chg);
5002 
5003 	return ICE_SUCCESS;
5004 
5005 err_ice_prof_tcam_ena_dis:
5006 	ice_free(hw, p);
5007 	return status;
5008 }
5009 
5010 /**
5011  * ice_adj_prof_priorities - adjust profile based on priorities
5012  * @hw: pointer to the HW struct
5013  * @blk: hardware block
5014  * @vsig: the VSIG for which to adjust profile priorities
5015  * @chg: the change list
5016  */
5017 static enum ice_status
5018 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5019 			struct LIST_HEAD_TYPE *chg)
5020 {
5021 	ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5022 	enum ice_status status = ICE_SUCCESS;
5023 	struct ice_vsig_prof *t;
5024 	u16 idx;
5025 
5026 	ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5027 	idx = vsig & ICE_VSIG_IDX_M;
5028 
5029 	/* Priority is based on the order in which the profiles are added. The
5030 	 * newest added profile has highest priority and the oldest added
5031 	 * profile has the lowest priority. Since the profile property list for
5032 	 * a VSIG is sorted from newest to oldest, this code traverses the list
5033 	 * in order and enables the first of each PTG that it finds (that is not
5034 	 * already enabled); it also disables any duplicate PTGs that it finds
5035 	 * in the older profiles (that are currently enabled).
5036 	 */
5037 
5038 	LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5039 			    ice_vsig_prof, list) {
5040 		u16 i;
5041 
5042 		for (i = 0; i < t->tcam_count; i++) {
5043 			bool used;
5044 
5045 			/* Scan the priorities from newest to oldest.
5046 			 * Make sure that the newest profiles take priority.
5047 			 */
5048 			used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);
5049 
5050 			if (used && t->tcam[i].in_use) {
5051 				/* need to mark this PTG as never match, as it
5052 				 * was already in use and therefore duplicate
5053 				 * (and lower priority)
5054 				 */
5055 				status = ice_prof_tcam_ena_dis(hw, blk, false,
5056 							       vsig,
5057 							       &t->tcam[i],
5058 							       chg);
5059 				if (status)
5060 					return status;
5061 			} else if (!used && !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
5063 				 * and not enabled (highest priority)
5064 				 */
5065 				status = ice_prof_tcam_ena_dis(hw, blk, true,
5066 							       vsig,
5067 							       &t->tcam[i],
5068 							       chg);
5069 				if (status)
5070 					return status;
5071 			}
5072 
5073 			/* keep track of used ptgs */
5074 			ice_set_bit(t->tcam[i].ptg, ptgs_used);
5075 		}
5076 	}
5077 
5078 	return status;
5079 }
5080 
5081 /**
5082  * ice_add_prof_id_vsig - add profile to VSIG
5083  * @hw: pointer to the HW struct
5084  * @blk: hardware block
5085  * @vsig: the VSIG to which this profile is to be added
5086  * @hdl: the profile handle indicating the profile to add
5087  * @rev: true to add entries to the end of the list
5088  * @chg: the change list
5089  */
5090 static enum ice_status
5091 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5092 		     bool rev, struct LIST_HEAD_TYPE *chg)
5093 {
5094 	/* Masks that ignore flags */
5095 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5096 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5097 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5098 	enum ice_status status = ICE_SUCCESS;
5099 	struct ice_prof_map *map;
5100 	struct ice_vsig_prof *t;
5101 	struct ice_chs_chg *p;
5102 	u16 vsig_idx, i;
5103 
	/* Error if this VSIG already has this profile */
5105 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5106 		return ICE_ERR_ALREADY_EXISTS;
5107 
5108 	/* new VSIG profile structure */
5109 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5110 	if (!t)
5111 		return ICE_ERR_NO_MEMORY;
5112 
5113 	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5114 	/* Get the details on the profile specified by the handle ID */
5115 	map = ice_search_prof_id(hw, blk, hdl);
5116 	if (!map) {
5117 		status = ICE_ERR_DOES_NOT_EXIST;
5118 		goto err_ice_add_prof_id_vsig;
5119 	}
5120 
5121 	t->profile_cookie = map->profile_cookie;
5122 	t->prof_id = map->prof_id;
5123 	t->tcam_count = map->ptg_cnt;
5124 
5125 	/* create TCAM entries */
5126 	for (i = 0; i < map->ptg_cnt; i++) {
5127 		u16 tcam_idx;
5128 
5129 		/* add TCAM to change list */
5130 		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5131 		if (!p) {
5132 			status = ICE_ERR_NO_MEMORY;
5133 			goto err_ice_add_prof_id_vsig;
5134 		}
5135 
5136 		/* allocate the TCAM entry index */
5137 		status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
5138 		if (status) {
5139 			ice_free(hw, p);
5140 			goto err_ice_add_prof_id_vsig;
5141 		}
5142 
5143 		t->tcam[i].ptg = map->ptg[i];
5144 		t->tcam[i].prof_id = map->prof_id;
5145 		t->tcam[i].tcam_idx = tcam_idx;
5146 		t->tcam[i].in_use = true;
5147 
5148 		p->type = ICE_TCAM_ADD;
5149 		p->add_tcam_idx = true;
5150 		p->prof_id = t->tcam[i].prof_id;
5151 		p->ptg = t->tcam[i].ptg;
5152 		p->vsig = vsig;
5153 		p->tcam_idx = t->tcam[i].tcam_idx;
5154 
5155 		/* write the TCAM entry */
5156 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5157 					      t->tcam[i].prof_id,
5158 					      t->tcam[i].ptg, vsig, 0, 0,
5159 					      vl_msk, dc_msk, nm_msk);
5160 		if (status) {
5161 			ice_free(hw, p);
5162 			goto err_ice_add_prof_id_vsig;
5163 		}
5164 
5165 		/* log change */
5166 		LIST_ADD(&p->list_entry, chg);
5167 	}
5168 
5169 	/* add profile to VSIG */
5170 	vsig_idx = vsig & ICE_VSIG_IDX_M;
5171 	if (rev)
5172 		LIST_ADD_TAIL(&t->list,
5173 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5174 	else
5175 		LIST_ADD(&t->list,
5176 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5177 
5178 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5179 	return status;
5180 
5181 err_ice_add_prof_id_vsig:
5182 	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5183 	/* let caller clean up the change list */
5184 	ice_free(hw, t);
5185 	return status;
5186 }
5187 
5188 /**
5189  * ice_create_prof_id_vsig - add a new VSIG with a single profile
5190  * @hw: pointer to the HW struct
5191  * @blk: hardware block
5192  * @vsi: the initial VSI that will be in VSIG
5193  * @hdl: the profile handle of the profile that will be added to the VSIG
5194  * @chg: the change list
5195  */
5196 static enum ice_status
5197 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5198 			struct LIST_HEAD_TYPE *chg)
5199 {
5200 	enum ice_status status;
5201 	struct ice_chs_chg *p;
5202 	u16 new_vsig;
5203 
5204 	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5205 	if (!p)
5206 		return ICE_ERR_NO_MEMORY;
5207 
5208 	new_vsig = ice_vsig_alloc(hw, blk);
5209 	if (!new_vsig) {
5210 		status = ICE_ERR_HW_TABLE;
5211 		goto err_ice_create_prof_id_vsig;
5212 	}
5213 
5214 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5215 	if (status)
5216 		goto err_ice_create_prof_id_vsig;
5217 
5218 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5219 	if (status)
5220 		goto err_ice_create_prof_id_vsig;
5221 
5222 	p->type = ICE_VSIG_ADD;
5223 	p->vsi = vsi;
5224 	p->orig_vsig = ICE_DEFAULT_VSIG;
5225 	p->vsig = new_vsig;
5226 
5227 	LIST_ADD(&p->list_entry, chg);
5228 
5229 	return ICE_SUCCESS;
5230 
5231 err_ice_create_prof_id_vsig:
5232 	/* let caller clean up the change list */
5233 	ice_free(hw, p);
5234 	return status;
5235 }
5236 
5237 /**
5238  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5239  * @hw: pointer to the HW struct
5240  * @blk: hardware block
5241  * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
5243  * @new_vsig: return of new VSIG
5244  * @chg: the change list
5245  */
5246 static enum ice_status
5247 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5248 			 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
5249 			 struct LIST_HEAD_TYPE *chg)
5250 {
5251 	struct ice_vsig_prof *t;
5252 	enum ice_status status;
5253 	u16 vsig;
5254 
5255 	vsig = ice_vsig_alloc(hw, blk);
5256 	if (!vsig)
5257 		return ICE_ERR_HW_TABLE;
5258 
5259 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5260 	if (status)
5261 		return status;
5262 
5263 	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
5264 		/* Reverse the order here since we are copying the list */
5265 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5266 					      true, chg);
5267 		if (status)
5268 			return status;
5269 	}
5270 
5271 	*new_vsig = vsig;
5272 
5273 	return ICE_SUCCESS;
5274 }
5275 
5276 /**
5277  * ice_find_prof_vsig - find a VSIG with a specific profile handle
5278  * @hw: pointer to the HW struct
5279  * @blk: hardware block
5280  * @hdl: the profile handle of the profile to search for
5281  * @vsig: returns the VSIG with the matching profile
5282  */
5283 static bool
5284 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5285 {
5286 	struct ice_vsig_prof *t;
5287 	enum ice_status status;
5288 	struct LIST_HEAD_TYPE lst;
5289 
5290 	INIT_LIST_HEAD(&lst);
5291 
5292 	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
5293 	if (!t)
5294 		return false;
5295 
5296 	t->profile_cookie = hdl;
5297 	LIST_ADD(&t->list, &lst);
5298 
5299 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5300 
5301 	LIST_DEL(&t->list);
5302 	ice_free(hw, t);
5303 
5304 	return status == ICE_SUCCESS;
5305 }
5306 
5307 /**
5308  * ice_add_vsi_flow - add VSI flow
5309  * @hw: pointer to the HW struct
5310  * @blk: hardware block
5311  * @vsi: input VSI
5312  * @vsig: target VSIG to include the input VSI
5313  *
5314  * Calling this function will add the VSI to a given VSIG and
5315  * update the HW tables accordingly. This call can be used to
 * add multiple VSIs to a VSIG if we know beforehand that those
 * VSIs have the same characteristics as the VSIG. This saves the
 * time of generating a new VSIG and associated TCAM entries only
 * to roll them back when a matching VSIG is later found.
5320  */
5321 enum ice_status
5322 ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
5323 {
5324 	struct ice_chs_chg *tmp, *del;
5325 	struct LIST_HEAD_TYPE chg;
5326 	enum ice_status status;
5327 
	/* if the target VSIG is the default VSIG, the move is invalid */
5329 	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
5330 		return ICE_ERR_PARAM;
5331 
5332 	INIT_LIST_HEAD(&chg);
5333 
5334 	/* move VSI to the VSIG that matches */
5335 	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5336 	/* update hardware if success */
5337 	if (!status)
5338 		status = ice_upd_prof_hw(hw, blk, &chg);
5339 
5340 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5341 		LIST_DEL(&del->list_entry);
5342 		ice_free(hw, del);
5343 	}
5344 
5345 	return status;
5346 }
5347 
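/*
 * Usage sketch (illustrative; first_vsi and second_vsi are assumed
 * caller-side variables): once one VSI is configured through
 * ice_add_prof_id_flow(), a second VSI with identical characteristics
 * can be attached directly to the same VSIG:
 *
 *	u16 vsig;
 *
 *	if (!ice_vsig_find_vsi(hw, blk, first_vsi, &vsig) &&
 *	    (vsig & ICE_VSIG_IDX_M) != ICE_DEFAULT_VSIG)
 *		status = ice_add_vsi_flow(hw, blk, second_vsi, vsig);
 */
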
5348 /**
5349  * ice_add_prof_id_flow - add profile flow
5350  * @hw: pointer to the HW struct
5351  * @blk: hardware block
5352  * @vsi: the VSI to enable with the profile specified by ID
5353  * @hdl: profile handle
5354  *
5355  * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSI specified by the vsi
 * parameter. Once this function succeeds, the flow is enabled.
5358  */
5359 enum ice_status
5360 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5361 {
5362 	struct ice_vsig_prof *tmp1, *del1;
5363 	struct ice_chs_chg *tmp, *del;
5364 	struct LIST_HEAD_TYPE union_lst;
5365 	enum ice_status status;
5366 	struct LIST_HEAD_TYPE chg;
5367 	u16 vsig;
5368 
5369 	INIT_LIST_HEAD(&union_lst);
5370 	INIT_LIST_HEAD(&chg);
5371 
5372 	/* Get profile */
5373 	status = ice_get_prof(hw, blk, hdl, &chg);
5374 	if (status)
5375 		return status;
5376 
5377 	/* determine if VSI is already part of a VSIG */
5378 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5379 	if (!status && vsig) {
5380 		bool only_vsi;
5381 		u16 or_vsig;
5382 		u16 ref;
5383 
5384 		/* found in VSIG */
5385 		or_vsig = vsig;
5386 
5387 		/* make sure that there is no overlap/conflict between the new
5388 		 * characteristics and the existing ones; we don't support that
5389 		 * scenario
5390 		 */
5391 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5392 			status = ICE_ERR_ALREADY_EXISTS;
5393 			goto err_ice_add_prof_id_flow;
5394 		}
5395 
5396 		/* last VSI in the VSIG? */
5397 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5398 		if (status)
5399 			goto err_ice_add_prof_id_flow;
5400 		only_vsi = (ref == 1);
5401 
5402 		/* create a union of the current profiles and the one being
5403 		 * added
5404 		 */
5405 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5406 		if (status)
5407 			goto err_ice_add_prof_id_flow;
5408 
5409 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5410 		if (status)
5411 			goto err_ice_add_prof_id_flow;
5412 
5413 		/* search for an existing VSIG with an exact charc match */
5414 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5415 		if (!status) {
5416 			/* move VSI to the VSIG that matches */
5417 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5418 			if (status)
5419 				goto err_ice_add_prof_id_flow;
5420 
5421 			/* VSI has been moved out of or_vsig. If the or_vsig had
5422 			 * only that VSI it is now empty and can be removed.
5423 			 */
5424 			if (only_vsi) {
5425 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5426 				if (status)
5427 					goto err_ice_add_prof_id_flow;
5428 			}
5429 		} else if (only_vsi) {
5430 			/* If the original VSIG only contains one VSI, then it
5431 			 * will be the requesting VSI. In this case the VSI is
5432 			 * not sharing entries and we can simply add the new
5433 			 * profile to the VSIG.
5434 			 */
5435 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5436 						      &chg);
5437 			if (status)
5438 				goto err_ice_add_prof_id_flow;
5439 
5440 			/* Adjust priorities */
5441 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5442 			if (status)
5443 				goto err_ice_add_prof_id_flow;
5444 		} else {
5445 			/* No match, so we need a new VSIG */
5446 			status = ice_create_vsig_from_lst(hw, blk, vsi,
5447 							  &union_lst, &vsig,
5448 							  &chg);
5449 			if (status)
5450 				goto err_ice_add_prof_id_flow;
5451 
5452 			/* Adjust priorities */
5453 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5454 			if (status)
5455 				goto err_ice_add_prof_id_flow;
5456 		}
5457 	} else {
5458 		/* need to find or add a VSIG */
5459 		/* search for an existing VSIG with an exact charc match */
5460 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5461 			/* found an exact match */
5462 			/* add or move VSI to the VSIG that matches */
5463 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5464 			if (status)
5465 				goto err_ice_add_prof_id_flow;
5466 		} else {
5467 			/* we did not find an exact match */
5468 			/* we need to add a VSIG */
5469 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5470 							 &chg);
5471 			if (status)
5472 				goto err_ice_add_prof_id_flow;
5473 		}
5474 	}
5475 
5476 	/* update hardware */
5477 	if (!status)
5478 		status = ice_upd_prof_hw(hw, blk, &chg);
5479 
5480 err_ice_add_prof_id_flow:
5481 	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5482 		LIST_DEL(&del->list_entry);
5483 		ice_free(hw, del);
5484 	}
5485 
5486 	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
5487 		LIST_DEL(&del1->list);
5488 		ice_free(hw, del1);
5489 	}
5490 
5491 	return status;
5492 }
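
/* Usage sketch (illustrative only): enabling one profile for a single VSI.
 * `vsi_num` and `prof_id` are hypothetical names; `prof_id` stands for a
 * tracking handle under which the profile was previously registered with
 * this block. Treating ICE_ERR_ALREADY_EXISTS as success is one possible
 * caller policy, since it means the VSI's VSIG already carries this profile.
 *
 *	enum ice_status status;
 *
 *	status = ice_add_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 *	if (status == ICE_ERR_ALREADY_EXISTS)
 *		status = ICE_SUCCESS;
 */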

/**
 * ice_add_flow - add flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs to enable with the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}
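
/* Usage sketch (illustrative only): enabling a profile on several VSIs in
 * one call. Note that the loop above returns on the first failure, so VSIs
 * earlier in the array stay enabled; callers needing all-or-nothing
 * semantics must unwind themselves. `vsi_list` and `prof_id` are
 * hypothetical.
 *
 *	u16 vsi_list[3] = { 3, 4, 7 };
 *	enum ice_status status;
 *
 *	status = ice_add_flow(hw, ICE_BLK_RSS, vsi_list, 3, prof_id);
 */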

/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
		if (ent->profile_cookie == hdl) {
			LIST_DEL(&ent->list);
			ice_free(hw, ent);
			return ICE_SUCCESS;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the specified VSI. Once
 * successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (LIST_EMPTY(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* Found a VSIG with a matching profile list;
				 * move the VSI to that VSIG.
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}
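
/* Usage sketch (illustrative only): disabling the profile enabled in the
 * ice_add_prof_id_flow() example above, with the same hypothetical names.
 * When the last profile is removed from the VSI's characteristics, the
 * LIST_EMPTY() branch above moves the VSI back to ICE_DEFAULT_VSIG.
 *
 *	enum ice_status status;
 *
 *	status = ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
 */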

/**
 * ice_rem_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs from which to remove the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * This function will remove flows from the specified VSIs that were enabled
 * using ice_add_flow. The ID value indicates which profile will be removed.
 * Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}
5703