xref: /freebsd/sys/dev/ice/ice_ddp_common.c (revision 440addc642496f8d04fe17af9eb905ac4a5bdbd8)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2024, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "ice_ddp_common.h"
33 #include "ice_type.h"
34 #include "ice_common.h"
35 #include "ice_sched.h"
36 
37 /**
38  * ice_aq_download_pkg
39  * @hw: pointer to the hardware structure
40  * @pkg_buf: the package buffer to transfer
41  * @buf_size: the size of the package buffer
42  * @last_buf: last buffer indicator
43  * @error_offset: returns error offset
44  * @error_info: returns error information
45  * @cd: pointer to command details structure or NULL
46  *
47  * Download Package (0x0C40)
48  */
49 static int
50 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
51 		    u16 buf_size, bool last_buf, u32 *error_offset,
52 		    u32 *error_info, struct ice_sq_cd *cd)
53 {
54 	struct ice_aqc_download_pkg *cmd;
55 	struct ice_aq_desc desc;
56 	int status;
57 
58 	if (error_offset)
59 		*error_offset = 0;
60 	if (error_info)
61 		*error_info = 0;
62 
63 	cmd = &desc.params.download_pkg;
64 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
65 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
66 
67 	if (last_buf)
68 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
69 
70 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
71 	if (status == ICE_ERR_AQ_ERROR) {
72 		/* Read error from buffer only when the FW returned an error */
73 		struct ice_aqc_download_pkg_resp *resp;
74 
75 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
76 		if (error_offset)
77 			*error_offset = LE32_TO_CPU(resp->error_offset);
78 		if (error_info)
79 			*error_info = LE32_TO_CPU(resp->error_info);
80 	}
81 
82 	return status;
83 }
84 
85 /**
86  * ice_aq_upload_section
87  * @hw: pointer to the hardware structure
88  * @pkg_buf: the package buffer which will receive the section
89  * @buf_size: the size of the package buffer
90  * @cd: pointer to command details structure or NULL
91  *
92  * Upload Section (0x0C41)
93  */
94 int
95 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
96 		      u16 buf_size, struct ice_sq_cd *cd)
97 {
98 	struct ice_aq_desc desc;
99 
100 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
101 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
102 
103 	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
104 }
105 
106 /**
107  * ice_aq_update_pkg
108  * @hw: pointer to the hardware structure
109  * @pkg_buf: the package cmd buffer
110  * @buf_size: the size of the package cmd buffer
111  * @last_buf: last buffer indicator
112  * @error_offset: returns error offset
113  * @error_info: returns error information
114  * @cd: pointer to command details structure or NULL
115  *
116  * Update Package (0x0C42)
117  */
118 static int
119 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
120 		  bool last_buf, u32 *error_offset, u32 *error_info,
121 		  struct ice_sq_cd *cd)
122 {
123 	struct ice_aqc_download_pkg *cmd;
124 	struct ice_aq_desc desc;
125 	int status;
126 
127 	if (error_offset)
128 		*error_offset = 0;
129 	if (error_info)
130 		*error_info = 0;
131 
132 	cmd = &desc.params.download_pkg;
133 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
134 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
135 
136 	if (last_buf)
137 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
138 
139 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
140 	if (status == ICE_ERR_AQ_ERROR) {
141 		/* Read error from buffer only when the FW returned an error */
142 		struct ice_aqc_download_pkg_resp *resp;
143 
144 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
145 		if (error_offset)
146 			*error_offset = LE32_TO_CPU(resp->error_offset);
147 		if (error_info)
148 			*error_info = LE32_TO_CPU(resp->error_info);
149 	}
150 
151 	return status;
152 }
153 
154 /**
155  * ice_find_seg_in_pkg
156  * @hw: pointer to the hardware structure
157  * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
158  * @pkg_hdr: pointer to the package header to be searched
159  *
160  * This function searches a package file for a particular segment type. On
161  * success it returns a pointer to the segment header, otherwise it will
162  * return NULL.
163  */
164 struct ice_generic_seg_hdr *
165 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
166 		    struct ice_pkg_hdr *pkg_hdr)
167 {
168 	u32 i;
169 
170 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
171 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
172 		  pkg_hdr->pkg_format_ver.update,
173 		  pkg_hdr->pkg_format_ver.draft);
174 
175 	/* Search all package segments for the requested segment type */
176 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
177 		struct ice_generic_seg_hdr *seg;
178 
179 		seg = (struct ice_generic_seg_hdr *)
180 			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
181 
182 		if (LE32_TO_CPU(seg->seg_type) == seg_type)
183 			return seg;
184 	}
185 
186 	return NULL;
187 }
188 
189 /**
190  * ice_get_pkg_seg_by_idx
191  * @pkg_hdr: pointer to the package header to be searched
192  * @idx: index of segment
193  */
194 static struct ice_generic_seg_hdr *
195 ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
196 {
197 	struct ice_generic_seg_hdr *seg = NULL;
198 
199 	if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
200 		seg = (struct ice_generic_seg_hdr *)
201 			((u8 *)pkg_hdr +
202 			 LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
203 
204 	return seg;
205 }
206 
207 /**
208  * ice_is_signing_seg_at_idx - determine if segment is a signing segment
209  * @pkg_hdr: pointer to package header
210  * @idx: segment index
211  */
212 static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
213 {
214 	struct ice_generic_seg_hdr *seg;
215 	bool retval = false;
216 
217 	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
218 	if (seg)
219 		retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
220 
221 	return retval;
222 }
223 
224 /**
225  * ice_is_signing_seg_type_at_idx
226  * @pkg_hdr: pointer to package header
227  * @idx: segment index
228  * @seg_id: segment id that is expected
229  * @sign_type: signing type
230  *
231  * Determine if a segment is a signing segment of the correct type
232  */
233 static bool
234 ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
235 			       u32 seg_id, u32 sign_type)
236 {
237 	bool result = false;
238 
239 	if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
240 		struct ice_sign_seg *seg;
241 
242 		seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
243 								    idx);
244 		if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
245 		    LE32_TO_CPU(seg->sign_type) == sign_type)
246 			result = true;
247 	}
248 
249 	return result;
250 }
251 
252 /**
253  * ice_update_pkg_no_lock
254  * @hw: pointer to the hardware structure
255  * @bufs: pointer to an array of buffers
256  * @count: the number of buffers in the array
257  */
258 int
259 ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
260 {
261 	int status = 0;
262 	u32 i;
263 
264 	for (i = 0; i < count; i++) {
265 		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
266 		bool last = ((i + 1) == count);
267 		u32 offset, info;
268 
269 		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
270 					   last, &offset, &info, NULL);
271 
272 		if (status) {
273 			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
274 				  status, offset, info);
275 			break;
276 		}
277 	}
278 
279 	return status;
280 }
281 
282 /**
283  * ice_update_pkg
284  * @hw: pointer to the hardware structure
285  * @bufs: pointer to an array of buffers
286  * @count: the number of buffers in the array
287  *
288  * Obtains change lock and updates package.
289  */
290 int
291 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
292 {
293 	int status;
294 
295 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
296 	if (status)
297 		return status;
298 
299 	status = ice_update_pkg_no_lock(hw, bufs, count);
300 
301 	ice_release_change_lock(hw);
302 
303 	return status;
304 }
305 
306 static enum ice_ddp_state
307 ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
308 {
309 	switch (aq_err) {
310 	case ICE_AQ_RC_ENOSEC:
311 		return ICE_DDP_PKG_NO_SEC_MANIFEST;
312 	case ICE_AQ_RC_EBADSIG:
313 		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
314 	case ICE_AQ_RC_ESVN:
315 		return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
316 	case ICE_AQ_RC_EBADMAN:
317 		return ICE_DDP_PKG_MANIFEST_INVALID;
318 	case ICE_AQ_RC_EBADBUF:
319 		return ICE_DDP_PKG_BUFFER_INVALID;
320 	default:
321 		return ICE_DDP_PKG_ERR;
322 	}
323 }
324 
325 /**
326  * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
327  * @buf: pointer to buffer header
328  */
329 static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
330 {
331 	bool metadata = false;
332 
333 	if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
334 		metadata = true;
335 
336 	return metadata;
337 }
338 
339 /**
340  * ice_is_last_download_buffer
341  * @buf: pointer to current buffer header
342  * @idx: index of the buffer in the current sequence
343  * @count: the buffer count in the current sequence
344  *
345  * Note: this routine should only be called if the buffer is not the last buffer
346  */
347 static bool
348 ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
349 {
350 	bool last = ((idx + 1) == count);
351 
352 	/* A set metadata flag in the next buffer will signal that the current
353 	 * buffer will be the last buffer downloaded
354 	 */
355 	if (!last) {
356 		struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
357 
358 		last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
359 	}
360 
361 	return last;
362 }
363 
364 /**
365  * ice_dwnld_cfg_bufs_no_lock
366  * @hw: pointer to the hardware structure
367  * @bufs: pointer to an array of buffers
368  * @start: buffer index of first buffer to download
369  * @count: the number of buffers to download
370  * @indicate_last: if true, then set last buffer flag on last buffer download
371  *
372  * Downloads package configuration buffers to the firmware. Metadata buffers
373  * are skipped, and the first metadata buffer found indicates that the rest
374  * of the buffers are all metadata buffers.
375  */
376 static enum ice_ddp_state
377 ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
378 			   u32 count, bool indicate_last)
379 {
380 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
381 	struct ice_buf_hdr *bh;
382 	enum ice_aq_err err;
383 	u32 offset, info, i;
384 
385 	if (!bufs || !count)
386 		return ICE_DDP_PKG_ERR;
387 
388 	/* If the first buffer's first section has its metadata bit set
389 	 * then there are no buffers to be downloaded, and the operation is
390 	 * considered a success.
391 	 */
392 	bh = (struct ice_buf_hdr *)(bufs + start);
393 	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
394 		return ICE_DDP_PKG_SUCCESS;
395 
396 	for (i = 0; i < count; i++) {
397 		bool last = false;
398 		int status;
399 
400 		bh = (struct ice_buf_hdr *)(bufs + start + i);
401 
402 		if (indicate_last)
403 			last = ice_is_last_download_buffer(bh, i, count);
404 
405 		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
406 					     &offset, &info, NULL);
407 
408 		/* Save AQ status from download package */
409 		if (status) {
410 			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
411 				  status, offset, info);
412 			err = hw->adminq.sq_last_status;
413 			state = ice_map_aq_err_to_ddp_state(err);
414 			break;
415 		}
416 
417 		if (last)
418 			break;
419 	}
420 
421 	return state;
422 }
423 
424 /**
425  * ice_aq_get_pkg_info_list
426  * @hw: pointer to the hardware structure
427  * @pkg_info: the buffer which will receive the information list
428  * @buf_size: the size of the pkg_info information buffer
429  * @cd: pointer to command details structure or NULL
430  *
431  * Get Package Info List (0x0C43)
432  */
433 static int
434 ice_aq_get_pkg_info_list(struct ice_hw *hw,
435 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
436 			 u16 buf_size, struct ice_sq_cd *cd)
437 {
438 	struct ice_aq_desc desc;
439 
440 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
441 
442 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
443 }
444 
445 /**
446  * ice_get_pkg_segment_id - get correct package segment id, based on device
447  * @mac_type: MAC type of the device
448  */
449 static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
450 {
451 	u32 seg_id;
452 
453 	switch (mac_type) {
454 	case ICE_MAC_E830:
455 		seg_id = SEGMENT_TYPE_ICE_E830;
456 		break;
457 	case ICE_MAC_GENERIC:
458 	case ICE_MAC_GENERIC_3K:
459 	case ICE_MAC_GENERIC_3K_E825:
460 	default:
461 		seg_id = SEGMENT_TYPE_ICE_E810;
462 		break;
463 	}
464 
465 	return seg_id;
466 }
467 
468 /**
469  * ice_get_pkg_sign_type - get package segment sign type, based on device
470  * @mac_type: MAC type of the device
471  */
472 static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
473 {
474 	u32 sign_type;
475 
476 	switch (mac_type) {
477 	case ICE_MAC_E830:
478 		sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
479 		break;
480 	case ICE_MAC_GENERIC_3K:
481 		sign_type = SEGMENT_SIGN_TYPE_RSA3K;
482 		break;
483 	case ICE_MAC_GENERIC_3K_E825:
484 		sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
485 		break;
486 	case ICE_MAC_GENERIC:
487 	default:
488 		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
489 		break;
490 	}
491 
492 	return sign_type;
493 }
494 
495 /**
496  * ice_get_signing_req - get correct package requirements, based on device
497  * @hw: pointer to the hardware structure
498  */
499 static void ice_get_signing_req(struct ice_hw *hw)
500 {
501 	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
502 	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
503 }
504 
505 /**
506  * ice_download_pkg_sig_seg - download a signature segment
507  * @hw: pointer to the hardware structure
508  * @seg: pointer to signature segment
509  */
510 static enum ice_ddp_state
511 ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
512 {
513 	enum ice_ddp_state state;
514 
515 	state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
516 					   LE32_TO_CPU(seg->buf_tbl.buf_count),
517 					   false);
518 
519 	return state;
520 }
521 
522 /**
523  * ice_download_pkg_config_seg - download a config segment
524  * @hw: pointer to the hardware structure
525  * @pkg_hdr: pointer to package header
526  * @idx: segment index
527  * @start: starting buffer
528  * @count: buffer count
529  * @last_seg: last segment being downloaded
530  *
531  * Note: idx must reference a ICE segment
532  */
533 static enum ice_ddp_state
534 ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
535 			    u32 idx, u32 start, u32 count, bool last_seg)
536 {
537 	struct ice_buf_table *bufs;
538 	enum ice_ddp_state state;
539 	struct ice_seg *seg;
540 	u32 buf_count;
541 
542 	seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
543 	if (!seg)
544 		return ICE_DDP_PKG_ERR;
545 
546 	bufs = ice_find_buf_table(seg);
547 	buf_count = LE32_TO_CPU(bufs->buf_count);
548 
549 	if (start >= buf_count || start + count > buf_count)
550 		return ICE_DDP_PKG_ERR;
551 
552 	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
553 					   last_seg);
554 
555 	return state;
556 }
557 
558 /**
559  * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
560  * @hw: pointer to the hardware structure
561  * @pkg_hdr: pointer to package header
562  * @idx: segment index (must be a signature segment)
563  *
564  * Note: idx must reference a signature segment
565  */
566 static enum ice_ddp_state
567 ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
568 			    u32 idx)
569 {
570 	enum ice_ddp_state state;
571 	struct ice_sign_seg *seg;
572 	bool last_seg = true;
573 	u32 conf_idx;
574 	u32 start;
575 	u32 count;
576 	u32 flags;
577 
578 	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
579 	if (!seg) {
580 		state = ICE_DDP_PKG_ERR;
581 		goto exit;
582 	}
583 
584 	conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
585 	start = LE32_TO_CPU(seg->signed_buf_start);
586 	count = LE32_TO_CPU(seg->signed_buf_count);
587 	flags = LE32_TO_CPU(seg->flags);
588 
589 	if (flags & ICE_SIGN_SEG_FLAGS_VALID)
590 		last_seg = !!(flags & ICE_SIGN_SEG_FLAGS_LAST);
591 
592 	state = ice_download_pkg_sig_seg(hw, seg);
593 	if (state)
594 		goto exit;
595 
596 	if (count == 0) {
597 		/* this is a "Reference Signature Segment" and download should
598 		 * be only for the buffers in the signature segment (and not
599 		 * the hardware configuration segment)
600 		 */
601 		goto exit;
602 	}
603 
604 	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
605 					    count, last_seg);
606 
607 exit:
608 	return state;
609 }
610 
611 /**
612  * ice_match_signing_seg - determine if a matching signing segment exists
613  * @pkg_hdr: pointer to package header
614  * @seg_id: segment id that is expected
615  * @sign_type: signing type
616  */
617 static bool
618 ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
619 {
620 	bool match = false;
621 	u32 i;
622 
623 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
624 		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
625 						   sign_type)) {
626 			match = true;
627 			break;
628 		}
629 	}
630 
631 	return match;
632 }
633 
634 /**
635  * ice_post_dwnld_pkg_actions - perform post download package actions
636  * @hw: pointer to the hardware structure
637  */
638 static enum ice_ddp_state
639 ice_post_dwnld_pkg_actions(struct ice_hw *hw)
640 {
641 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
642 	int status;
643 
644 	status = ice_set_vlan_mode(hw);
645 	if (status) {
646 		ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
647 			  status);
648 		state = ICE_DDP_PKG_ERR;
649 	}
650 
651 	return state;
652 }
653 
654 /**
655  * ice_download_pkg_with_sig_seg - download package using signature segments
656  * @hw: pointer to the hardware structure
657  * @pkg_hdr: pointer to package header
658  */
659 static enum ice_ddp_state
660 ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
661 {
662 	enum ice_aq_err aq_err = hw->adminq.sq_last_status;
663 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
664 	int status;
665 	u32 i;
666 
667 	ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
668 	ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
669 
670 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
671 	if (status) {
672 		if (status == ICE_ERR_AQ_NO_WORK)
673 			state = ICE_DDP_PKG_ALREADY_LOADED;
674 		else
675 			state = ice_map_aq_err_to_ddp_state(aq_err);
676 		return state;
677 	}
678 
679 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
680 		if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
681 						    hw->pkg_sign_type))
682 			continue;
683 
684 		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
685 		if (state)
686 			break;
687 	}
688 
689 	if (!state)
690 		state = ice_post_dwnld_pkg_actions(hw);
691 
692 	ice_release_global_cfg_lock(hw);
693 
694 	return state;
695 }
696 
697 /**
698  * ice_dwnld_cfg_bufs
699  * @hw: pointer to the hardware structure
700  * @bufs: pointer to an array of buffers
701  * @count: the number of buffers in the array
702  *
703  * Obtains global config lock and downloads the package configuration buffers
704  * to the firmware.
705  */
706 static enum ice_ddp_state
707 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
708 {
709 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
710 	struct ice_buf_hdr *bh;
711 	int status;
712 
713 	if (!bufs || !count)
714 		return ICE_DDP_PKG_ERR;
715 
716 	/* If the first buffer's first section has its metadata bit set
717 	 * then there are no buffers to be downloaded, and the operation is
718 	 * considered a success.
719 	 */
720 	bh = (struct ice_buf_hdr *)bufs;
721 	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
722 		return ICE_DDP_PKG_SUCCESS;
723 
724 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
725 	if (status) {
726 		if (status == ICE_ERR_AQ_NO_WORK)
727 			return ICE_DDP_PKG_ALREADY_LOADED;
728 		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
729 	}
730 
731 	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
732 	if (!state)
733 		state = ice_post_dwnld_pkg_actions(hw);
734 
735 	ice_release_global_cfg_lock(hw);
736 
737 	return state;
738 }
739 
740 /**
741  * ice_download_pkg_without_sig_seg
742  * @hw: pointer to the hardware structure
743  * @ice_seg: pointer to the segment of the package to be downloaded
744  *
745  * Handles the download of a complete package without signature segment.
746  */
747 static enum ice_ddp_state
748 ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
749 {
750 	struct ice_buf_table *ice_buf_tbl;
751 	enum ice_ddp_state state;
752 
753 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
754 		  ice_seg->hdr.seg_format_ver.major,
755 		  ice_seg->hdr.seg_format_ver.minor,
756 		  ice_seg->hdr.seg_format_ver.update,
757 		  ice_seg->hdr.seg_format_ver.draft);
758 
759 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
760 		  LE32_TO_CPU(ice_seg->hdr.seg_type),
761 		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
762 
763 	ice_buf_tbl = ice_find_buf_table(ice_seg);
764 
765 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
766 		  LE32_TO_CPU(ice_buf_tbl->buf_count));
767 
768 	state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
769 				   LE32_TO_CPU(ice_buf_tbl->buf_count));
770 
771 	return state;
772 }
773 
774 /**
775  * ice_download_pkg
776  * @hw: pointer to the hardware structure
777  * @pkg_hdr: pointer to package header
778  * @ice_seg: pointer to the segment of the package to be downloaded
779  *
780  * Handles the download of a complete package.
781  */
782 static enum ice_ddp_state
783 ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
784 		 struct ice_seg *ice_seg)
785 {
786 	enum ice_ddp_state state;
787 
788 	if (ice_match_signing_seg(pkg_hdr, hw->pkg_seg_id, hw->pkg_sign_type))
789 		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
790 	else
791 		state = ice_download_pkg_without_sig_seg(hw, ice_seg);
792 
793 	ice_post_pkg_dwnld_vlan_mode_cfg(hw);
794 
795 	return state;
796 }
797 
798 /**
799  * ice_init_pkg_info
800  * @hw: pointer to the hardware structure
801  * @pkg_hdr: pointer to the driver's package hdr
802  *
803  * Saves off the package details into the HW structure.
804  */
805 static enum ice_ddp_state
806 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
807 {
808 	struct ice_generic_seg_hdr *seg_hdr;
809 
810 	if (!pkg_hdr)
811 		return ICE_DDP_PKG_ERR;
812 
813 	ice_get_signing_req(hw);
814 
815 	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
816 		  hw->pkg_seg_id);
817 
818 	seg_hdr = (struct ice_generic_seg_hdr *)
819 		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
820 	if (seg_hdr) {
821 		struct ice_meta_sect *meta;
822 		struct ice_pkg_enum state;
823 
824 		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
825 
826 		/* Get package information from the Metadata Section */
827 		meta = (struct ice_meta_sect *)
828 			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
829 					     ICE_SID_METADATA);
830 		if (!meta) {
831 			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
832 			return ICE_DDP_PKG_INVALID_FILE;
833 		}
834 
835 		hw->pkg_ver = meta->ver;
836 		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
837 			   ICE_NONDMA_TO_NONDMA);
838 
839 		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
840 			  meta->ver.major, meta->ver.minor, meta->ver.update,
841 			  meta->ver.draft, meta->name);
842 
843 		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
844 		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
845 			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
846 
847 		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
848 			  seg_hdr->seg_format_ver.major,
849 			  seg_hdr->seg_format_ver.minor,
850 			  seg_hdr->seg_format_ver.update,
851 			  seg_hdr->seg_format_ver.draft,
852 			  seg_hdr->seg_id);
853 	} else {
854 		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
855 		return ICE_DDP_PKG_INVALID_FILE;
856 	}
857 
858 	return ICE_DDP_PKG_SUCCESS;
859 }
860 
861 /**
862  * ice_get_pkg_info
863  * @hw: pointer to the hardware structure
864  *
865  * Store details of the package currently loaded in HW into the HW structure.
866  */
867 enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
868 {
869 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
870 	struct ice_aqc_get_pkg_info_resp *pkg_info;
871 	u16 size;
872 	u32 i;
873 
874 	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
875 	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
876 	if (!pkg_info)
877 		return ICE_DDP_PKG_ERR;
878 
879 	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
880 		state = ICE_DDP_PKG_ERR;
881 		goto init_pkg_free_alloc;
882 	}
883 
884 	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
885 #define ICE_PKG_FLAG_COUNT	4
886 		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
887 		u8 place = 0;
888 
889 		if (pkg_info->pkg_info[i].is_active) {
890 			flags[place++] = 'A';
891 			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
892 			hw->active_track_id =
893 				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
894 			ice_memcpy(hw->active_pkg_name,
895 				   pkg_info->pkg_info[i].name,
896 				   sizeof(pkg_info->pkg_info[i].name),
897 				   ICE_NONDMA_TO_NONDMA);
898 			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
899 		}
900 		if (pkg_info->pkg_info[i].is_active_at_boot)
901 			flags[place++] = 'B';
902 		if (pkg_info->pkg_info[i].is_modified)
903 			flags[place++] = 'M';
904 		if (pkg_info->pkg_info[i].is_in_nvm)
905 			flags[place++] = 'N';
906 
907 		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
908 			  i, pkg_info->pkg_info[i].ver.major,
909 			  pkg_info->pkg_info[i].ver.minor,
910 			  pkg_info->pkg_info[i].ver.update,
911 			  pkg_info->pkg_info[i].ver.draft,
912 			  pkg_info->pkg_info[i].name, flags);
913 	}
914 
915 init_pkg_free_alloc:
916 	ice_free(hw, pkg_info);
917 
918 	return state;
919 }
920 
921 /**
922  * ice_label_enum_handler
923  * @sect_type: section type
924  * @section: pointer to section
925  * @index: index of the label entry to be returned
926  * @offset: pointer to receive absolute offset, always zero for label sections
927  *
928  * This is a callback function that can be passed to ice_pkg_enum_entry.
929  * Handles enumeration of individual label entries.
930  */
931 static void *
932 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
933 		       u32 *offset)
934 {
935 	struct ice_label_section *labels;
936 
937 	if (!section)
938 		return NULL;
939 
940 	if (index > ICE_MAX_LABELS_IN_BUF)
941 		return NULL;
942 
943 	if (offset)
944 		*offset = 0;
945 
946 	labels = (struct ice_label_section *)section;
947 	if (index >= LE16_TO_CPU(labels->count))
948 		return NULL;
949 
950 	return labels->label + index;
951 }
952 
953 /**
954  * ice_enum_labels
955  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
956  * @type: the section type that will contain the label (0 on subsequent calls)
957  * @state: ice_pkg_enum structure that will hold the state of the enumeration
958  * @value: pointer to a value that will return the label's value if found
959  *
960  * Enumerates a list of labels in the package. The caller will call
961  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
962  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
963  * the end of the list has been reached.
964  */
965 static char *
966 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
967 		u16 *value)
968 {
969 	struct ice_label *label;
970 
971 	/* Check for valid label section on first call */
972 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
973 		return NULL;
974 
975 	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
976 						       NULL,
977 						       ice_label_enum_handler);
978 	if (!label)
979 		return NULL;
980 
981 	*value = LE16_TO_CPU(label->value);
982 	return label->name;
983 }
984 
985 /**
986  * ice_find_label_value
987  * @ice_seg: pointer to the ice segment (non-NULL)
988  * @name: name of the label to search for
989  * @type: the section type that will contain the label
990  * @value: pointer to a value that will return the label's value if found
991  *
992  * Finds a label's value given the label name and the section type to search.
993  * The ice_seg parameter must not be NULL since the first call to
994  * ice_enum_labels requires a pointer to an actual ice_seg structure.
995  */
996 int
997 ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
998 		     u16 *value)
999 {
1000 	struct ice_pkg_enum state;
1001 	char *label_name;
1002 	u16 val;
1003 
1004 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1005 
1006 	if (!ice_seg)
1007 		return ICE_ERR_PARAM;
1008 
1009 	do {
1010 		label_name = ice_enum_labels(ice_seg, type, &state, &val);
1011 		if (label_name && !strcmp(label_name, name)) {
1012 			*value = val;
1013 			return 0;
1014 		}
1015 
1016 		ice_seg = NULL;
1017 	} while (label_name);
1018 
1019 	return ICE_ERR_CFG;
1020 }
1021 
1022 /**
1023  * ice_verify_pkg - verify package
1024  * @pkg: pointer to the package buffer
1025  * @len: size of the package buffer
1026  *
1027  * Verifies various attributes of the package file, including length, format
1028  * version, and the requirement of at least one segment.
1029  */
1030 enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1031 {
1032 	u32 seg_count;
1033 	u32 i;
1034 
1035 	if (len < ice_struct_size(pkg, seg_offset, 1))
1036 		return ICE_DDP_PKG_INVALID_FILE;
1037 
1038 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1039 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1040 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1041 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1042 		return ICE_DDP_PKG_INVALID_FILE;
1043 
1044 	/* pkg must have at least one segment */
1045 	seg_count = LE32_TO_CPU(pkg->seg_count);
1046 	if (seg_count < 1)
1047 		return ICE_DDP_PKG_INVALID_FILE;
1048 
1049 	/* make sure segment array fits in package length */
1050 	if (len < ice_struct_size(pkg, seg_offset, seg_count))
1051 		return ICE_DDP_PKG_INVALID_FILE;
1052 
1053 	/* all segments must fit within length */
1054 	for (i = 0; i < seg_count; i++) {
1055 		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1056 		struct ice_generic_seg_hdr *seg;
1057 
1058 		/* segment header must fit */
1059 		if (len < off + sizeof(*seg))
1060 			return ICE_DDP_PKG_INVALID_FILE;
1061 
1062 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1063 
1064 		/* segment body must fit */
1065 		if (len < off + LE32_TO_CPU(seg->seg_size))
1066 			return ICE_DDP_PKG_INVALID_FILE;
1067 	}
1068 
1069 	return ICE_DDP_PKG_SUCCESS;
1070 }
1071 
1072 /**
1073  * ice_free_seg - free package segment pointer
1074  * @hw: pointer to the hardware structure
1075  *
1076  * Frees the package segment pointer in the proper manner, depending on if the
1077  * segment was allocated or just the passed in pointer was stored.
1078  */
1079 void ice_free_seg(struct ice_hw *hw)
1080 {
1081 	if (hw->pkg_copy) {
1082 		ice_free(hw, hw->pkg_copy);
1083 		hw->pkg_copy = NULL;
1084 		hw->pkg_size = 0;
1085 	}
1086 	hw->seg = NULL;
1087 }
1088 
1089 /**
1090  * ice_chk_pkg_version - check package version for compatibility with driver
1091  * @pkg_ver: pointer to a version structure to check
1092  *
1093  * Check to make sure that the package about to be downloaded is compatible with
1094  * the driver. To be compatible, the major and minor components of the package
1095  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1096  * definitions.
1097  */
1098 static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1099 {
1100 	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
1101 	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1102 	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
1103 		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
1104 	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
1105 		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1106 		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
1107 		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
1108 	return ICE_DDP_PKG_SUCCESS;
1109 }
1110 
1111 /**
1112  * ice_chk_pkg_compat
1113  * @hw: pointer to the hardware structure
1114  * @ospkg: pointer to the package hdr
1115  * @seg: pointer to the package segment hdr
1116  *
1117  * This function checks the package version compatibility with driver and NVM
1118  */
1119 static enum ice_ddp_state
1120 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1121 		   struct ice_seg **seg)
1122 {
1123 	struct ice_aqc_get_pkg_info_resp *pkg;
1124 	enum ice_ddp_state state;
1125 	u16 size;
1126 	u32 i;
1127 
1128 	/* Check package version compatibility */
1129 	state = ice_chk_pkg_version(&hw->pkg_ver);
1130 	if (state) {
1131 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1132 		return state;
1133 	}
1134 
1135 	/* find ICE segment in given package */
1136 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
1137 						     ospkg);
1138 	if (!*seg) {
1139 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1140 		return ICE_DDP_PKG_INVALID_FILE;
1141 	}
1142 
1143 	/* Check if FW is compatible with the OS package */
1144 	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1145 	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1146 	if (!pkg)
1147 		return ICE_DDP_PKG_ERR;
1148 
1149 	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1150 		state = ICE_DDP_PKG_ERR;
1151 		goto fw_ddp_compat_free_alloc;
1152 	}
1153 
1154 	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1155 		/* loop till we find the NVM package */
1156 		if (!pkg->pkg_info[i].is_in_nvm)
1157 			continue;
1158 		if ((*seg)->hdr.seg_format_ver.major !=
1159 			pkg->pkg_info[i].ver.major ||
1160 		    (*seg)->hdr.seg_format_ver.minor >
1161 			pkg->pkg_info[i].ver.minor) {
1162 			state = ICE_DDP_PKG_FW_MISMATCH;
1163 			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1164 		}
1165 		/* done processing NVM package so break */
1166 		break;
1167 	}
1168 fw_ddp_compat_free_alloc:
1169 	ice_free(hw, pkg);
1170 	return state;
1171 }
1172 
1173 /**
1174  * ice_sw_fv_handler
1175  * @sect_type: section type
1176  * @section: pointer to section
1177  * @index: index of the field vector entry to be returned
1178  * @offset: ptr to variable that receives the offset in the field vector table
1179  *
1180  * This is a callback function that can be passed to ice_pkg_enum_entry.
1181  * This function treats the given section as of type ice_sw_fv_section and
1182  * enumerates offset field. "offset" is an index into the field vector table.
1183  */
1184 static void *
1185 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1186 {
1187 	struct ice_sw_fv_section *fv_section =
1188 		(struct ice_sw_fv_section *)section;
1189 
1190 	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1191 		return NULL;
1192 	if (index >= LE16_TO_CPU(fv_section->count))
1193 		return NULL;
1194 	if (offset)
1195 		/* "index" passed in to this function is relative to a given
1196 		 * 4k block. To get to the true index into the field vector
1197 		 * table need to add the relative index to the base_offset
1198 		 * field of this section
1199 		 */
1200 		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
1201 	return fv_section->fv + index;
1202 }
1203 
1204 /**
1205  * ice_get_prof_index_max - get the max profile index for used profile
1206  * @hw: pointer to the HW struct
1207  *
1208  * Calling this function will get the max profile index for used profile
1209  * and store the index number in struct ice_switch_info *switch_info
1210  * in hw for following use.
1211  */
1212 static int ice_get_prof_index_max(struct ice_hw *hw)
1213 {
1214 	u16 prof_index = 0, j, max_prof_index = 0;
1215 	struct ice_pkg_enum state;
1216 	struct ice_seg *ice_seg;
1217 	bool flag = false;
1218 	struct ice_fv *fv;
1219 	u32 offset;
1220 
1221 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1222 
1223 	if (!hw->seg)
1224 		return ICE_ERR_PARAM;
1225 
1226 	ice_seg = hw->seg;
1227 
1228 	do {
1229 		fv = (struct ice_fv *)
1230 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1231 					   &offset, ice_sw_fv_handler);
1232 		if (!fv)
1233 			break;
1234 		ice_seg = NULL;
1235 
1236 		/* in the profile that not be used, the prot_id is set to 0xff
1237 		 * and the off is set to 0x1ff for all the field vectors.
1238 		 */
1239 		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1240 			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1241 			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1242 				flag = true;
1243 		if (flag && prof_index > max_prof_index)
1244 			max_prof_index = prof_index;
1245 
1246 		prof_index++;
1247 		flag = false;
1248 	} while (fv);
1249 
1250 	hw->switch_info->max_used_prof_index = max_prof_index;
1251 
1252 	return 0;
1253 }
1254 
1255 /**
1256  * ice_get_ddp_pkg_state - get DDP pkg state after download
1257  * @hw: pointer to the HW struct
1258  * @already_loaded: indicates if pkg was already loaded onto the device
1259  *
1260  */
1261 static enum ice_ddp_state
1262 ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
1263 {
1264 	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
1265 	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
1266 	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
1267 	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
1268 	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
1269 		if (already_loaded)
1270 			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
1271 		else
1272 			return ICE_DDP_PKG_SUCCESS;
1273 	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
1274 		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
1275 		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
1276 	} else {
1277 		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
1278 	}
1279 }
1280 
1281 /**
1282  * ice_init_pkg_regs - initialize additional package registers
1283  * @hw: pointer to the hardware structure
1284  */
1285 static void ice_init_pkg_regs(struct ice_hw *hw)
1286 {
1287 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1288 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1289 #define ICE_SW_BLK_IDX	0
1290 
1291 	/* setup Switch block input mask, which is 48-bits in two parts */
1292 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1293 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1294 }
1295 
1296 /**
1297  * ice_init_pkg - initialize/download package
1298  * @hw: pointer to the hardware structure
1299  * @buf: pointer to the package buffer
1300  * @len: size of the package buffer
1301  *
1302  * This function initializes a package. The package contains HW tables
1303  * required to do packet processing. First, the function extracts package
1304  * information such as version. Then it finds the ice configuration segment
1305  * within the package; this function then saves a copy of the segment pointer
1306  * within the supplied package buffer. Next, the function will cache any hints
1307  * from the package, followed by downloading the package itself. Note, that if
1308  * a previous PF driver has already downloaded the package successfully, then
1309  * the current driver will not have to download the package again.
1310  *
1311  * The local package contents will be used to query default behavior and to
1312  * update specific sections of the HW's version of the package (e.g. to update
1313  * the parse graph to understand new protocols).
1314  *
1315  * This function stores a pointer to the package buffer memory, and it is
1316  * expected that the supplied buffer will not be freed immediately. If the
1317  * package buffer needs to be freed, such as when read from a file, use
1318  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1319  * case.
1320  */
1321 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1322 {
1323 	bool already_loaded = false;
1324 	enum ice_ddp_state state;
1325 	struct ice_pkg_hdr *pkg;
1326 	struct ice_seg *seg;
1327 
1328 	if (!buf || !len)
1329 		return ICE_DDP_PKG_ERR;
1330 
1331 	pkg = (struct ice_pkg_hdr *)buf;
1332 	state = ice_verify_pkg(pkg, len);
1333 	if (state) {
1334 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1335 			  state);
1336 		return state;
1337 	}
1338 
1339 	/* initialize package info */
1340 	state = ice_init_pkg_info(hw, pkg);
1341 	if (state)
1342 		return state;
1343 
1344 	/* before downloading the package, check package version for
1345 	 * compatibility with driver
1346 	 */
1347 	state = ice_chk_pkg_compat(hw, pkg, &seg);
1348 	if (state)
1349 		return state;
1350 
1351 	/* initialize package hints and then download package */
1352 	ice_init_pkg_hints(hw, seg);
1353 	state = ice_download_pkg(hw, pkg, seg);
1354 
1355 	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1356 		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1357 		already_loaded = true;
1358 	}
1359 
1360 	/* Get information on the package currently loaded in HW, then make sure
1361 	 * the driver is compatible with this version.
1362 	 */
1363 	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1364 		state = ice_get_pkg_info(hw);
1365 		if (!state)
1366 			state = ice_get_ddp_pkg_state(hw, already_loaded);
1367 	}
1368 
1369 	if (ice_is_init_pkg_successful(state)) {
1370 		hw->seg = seg;
1371 		/* on successful package download update other required
1372 		 * registers to support the package and fill HW tables
1373 		 * with package content.
1374 		 */
1375 		ice_init_pkg_regs(hw);
1376 		ice_fill_blk_tbls(hw);
1377 		ice_get_prof_index_max(hw);
1378 	} else {
1379 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1380 			  state);
1381 	}
1382 
1383 	return state;
1384 }
1385 
1386 /**
1387  * ice_copy_and_init_pkg - initialize/download a copy of the package
1388  * @hw: pointer to the hardware structure
1389  * @buf: pointer to the package buffer
1390  * @len: size of the package buffer
1391  *
1392  * This function copies the package buffer, and then calls ice_init_pkg() to
1393  * initialize the copied package contents.
1394  *
1395  * The copying is necessary if the package buffer supplied is constant, or if
1396  * the memory may disappear shortly after calling this function.
1397  *
1398  * If the package buffer resides in the data segment and can be modified, the
1399  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1400  *
1401  * However, if the package buffer needs to be copied first, such as when being
1402  * read from a file, the caller should use ice_copy_and_init_pkg().
1403  *
1404  * This function will first copy the package buffer, before calling
1405  * ice_init_pkg(). The caller is free to immediately destroy the original
1406  * package buffer, as the new copy will be managed by this function and
1407  * related routines.
1408  */
1409 enum ice_ddp_state
1410 ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1411 {
1412 	enum ice_ddp_state state;
1413 	u8 *buf_copy;
1414 
1415 	if (!buf || !len)
1416 		return ICE_DDP_PKG_ERR;
1417 
1418 	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1419 
1420 	state = ice_init_pkg(hw, buf_copy, len);
1421 	if (!ice_is_init_pkg_successful(state)) {
1422 		/* Free the copy, since we failed to initialize the package */
1423 		ice_free(hw, buf_copy);
1424 	} else {
1425 		/* Track the copied pkg so we can free it later */
1426 		hw->pkg_copy = buf_copy;
1427 		hw->pkg_size = len;
1428 	}
1429 
1430 	return state;
1431 }
1432 
1433 /**
1434  * ice_is_init_pkg_successful - check if DDP init was successful
1435  * @state: state of the DDP pkg after download
1436  */
1437 bool ice_is_init_pkg_successful(enum ice_ddp_state state)
1438 {
1439 	switch (state) {
1440 	case ICE_DDP_PKG_SUCCESS:
1441 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
1442 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
1443 		return true;
1444 	default:
1445 		return false;
1446 	}
1447 }
1448 
1449 /**
1450  * ice_pkg_buf_alloc
1451  * @hw: pointer to the HW structure
1452  *
1453  * Allocates a package buffer and returns a pointer to the buffer header.
1454  * Note: all package contents must be in Little Endian form.
1455  */
1456 struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1457 {
1458 	struct ice_buf_build *bld;
1459 	struct ice_buf_hdr *buf;
1460 
1461 	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1462 	if (!bld)
1463 		return NULL;
1464 
1465 	buf = (struct ice_buf_hdr *)bld;
1466 	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1467 					     section_entry));
1468 	return bld;
1469 }
1470 
1471 static bool ice_is_gtp_u_profile(u32 prof_idx)
1472 {
1473 	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
1474 		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
1475 	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
1476 }
1477 
1478 static bool ice_is_gtp_c_profile(u32 prof_idx)
1479 {
1480 	switch (prof_idx) {
1481 	case ICE_PROFID_IPV4_GTPC_TEID:
1482 	case ICE_PROFID_IPV4_GTPC_NO_TEID:
1483 	case ICE_PROFID_IPV6_GTPC_TEID:
1484 	case ICE_PROFID_IPV6_GTPC_NO_TEID:
1485 		return true;
1486 	default:
1487 		return false;
1488 	}
1489 }
1490 
1491 /**
1492  * ice_get_sw_prof_type - determine switch profile type
1493  * @hw: pointer to the HW structure
1494  * @fv: pointer to the switch field vector
1495  * @prof_idx: profile index to check
1496  */
1497 static enum ice_prof_type
1498 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
1499 {
1500 	bool valid_prof = false;
1501 	u16 i;
1502 
1503 	if (ice_is_gtp_c_profile(prof_idx))
1504 		return ICE_PROF_TUN_GTPC;
1505 
1506 	if (ice_is_gtp_u_profile(prof_idx))
1507 		return ICE_PROF_TUN_GTPU;
1508 
1509 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1510 		if (fv->ew[i].off != ICE_NAN_OFFSET)
1511 			valid_prof = true;
1512 
1513 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1514 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1515 		    fv->ew[i].off == ICE_VNI_OFFSET)
1516 			return ICE_PROF_TUN_UDP;
1517 
1518 		/* GRE tunnel will have GRE protocol */
1519 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1520 			return ICE_PROF_TUN_GRE;
1521 	}
1522 
1523 	return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
1524 }
1525 
1526 /**
1527  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1528  * @hw: pointer to hardware structure
1529  * @req_profs: type of profiles requested
1530  * @bm: pointer to memory for returning the bitmap of field vectors
1531  */
1532 void
1533 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1534 		     ice_bitmap_t *bm)
1535 {
1536 	struct ice_pkg_enum state;
1537 	struct ice_seg *ice_seg;
1538 	struct ice_fv *fv;
1539 
1540 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1541 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1542 	ice_seg = hw->seg;
1543 	do {
1544 		enum ice_prof_type prof_type;
1545 		u32 offset;
1546 
1547 		fv = (struct ice_fv *)
1548 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1549 					   &offset, ice_sw_fv_handler);
1550 		ice_seg = NULL;
1551 
1552 		if (fv) {
1553 			/* Determine field vector type */
1554 			prof_type = ice_get_sw_prof_type(hw, fv, offset);
1555 
1556 			if (req_profs & prof_type)
1557 				ice_set_bit((u16)offset, bm);
1558 		}
1559 	} while (fv);
1560 }
1561 
1562 /**
1563  * ice_get_sw_fv_list
1564  * @hw: pointer to the HW structure
1565  * @lkups: lookup elements or match criteria for the advanced recipe, one
1566  *	   structure per protocol header
1567  * @bm: bitmap of field vectors to consider
1568  * @fv_list: Head of a list
1569  *
1570  * Finds all the field vector entries from switch block that contain
1571  * a given protocol ID and offset and returns a list of structures of type
1572  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1573  * definition and profile ID information
1574  * NOTE: The caller of the function is responsible for freeing the memory
1575  * allocated for every list entry.
1576  */
1577 int
1578 ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
1579 		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1580 {
1581 	struct ice_sw_fv_list_entry *fvl;
1582 	struct ice_sw_fv_list_entry *tmp;
1583 	struct ice_pkg_enum state;
1584 	struct ice_seg *ice_seg;
1585 	struct ice_fv *fv;
1586 	u32 offset;
1587 
1588 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1589 
1590 	if (!lkups->n_val_words || !hw->seg)
1591 		return ICE_ERR_PARAM;
1592 
1593 	ice_seg = hw->seg;
1594 	do {
1595 		u16 i;
1596 
1597 		fv = (struct ice_fv *)
1598 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1599 					   &offset, ice_sw_fv_handler);
1600 		if (!fv)
1601 			break;
1602 		ice_seg = NULL;
1603 
1604 		/* If field vector is not in the bitmap list, then skip this
1605 		 * profile.
1606 		 */
1607 		if (!ice_is_bit_set(bm, (u16)offset))
1608 			continue;
1609 
1610 		for (i = 0; i < lkups->n_val_words; i++) {
1611 			int j;
1612 
1613 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1614 				if (fv->ew[j].prot_id ==
1615 				    lkups->fv_words[i].prot_id &&
1616 				    fv->ew[j].off == lkups->fv_words[i].off)
1617 					break;
1618 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1619 				break;
1620 			if (i + 1 == lkups->n_val_words) {
1621 				fvl = (struct ice_sw_fv_list_entry *)
1622 					ice_malloc(hw, sizeof(*fvl));
1623 				if (!fvl)
1624 					goto err;
1625 				fvl->fv_ptr = fv;
1626 				fvl->profile_id = offset;
1627 				LIST_ADD(&fvl->list_entry, fv_list);
1628 				break;
1629 			}
1630 		}
1631 	} while (fv);
1632 	if (LIST_EMPTY(fv_list)) {
1633 		ice_warn(hw, "Required profiles not found in currently loaded DDP package");
1634 		return ICE_ERR_CFG;
1635 	}
1636 	return 0;
1637 
1638 err:
1639 	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1640 				 list_entry) {
1641 		LIST_DEL(&fvl->list_entry);
1642 		ice_free(hw, fvl);
1643 	}
1644 
1645 	return ICE_ERR_NO_MEMORY;
1646 }
1647 
1648 /**
1649  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1650  * @hw: pointer to hardware structure
1651  */
1652 void ice_init_prof_result_bm(struct ice_hw *hw)
1653 {
1654 	struct ice_pkg_enum state;
1655 	struct ice_seg *ice_seg;
1656 	struct ice_fv *fv;
1657 
1658 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1659 
1660 	if (!hw->seg)
1661 		return;
1662 
1663 	ice_seg = hw->seg;
1664 	do {
1665 		u32 off;
1666 		u16 i;
1667 
1668 		fv = (struct ice_fv *)
1669 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1670 					   &off, ice_sw_fv_handler);
1671 		ice_seg = NULL;
1672 		if (!fv)
1673 			break;
1674 
1675 		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1676 				ICE_MAX_FV_WORDS);
1677 
1678 		/* Determine empty field vector indices, these can be
1679 		 * used for recipe results. Skip index 0, since it is
1680 		 * always used for Switch ID.
1681 		 */
1682 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1683 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1684 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1685 				ice_set_bit(i,
1686 					    hw->switch_info->prof_res_bm[off]);
1687 	} while (fv);
1688 }
1689 
1690 /**
1691  * ice_pkg_buf_free
1692  * @hw: pointer to the HW structure
1693  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1694  *
1695  * Frees a package buffer
1696  */
1697 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1698 {
1699 	ice_free(hw, bld);
1700 }
1701 
1702 /**
1703  * ice_pkg_buf_reserve_section
1704  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1705  * @count: the number of sections to reserve
1706  *
1707  * Reserves one or more section table entries in a package buffer. This routine
1708  * can be called multiple times as long as they are made before calling
1709  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1710  * is called once, the number of sections that can be allocated will not be able
1711  * to be increased; not using all reserved sections is fine, but this will
1712  * result in some wasted space in the buffer.
1713  * Note: all package contents must be in Little Endian form.
1714  */
1715 int
1716 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1717 {
1718 	struct ice_buf_hdr *buf;
1719 	u16 section_count;
1720 	u16 data_end;
1721 
1722 	if (!bld)
1723 		return ICE_ERR_PARAM;
1724 
1725 	buf = (struct ice_buf_hdr *)&bld->buf;
1726 
1727 	/* already an active section, can't increase table size */
1728 	section_count = LE16_TO_CPU(buf->section_count);
1729 	if (section_count > 0)
1730 		return ICE_ERR_CFG;
1731 
1732 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1733 		return ICE_ERR_CFG;
1734 	bld->reserved_section_table_entries += count;
1735 
1736 	data_end = LE16_TO_CPU(buf->data_end) +
1737 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1738 	buf->data_end = CPU_TO_LE16(data_end);
1739 
1740 	return 0;
1741 }
1742 
1743 /**
1744  * ice_pkg_buf_alloc_section
1745  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1746  * @type: the section type value
1747  * @size: the size of the section to reserve (in bytes)
1748  *
1749  * Reserves memory in the buffer for a section's content and updates the
1750  * buffers' status accordingly. This routine returns a pointer to the first
1751  * byte of the section start within the buffer, which is used to fill in the
1752  * section contents.
1753  * Note: all package contents must be in Little Endian form.
1754  */
1755 void *
1756 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1757 {
1758 	struct ice_buf_hdr *buf;
1759 	u16 sect_count;
1760 	u16 data_end;
1761 
1762 	if (!bld || !type || !size)
1763 		return NULL;
1764 
1765 	buf = (struct ice_buf_hdr *)&bld->buf;
1766 
1767 	/* check for enough space left in buffer */
1768 	data_end = LE16_TO_CPU(buf->data_end);
1769 
1770 	/* section start must align on 4 byte boundary */
1771 	data_end = ICE_ALIGN(data_end, 4);
1772 
1773 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1774 		return NULL;
1775 
1776 	/* check for more available section table entries */
1777 	sect_count = LE16_TO_CPU(buf->section_count);
1778 	if (sect_count < bld->reserved_section_table_entries) {
1779 		void *section_ptr = ((u8 *)buf) + data_end;
1780 
1781 		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1782 		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1783 		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1784 
1785 		data_end += size;
1786 		buf->data_end = CPU_TO_LE16(data_end);
1787 
1788 		buf->section_count = CPU_TO_LE16(sect_count + 1);
1789 		return section_ptr;
1790 	}
1791 
1792 	/* no free section table entries */
1793 	return NULL;
1794 }
1795 
1796 /**
1797  * ice_pkg_buf_alloc_single_section
1798  * @hw: pointer to the HW structure
1799  * @type: the section type value
1800  * @size: the size of the section to reserve (in bytes)
1801  * @section: returns pointer to the section
1802  *
1803  * Allocates a package buffer with a single section.
1804  * Note: all package contents must be in Little Endian form.
1805  */
1806 struct ice_buf_build *
1807 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1808 				 void **section)
1809 {
1810 	struct ice_buf_build *buf;
1811 
1812 	if (!section)
1813 		return NULL;
1814 
1815 	buf = ice_pkg_buf_alloc(hw);
1816 	if (!buf)
1817 		return NULL;
1818 
1819 	if (ice_pkg_buf_reserve_section(buf, 1))
1820 		goto ice_pkg_buf_alloc_single_section_err;
1821 
1822 	*section = ice_pkg_buf_alloc_section(buf, type, size);
1823 	if (!*section)
1824 		goto ice_pkg_buf_alloc_single_section_err;
1825 
1826 	return buf;
1827 
1828 ice_pkg_buf_alloc_single_section_err:
1829 	ice_pkg_buf_free(hw, buf);
1830 	return NULL;
1831 }
1832 
1833 /**
1834  * ice_pkg_buf_unreserve_section
1835  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1836  * @count: the number of sections to unreserve
1837  *
1838  * Unreserves one or more section table entries in a package buffer, releasing
1839  * space that can be used for section data. This routine can be called
1840  * multiple times as long as they are made before calling
1841  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1842  * is called once, the number of sections that can be allocated will not be able
1843  * to be increased; not using all reserved sections is fine, but this will
1844  * result in some wasted space in the buffer.
1845  * Note: all package contents must be in Little Endian form.
1846  */
1847 int
1848 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
1849 {
1850 	struct ice_buf_hdr *buf;
1851 	u16 section_count;
1852 	u16 data_end;
1853 
1854 	if (!bld)
1855 		return ICE_ERR_PARAM;
1856 
1857 	buf = (struct ice_buf_hdr *)&bld->buf;
1858 
1859 	/* already an active section, can't decrease table size */
1860 	section_count = LE16_TO_CPU(buf->section_count);
1861 	if (section_count > 0)
1862 		return ICE_ERR_CFG;
1863 
1864 	if (count > bld->reserved_section_table_entries)
1865 		return ICE_ERR_CFG;
1866 	bld->reserved_section_table_entries -= count;
1867 
1868 	data_end = LE16_TO_CPU(buf->data_end) -
1869 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1870 	buf->data_end = CPU_TO_LE16(data_end);
1871 
1872 	return 0;
1873 }
1874 
1875 /**
1876  * ice_pkg_buf_get_free_space
1877  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1878  *
1879  * Returns the number of free bytes remaining in the buffer.
1880  * Note: all package contents must be in Little Endian form.
1881  */
1882 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
1883 {
1884 	struct ice_buf_hdr *buf;
1885 
1886 	if (!bld)
1887 		return 0;
1888 
1889 	buf = (struct ice_buf_hdr *)&bld->buf;
1890 	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
1891 }
1892 
1893 /**
1894  * ice_pkg_buf_get_active_sections
1895  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1896  *
1897  * Returns the number of active sections. Before using the package buffer
1898  * in an update package command, the caller should make sure that there is at
1899  * least one active section - otherwise, the buffer is not legal and should
1900  * not be used.
1901  * Note: all package contents must be in Little Endian form.
1902  */
1903 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1904 {
1905 	struct ice_buf_hdr *buf;
1906 
1907 	if (!bld)
1908 		return 0;
1909 
1910 	buf = (struct ice_buf_hdr *)&bld->buf;
1911 	return LE16_TO_CPU(buf->section_count);
1912 }
1913 
1914 /**
1915  * ice_pkg_buf
1916  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1917  *
1918  * Return a pointer to the buffer's header
1919  */
1920 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1921 {
1922 	if (bld)
1923 		return &bld->buf;
1924 
1925 	return NULL;
1926 }
1927 
1928 /**
1929  * ice_find_buf_table
1930  * @ice_seg: pointer to the ice segment
1931  *
1932  * Returns the address of the buffer table within the ice segment.
1933  */
1934 struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
1935 {
1936 	struct ice_nvm_table *nvms;
1937 
1938 	nvms = (struct ice_nvm_table *)
1939 		(ice_seg->device_table +
1940 		 LE32_TO_CPU(ice_seg->device_table_count));
1941 
1942 	return (_FORCE_ struct ice_buf_table *)
1943 		(nvms->vers + LE32_TO_CPU(nvms->table_count));
1944 }
1945 
1946 /**
1947  * ice_pkg_val_buf
1948  * @buf: pointer to the ice buffer
1949  *
1950  * This helper function validates a buffer's header.
1951  */
1952 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
1953 {
1954 	struct ice_buf_hdr *hdr;
1955 	u16 section_count;
1956 	u16 data_end;
1957 
1958 	hdr = (struct ice_buf_hdr *)buf->buf;
1959 	/* verify data */
1960 	section_count = LE16_TO_CPU(hdr->section_count);
1961 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
1962 		return NULL;
1963 
1964 	data_end = LE16_TO_CPU(hdr->data_end);
1965 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
1966 		return NULL;
1967 
1968 	return hdr;
1969 }
1970 
1971 /**
1972  * ice_pkg_enum_buf
1973  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
1974  * @state: pointer to the enum state
1975  *
1976  * This function will enumerate all the buffers in the ice segment. The first
1977  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
1978  * ice_seg is set to NULL which continues the enumeration. When the function
1979  * returns a NULL pointer, then the end of the buffers has been reached, or an
1980  * unexpected value has been detected (for example an invalid section count or
1981  * an invalid buffer end value).
1982  */
1983 struct ice_buf_hdr *
1984 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
1985 {
1986 	if (ice_seg) {
1987 		state->buf_table = ice_find_buf_table(ice_seg);
1988 		if (!state->buf_table)
1989 			return NULL;
1990 
1991 		state->buf_idx = 0;
1992 		return ice_pkg_val_buf(state->buf_table->buf_array);
1993 	}
1994 
1995 	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
1996 		return ice_pkg_val_buf(state->buf_table->buf_array +
1997 				       state->buf_idx);
1998 	else
1999 		return NULL;
2000 }
2001 
2002 /**
2003  * ice_pkg_advance_sect
2004  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2005  * @state: pointer to the enum state
2006  *
2007  * This helper function will advance the section within the ice segment,
2008  * also advancing the buffer if needed.
2009  */
2010 bool
2011 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
2012 {
2013 	if (!ice_seg && !state->buf)
2014 		return false;
2015 
2016 	if (!ice_seg && state->buf)
2017 		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
2018 			return true;
2019 
2020 	state->buf = ice_pkg_enum_buf(ice_seg, state);
2021 	if (!state->buf)
2022 		return false;
2023 
2024 	/* start of new buffer, reset section index */
2025 	state->sect_idx = 0;
2026 	return true;
2027 }
2028 
2029 /**
2030  * ice_pkg_enum_section
2031  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2032  * @state: pointer to the enum state
2033  * @sect_type: section type to enumerate
2034  *
2035  * This function will enumerate all the sections of a particular type in the
2036  * ice segment. The first call is made with the ice_seg parameter non-NULL;
2037  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2038  * When the function returns a NULL pointer, then the end of the matching
2039  * sections has been reached.
2040  */
2041 void *
2042 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2043 		     u32 sect_type)
2044 {
2045 	u16 offset, size;
2046 
2047 	if (ice_seg)
2048 		state->type = sect_type;
2049 
2050 	if (!ice_pkg_advance_sect(ice_seg, state))
2051 		return NULL;
2052 
2053 	/* scan for next matching section */
2054 	while (state->buf->section_entry[state->sect_idx].type !=
2055 	       CPU_TO_LE32(state->type))
2056 		if (!ice_pkg_advance_sect(NULL, state))
2057 			return NULL;
2058 
2059 	/* validate section */
2060 	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2061 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
2062 		return NULL;
2063 
2064 	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
2065 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
2066 		return NULL;
2067 
2068 	/* make sure the section fits in the buffer */
2069 	if (offset + size > ICE_PKG_BUF_SIZE)
2070 		return NULL;
2071 
2072 	state->sect_type =
2073 		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
2074 
2075 	/* calc pointer to this section */
2076 	state->sect = ((u8 *)state->buf) +
2077 		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2078 
2079 	return state->sect;
2080 }
2081 
2082 /**
2083  * ice_pkg_enum_entry
2084  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2085  * @state: pointer to the enum state
2086  * @sect_type: section type to enumerate
2087  * @offset: pointer to variable that receives the offset in the table (optional)
2088  * @handler: function that handles access to the entries into the section type
2089  *
2090  * This function will enumerate all the entries in particular section type in
2091  * the ice segment. The first call is made with the ice_seg parameter non-NULL;
2092  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2093  * When the function returns a NULL pointer, then the end of the entries has
2094  * been reached.
2095  *
2096  * Since each section may have a different header and entry size, the handler
2097  * function is needed to determine the number and location entries in each
2098  * section.
2099  *
2100  * The offset parameter is optional, but should be used for sections that
2101  * contain an offset for each section table. For such cases, the section handler
2102  * function must return the appropriate offset + index to give the absolution
2103  * offset for each entry. For example, if the base for a section's header
2104  * indicates a base offset of 10, and the index for the entry is 2, then
2105  * section handler function should set the offset to 10 + 2 = 12.
2106  */
2107 void *
2108 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2109 		   u32 sect_type, u32 *offset,
2110 		   void *(*handler)(u32 sect_type, void *section,
2111 				    u32 index, u32 *offset))
2112 {
2113 	void *entry;
2114 
2115 	if (ice_seg) {
2116 		if (!handler)
2117 			return NULL;
2118 
2119 		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
2120 			return NULL;
2121 
2122 		state->entry_idx = 0;
2123 		state->handler = handler;
2124 	} else {
2125 		state->entry_idx++;
2126 	}
2127 
2128 	if (!state->handler)
2129 		return NULL;
2130 
2131 	/* get entry */
2132 	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
2133 			       offset);
2134 	if (!entry) {
2135 		/* end of a section, look for another section of this type */
2136 		if (!ice_pkg_enum_section(NULL, state, 0))
2137 			return NULL;
2138 
2139 		state->entry_idx = 0;
2140 		entry = state->handler(state->sect_type, state->sect,
2141 				       state->entry_idx, offset);
2142 	}
2143 
2144 	return entry;
2145 }
2146 
2147 /**
2148  * ice_boost_tcam_handler
2149  * @sect_type: section type
2150  * @section: pointer to section
2151  * @index: index of the boost TCAM entry to be returned
2152  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
2153  *
2154  * This is a callback function that can be passed to ice_pkg_enum_entry.
2155  * Handles enumeration of individual boost TCAM entries.
2156  */
2157 static void *
2158 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
2159 {
2160 	struct ice_boost_tcam_section *boost;
2161 
2162 	if (!section)
2163 		return NULL;
2164 
2165 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
2166 		return NULL;
2167 
2168 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
2169 		return NULL;
2170 
2171 	if (offset)
2172 		*offset = 0;
2173 
2174 	boost = (struct ice_boost_tcam_section *)section;
2175 	if (index >= LE16_TO_CPU(boost->count))
2176 		return NULL;
2177 
2178 	return boost->tcam + index;
2179 }
2180 
2181 /**
2182  * ice_find_boost_entry
2183  * @ice_seg: pointer to the ice segment (non-NULL)
2184  * @addr: Boost TCAM address of entry to search for
2185  * @entry: returns pointer to the entry
2186  *
2187  * Finds a particular Boost TCAM entry and returns a pointer to that entry
2188  * if it is found. The ice_seg parameter must not be NULL since the first call
2189  * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
2190  */
2191 static int
2192 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
2193 		     struct ice_boost_tcam_entry **entry)
2194 {
2195 	struct ice_boost_tcam_entry *tcam;
2196 	struct ice_pkg_enum state;
2197 
2198 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2199 
2200 	if (!ice_seg)
2201 		return ICE_ERR_PARAM;
2202 
2203 	do {
2204 		tcam = (struct ice_boost_tcam_entry *)
2205 		       ice_pkg_enum_entry(ice_seg, &state,
2206 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
2207 					  ice_boost_tcam_handler);
2208 		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
2209 			*entry = tcam;
2210 			return 0;
2211 		}
2212 
2213 		ice_seg = NULL;
2214 	} while (tcam);
2215 
2216 	*entry = NULL;
2217 	return ICE_ERR_CFG;
2218 }
2219 
2220 /**
2221  * ice_init_pkg_hints
2222  * @hw: pointer to the HW structure
2223  * @ice_seg: pointer to the segment of the package scan (non-NULL)
2224  *
2225  * This function will scan the package and save off relevant information
2226  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
2227  * since the first call to ice_enum_labels requires a pointer to an actual
2228  * ice_seg structure.
2229  */
2230 void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
2231 {
2232 	struct ice_pkg_enum state;
2233 	char *label_name;
2234 	u16 val;
2235 	int i;
2236 
2237 	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
2238 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2239 
2240 	if (!ice_seg)
2241 		return;
2242 
2243 	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
2244 				     &val);
2245 
2246 	while (label_name) {
2247 /* TODO: Replace !strnsmp() with wrappers like match_some_pre() */
2248 		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
2249 			/* check for a tunnel entry */
2250 			ice_add_tunnel_hint(hw, label_name, val);
2251 
2252 		label_name = ice_enum_labels(NULL, 0, &state, &val);
2253 	}
2254 
2255 	/* Cache the appropriate boost TCAM entry pointers for tunnels */
2256 	for (i = 0; i < hw->tnl.count; i++) {
2257 		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
2258 				     &hw->tnl.tbl[i].boost_entry);
2259 		if (hw->tnl.tbl[i].boost_entry)
2260 			hw->tnl.tbl[i].valid = true;
2261 	}
2262 }
2263 
2264 /**
2265  * ice_acquire_global_cfg_lock
2266  * @hw: pointer to the HW structure
2267  * @access: access type (read or write)
2268  *
2269  * This function will request ownership of the global config lock for reading
2270  * or writing of the package. When attempting to obtain write access, the
2271  * caller must check for the following two return values:
2272  *
2273  * 0                  - Means the caller has acquired the global config lock
2274  *                      and can perform writing of the package.
2275  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
2276  *                      package or has found that no update was necessary; in
2277  *                      this case, the caller can just skip performing any
2278  *                      update of the package.
2279  */
2280 int
2281 ice_acquire_global_cfg_lock(struct ice_hw *hw,
2282 			    enum ice_aq_res_access_type access)
2283 {
2284 	int status;
2285 
2286 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
2287 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2288 
2289 	if (status == ICE_ERR_AQ_NO_WORK)
2290 		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
2291 
2292 	return status;
2293 }
2294 
/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock. Counterpart of
 * ice_acquire_global_cfg_lock().
 */
void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}
2305 
/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock, waiting up to
 * ICE_CHANGE_LOCK_TIMEOUT for it. Returns 0 on success or the error code
 * propagated from ice_acquire_res().
 */
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}
2319 
/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 * Counterpart of ice_acquire_change_lock().
 */
void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
2330 
2331 /**
2332  * ice_is_get_tx_sched_new_format
2333  * @hw: pointer to the HW struct
2334  *
2335  * Determines if the new format for the Tx scheduler get api is supported
2336  */
2337 static bool
2338 ice_is_get_tx_sched_new_format(struct ice_hw *hw)
2339 {
2340 	if (ice_is_e830(hw))
2341 		return true;
2342 	if (ice_is_e825c(hw))
2343 		return true;
2344 	return false;
2345 }
2346 
2347 /**
2348  * ice_get_set_tx_topo - get or set tx topology
2349  * @hw: pointer to the HW struct
2350  * @buf: pointer to tx topology buffer
2351  * @buf_size: buffer size
2352  * @cd: pointer to command details structure or NULL
2353  * @flags: pointer to descriptor flags
2354  * @set: 0-get, 1-set topology
2355  *
2356  * The function will get or set tx topology
2357  */
2358 static int
2359 ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
2360 		    struct ice_sq_cd *cd, u8 *flags, bool set)
2361 {
2362 	struct ice_aqc_get_set_tx_topo *cmd;
2363 	struct ice_aq_desc desc;
2364 	int status;
2365 
2366 	cmd = &desc.params.get_set_tx_topo;
2367 	if (set) {
2368 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
2369 		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
2370 		/* requested to update a new topology, not a default topolgy */
2371 		if (buf)
2372 			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
2373 					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
2374 
2375 		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2376 	} else {
2377 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
2378 		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
2379 
2380 		if (!ice_is_get_tx_sched_new_format(hw))
2381 			desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2382 	}
2383 
2384 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2385 	if (status)
2386 		return status;
2387 	/* read the return flag values (first byte) for get operation */
2388 	if (!set && flags)
2389 		*flags = desc.params.get_set_tx_topo.set_flags;
2390 
2391 	return 0;
2392 }
2393 
2394 /**
2395  * ice_cfg_tx_topo - Initialize new tx topology if available
2396  * @hw: pointer to the HW struct
2397  * @buf: pointer to Tx topology buffer
2398  * @len: buffer size
2399  *
2400  * The function will apply the new Tx topology from the package buffer
2401  * if available.
2402  */
2403 int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
2404 {
2405 	u8 *current_topo, *new_topo = NULL;
2406 	struct ice_run_time_cfg_seg *seg;
2407 	struct ice_buf_hdr *section;
2408 	struct ice_pkg_hdr *pkg_hdr;
2409 	enum ice_ddp_state state;
2410 	u16 i, size = 0, offset;
2411 	u32 reg = 0;
2412 	int status;
2413 	u8 flags;
2414 
2415 	if (!buf || !len)
2416 		return ICE_ERR_PARAM;
2417 
2418 	/* Does FW support new Tx topology mode ? */
2419 	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
2420 		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
2421 		return ICE_ERR_NOT_SUPPORTED;
2422 	}
2423 
2424 	current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2425 	if (!current_topo)
2426 		return ICE_ERR_NO_MEMORY;
2427 
2428 	/* get the current Tx topology */
2429 	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
2430 				     &flags, false);
2431 	ice_free(hw, current_topo);
2432 
2433 	if (status) {
2434 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
2435 		return status;
2436 	}
2437 
2438 	/* Is default topology already applied ? */
2439 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2440 	    hw->num_tx_sched_layers == 9) {
2441 		ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
2442 		/* Already default topology is loaded */
2443 		return ICE_ERR_ALREADY_EXISTS;
2444 	}
2445 
2446 	/* Is new topology already applied ? */
2447 	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2448 	    hw->num_tx_sched_layers == 5) {
2449 		ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
2450 		/* Already new topology is loaded */
2451 		return ICE_ERR_ALREADY_EXISTS;
2452 	}
2453 
2454 	/* Is set topology issued already ? */
2455 	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
2456 		ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n");
2457 		/* add a small delay before exiting */
2458 		for (i = 0; i < 20; i++)
2459 			ice_msec_delay(100, true);
2460 		return ICE_ERR_ALREADY_EXISTS;
2461 	}
2462 
2463 	/* Change the topology from new to default (5 to 9) */
2464 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2465 	    hw->num_tx_sched_layers == 5) {
2466 		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
2467 		goto update_topo;
2468 	}
2469 
2470 	pkg_hdr = (struct ice_pkg_hdr *)buf;
2471 	state = ice_verify_pkg(pkg_hdr, len);
2472 	if (state) {
2473 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
2474 			  state);
2475 		return ICE_ERR_CFG;
2476 	}
2477 
2478 	/* find run time configuration segment */
2479 	seg = (struct ice_run_time_cfg_seg *)
2480 		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
2481 	if (!seg) {
2482 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
2483 		return ICE_ERR_CFG;
2484 	}
2485 
2486 	if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
2487 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
2488 			  seg->buf_table.buf_count);
2489 		return ICE_ERR_CFG;
2490 	}
2491 
2492 	section = ice_pkg_val_buf(seg->buf_table.buf_array);
2493 
2494 	if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
2495 		ICE_SID_TX_5_LAYER_TOPO) {
2496 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
2497 		return ICE_ERR_CFG;
2498 	}
2499 
2500 	size = LE16_TO_CPU(section->section_entry[0].size);
2501 	offset = LE16_TO_CPU(section->section_entry[0].offset);
2502 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
2503 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
2504 		return ICE_ERR_CFG;
2505 	}
2506 
2507 	/* make sure the section fits in the buffer */
2508 	if (offset + size > ICE_PKG_BUF_SIZE) {
2509 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
2510 		return ICE_ERR_CFG;
2511 	}
2512 
2513 	/* Get the new topology buffer */
2514 	new_topo = ((u8 *)section) + offset;
2515 
2516 update_topo:
2517 	/* acquire global lock to make sure that set topology issued
2518 	 * by one PF
2519 	 */
2520 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
2521 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2522 	if (status) {
2523 		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
2524 		return status;
2525 	}
2526 
2527 	/* check reset was triggered already or not */
2528 	reg = rd32(hw, GLGEN_RSTAT);
2529 	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
2530 		/* Reset is in progress, re-init the hw again */
2531 		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
2532 		ice_check_reset(hw);
2533 		return 0;
2534 	}
2535 
2536 	/* set new topology */
2537 	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
2538 	if (status) {
2539 		ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
2540 		return status;
2541 	}
2542 
2543 	/* new topology is updated, delay 1 second before issuing the CORRER */
2544 	for (i = 0; i < 10; i++)
2545 		ice_msec_delay(100, true);
2546 	ice_reset(hw, ICE_RESET_CORER);
2547 	/* CORER will clear the global lock, so no explicit call
2548 	 * required for release
2549 	 */
2550 	return 0;
2551 }
2552