xref: /freebsd/sys/dev/ice/ice_ddp_common.c (revision 19fae0f66023a97a9b464b3beeeabb2081f575b3)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 #include "ice_ddp_common.h"
34 #include "ice_type.h"
35 #include "ice_common.h"
36 #include "ice_sched.h"
37 
/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 *
 * Sends one package buffer to firmware. On an AQ error, the firmware
 * overwrites the buffer with a response carrying the failing offset and
 * extra error info, which are copied to the optional out parameters.
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* Clear the optional outputs up front so callers always see
	 * deterministic values, even on success or a non-AQ failure.
	 */
	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	/* RD flag: the attached buffer carries data for firmware to read */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}
85 
86 /**
87  * ice_aq_upload_section
88  * @hw: pointer to the hardware structure
89  * @pkg_buf: the package buffer which will receive the section
90  * @buf_size: the size of the package buffer
91  * @cd: pointer to command details structure or NULL
92  *
93  * Upload Section (0x0C41)
94  */
95 enum ice_status
96 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
97 		      u16 buf_size, struct ice_sq_cd *cd)
98 {
99 	struct ice_aq_desc desc;
100 
101 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
102 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
103 
104 	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
105 }
106 
/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 *
 * Same command layout and response handling as Download Package (0x0C40),
 * but using the update opcode.
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* Clear the optional outputs so callers always see defined values */
	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	/* RD flag: the attached buffer carries data for firmware to read */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = LE32_TO_CPU(resp->error_offset);
		if (error_info)
			*error_info = LE32_TO_CPU(resp->error_info);
	}

	return status;
}
154 
155 /**
156  * ice_find_seg_in_pkg
157  * @hw: pointer to the hardware structure
158  * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
159  * @pkg_hdr: pointer to the package header to be searched
160  *
161  * This function searches a package file for a particular segment type. On
162  * success it returns a pointer to the segment header, otherwise it will
163  * return NULL.
164  */
165 struct ice_generic_seg_hdr *
166 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
167 		    struct ice_pkg_hdr *pkg_hdr)
168 {
169 	u32 i;
170 
171 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
172 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
173 		  pkg_hdr->pkg_format_ver.update,
174 		  pkg_hdr->pkg_format_ver.draft);
175 
176 	/* Search all package segments for the requested segment type */
177 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
178 		struct ice_generic_seg_hdr *seg;
179 
180 		seg = (struct ice_generic_seg_hdr *)
181 			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
182 
183 		if (LE32_TO_CPU(seg->seg_type) == seg_type)
184 			return seg;
185 	}
186 
187 	return NULL;
188 }
189 
190 /**
191  * ice_get_pkg_seg_by_idx
192  * @pkg_hdr: pointer to the package header to be searched
193  * @idx: index of segment
194  */
195 static struct ice_generic_seg_hdr *
196 ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
197 {
198 	struct ice_generic_seg_hdr *seg = NULL;
199 
200 	if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
201 		seg = (struct ice_generic_seg_hdr *)
202 			((u8 *)pkg_hdr +
203 			 LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
204 
205 	return seg;
206 }
207 
208 /**
209  * ice_is_signing_seg_at_idx - determine if segment is a signing segment
210  * @pkg_hdr: pointer to package header
211  * @idx: segment index
212  */
213 static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
214 {
215 	struct ice_generic_seg_hdr *seg;
216 	bool retval = false;
217 
218 	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
219 	if (seg)
220 		retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
221 
222 	return retval;
223 }
224 
225 /**
226  * ice_is_signing_seg_type_at_idx
227  * @pkg_hdr: pointer to package header
228  * @idx: segment index
229  * @seg_id: segment id that is expected
230  * @sign_type: signing type
231  *
232  * Determine if a segment is a signing segment of the correct type
233  */
234 static bool
235 ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
236 			       u32 seg_id, u32 sign_type)
237 {
238 	bool result = false;
239 
240 	if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
241 		struct ice_sign_seg *seg;
242 
243 		seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
244 								    idx);
245 		if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
246 		    LE32_TO_CPU(seg->sign_type) == sign_type)
247 			result = true;
248 	}
249 
250 	return result;
251 }
252 
/**
 * ice_update_pkg_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Sends each buffer to firmware via Update Package (0x0C42), stopping at
 * the first failure. The caller is expected to hold the change lock
 * (see ice_update_pkg for the locking wrapper).
 */
enum ice_status
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status = ICE_SUCCESS;
	u32 i;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);
		u32 offset, info;

		/* Only transfer the used portion of the buffer (data_end) */
		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	return status;
}
282 
283 /**
284  * ice_update_pkg
285  * @hw: pointer to the hardware structure
286  * @bufs: pointer to an array of buffers
287  * @count: the number of buffers in the array
288  *
289  * Obtains change lock and updates package.
290  */
291 enum ice_status
292 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
293 {
294 	enum ice_status status;
295 
296 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
297 	if (status)
298 		return status;
299 
300 	status = ice_update_pkg_no_lock(hw, bufs, count);
301 
302 	ice_release_change_lock(hw);
303 
304 	return status;
305 }
306 
307 static enum ice_ddp_state
308 ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
309 {
310 	switch (aq_err) {
311 	case ICE_AQ_RC_ENOSEC:
312 		return ICE_DDP_PKG_NO_SEC_MANIFEST;
313 	case ICE_AQ_RC_EBADSIG:
314 		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
315 	case ICE_AQ_RC_ESVN:
316 		return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
317 	case ICE_AQ_RC_EBADMAN:
318 		return ICE_DDP_PKG_MANIFEST_INVALID;
319 	case ICE_AQ_RC_EBADBUF:
320 		return ICE_DDP_PKG_BUFFER_INVALID;
321 	default:
322 		return ICE_DDP_PKG_ERR;
323 	}
324 }
325 
326 /**
327  * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
328  * @buf: pointer to buffer header
329  */
330 static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
331 {
332 	bool metadata = false;
333 
334 	if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
335 		metadata = true;
336 
337 	return metadata;
338 }
339 
340 /**
341  * ice_is_last_download_buffer
342  * @buf: pointer to current buffer header
343  * @idx: index of the buffer in the current sequence
344  * @count: the buffer count in the current sequence
345  *
346  * Note: this routine should only be called if the buffer is not the last buffer
347  */
348 static bool
349 ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
350 {
351 	bool last = ((idx + 1) == count);
352 
353 	/* A set metadata flag in the next buffer will signal that the current
354 	 * buffer will be the last buffer downloaded
355 	 */
356 	if (!last) {
357 		struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
358 
359 		last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
360 	}
361 
362 	return last;
363 }
364 
/**
 * ice_dwnld_cfg_bufs_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @start: buffer index of first buffer to download
 * @count: the number of buffers to download
 * @indicate_last: if true, then set last buffer flag on last buffer download
 *
 * Downloads package configuration buffers to the firmware. Metadata buffers
 * are skipped, and the first metadata buffer found indicates that the rest
 * of the buffers are all metadata buffers.
 *
 * Caller must hold the global config lock. On failure the admin queue's
 * last status is mapped to a DDP state for the caller.
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
			   u32 count, bool indicate_last)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	struct ice_buf_hdr *bh;
	enum ice_aq_err err;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)(bufs + start);
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_DDP_PKG_SUCCESS;

	for (i = 0; i < count; i++) {
		enum ice_status status;
		bool last = false;

		bh = (struct ice_buf_hdr *)(bufs + start + i);

		/* Signature-segment downloads never flag a last buffer */
		if (indicate_last)
			last = ice_is_last_download_buffer(bh, i, count);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			err = hw->adminq.sq_last_status;
			state = ice_map_aq_err_to_ddp_state(err);
			break;
		}

		/* Remaining buffers (if any) are metadata; stop early */
		if (last)
			break;
	}

	return state;
}
424 
425 /**
426  * ice_aq_get_pkg_info_list
427  * @hw: pointer to the hardware structure
428  * @pkg_info: the buffer which will receive the information list
429  * @buf_size: the size of the pkg_info information buffer
430  * @cd: pointer to command details structure or NULL
431  *
432  * Get Package Info List (0x0C43)
433  */
434 static enum ice_status
435 ice_aq_get_pkg_info_list(struct ice_hw *hw,
436 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
437 			 u16 buf_size, struct ice_sq_cd *cd)
438 {
439 	struct ice_aq_desc desc;
440 
441 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
442 
443 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
444 }
445 
446 /**
447  * ice_has_signing_seg - determine if package has a signing segment
448  * @hw: pointer to the hardware structure
449  * @pkg_hdr: pointer to the driver's package hdr
450  */
451 static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
452 {
453 	struct ice_generic_seg_hdr *seg_hdr;
454 
455 	seg_hdr = (struct ice_generic_seg_hdr *)
456 		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
457 
458 	return seg_hdr ? true : false;
459 }
460 
461 /**
462  * ice_get_pkg_segment_id - get correct package segment id, based on device
463  * @mac_type: MAC type of the device
464  */
465 static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
466 {
467 	u32 seg_id;
468 
469 	switch (mac_type) {
470 	case ICE_MAC_GENERIC:
471 	case ICE_MAC_GENERIC_3K:
472 	default:
473 		seg_id = SEGMENT_TYPE_ICE_E810;
474 		break;
475 	}
476 
477 	return seg_id;
478 }
479 
480 /**
481  * ice_get_pkg_sign_type - get package segment sign type, based on device
482  * @mac_type: MAC type of the device
483  */
484 static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
485 {
486 	u32 sign_type;
487 
488 	switch (mac_type) {
489 	case ICE_MAC_GENERIC_3K:
490 		sign_type = SEGMENT_SIGN_TYPE_RSA3K;
491 		break;
492 	case ICE_MAC_GENERIC:
493 	default:
494 		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
495 		break;
496 	}
497 
498 	return sign_type;
499 }
500 
501 /**
502  * ice_get_signing_req - get correct package requirements, based on device
503  * @hw: pointer to the hardware structure
504  */
505 static void ice_get_signing_req(struct ice_hw *hw)
506 {
507 	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
508 	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
509 }
510 
511 /**
512  * ice_download_pkg_sig_seg - download a signature segment
513  * @hw: pointer to the hardware structure
514  * @seg: pointer to signature segment
515  */
516 static enum ice_ddp_state
517 ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
518 {
519 	enum ice_ddp_state state;
520 
521 	state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
522 					   LE32_TO_CPU(seg->buf_tbl.buf_count),
523 					   false);
524 
525 	return state;
526 }
527 
528 /**
529  * ice_download_pkg_config_seg - download a config segment
530  * @hw: pointer to the hardware structure
531  * @pkg_hdr: pointer to package header
532  * @idx: segment index
533  * @start: starting buffer
534  * @count: buffer count
535  *
536  * Note: idx must reference a ICE segment
537  */
538 static enum ice_ddp_state
539 ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
540 			    u32 idx, u32 start, u32 count)
541 {
542 	struct ice_buf_table *bufs;
543 	enum ice_ddp_state state;
544 	struct ice_seg *seg;
545 	u32 buf_count;
546 
547 	seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
548 	if (!seg)
549 		return ICE_DDP_PKG_ERR;
550 
551 	bufs = ice_find_buf_table(seg);
552 	buf_count = LE32_TO_CPU(bufs->buf_count);
553 
554 	if (start >= buf_count || start + count > buf_count)
555 		return ICE_DDP_PKG_ERR;
556 
557 	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
558 					   true);
559 
560 	return state;
561 }
562 
563 /**
564  * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
565  * @hw: pointer to the hardware structure
566  * @pkg_hdr: pointer to package header
567  * @idx: segment index (must be a signature segment)
568  *
569  * Note: idx must reference a signature segment
570  */
571 static enum ice_ddp_state
572 ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
573 			    u32 idx)
574 {
575 	enum ice_ddp_state state;
576 	struct ice_sign_seg *seg;
577 	u32 conf_idx;
578 	u32 start;
579 	u32 count;
580 
581 	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
582 	if (!seg) {
583 		state = ICE_DDP_PKG_ERR;
584 		goto exit;
585 	}
586 
587 	conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
588 	start = LE32_TO_CPU(seg->signed_buf_start);
589 	count = LE32_TO_CPU(seg->signed_buf_count);
590 
591 	state = ice_download_pkg_sig_seg(hw, seg);
592 	if (state)
593 		goto exit;
594 
595 	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
596 					    count);
597 
598 exit:
599 	return state;
600 }
601 
602 /**
603  * ice_match_signing_seg - determine if a matching signing segment exists
604  * @pkg_hdr: pointer to package header
605  * @seg_id: segment id that is expected
606  * @sign_type: signing type
607  */
608 static bool
609 ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
610 {
611 	bool match = false;
612 	u32 i;
613 
614 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
615 		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
616 						   sign_type)) {
617 			match = true;
618 			break;
619 		}
620 	}
621 
622 	return match;
623 }
624 
625 /**
626  * ice_post_dwnld_pkg_actions - perform post download package actions
627  * @hw: pointer to the hardware structure
628  */
629 static enum ice_ddp_state
630 ice_post_dwnld_pkg_actions(struct ice_hw *hw)
631 {
632 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
633 	enum ice_status status;
634 
635 	status = ice_set_vlan_mode(hw);
636 	if (status) {
637 		ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
638 			  status);
639 		state = ICE_DDP_PKG_ERR;
640 	}
641 
642 	return state;
643 }
644 
645 /**
646  * ice_download_pkg_with_sig_seg - download package using signature segments
647  * @hw: pointer to the hardware structure
648  * @pkg_hdr: pointer to package header
649  */
650 static enum ice_ddp_state
651 ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
652 {
653 	enum ice_aq_err aq_err = hw->adminq.sq_last_status;
654 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
655 	enum ice_status status;
656 	u32 i;
657 
658 	ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
659 	ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
660 
661 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
662 	if (status) {
663 		if (status == ICE_ERR_AQ_NO_WORK)
664 			state = ICE_DDP_PKG_ALREADY_LOADED;
665 		else
666 			state = ice_map_aq_err_to_ddp_state(aq_err);
667 		return state;
668 	}
669 
670 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
671 		if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
672 						    hw->pkg_sign_type))
673 			continue;
674 
675 		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
676 		if (state)
677 			break;
678 	}
679 
680 	if (!state)
681 		state = ice_post_dwnld_pkg_actions(hw);
682 
683 	ice_release_global_cfg_lock(hw);
684 
685 	return state;
686 }
687 
/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware.
 *
 * ICE_ERR_AQ_NO_WORK from the lock acquire means another PF already loaded
 * the package; this is reported as ICE_DDP_PKG_ALREADY_LOADED, not an error.
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	enum ice_status status;
	struct ice_buf_hdr *bh;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_DDP_PKG_SUCCESS;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			return ICE_DDP_PKG_ALREADY_LOADED;
		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
	}

	/* Download all buffers, then run post-download actions while still
	 * holding the global config lock.
	 */
	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
	if (!state)
		state = ice_post_dwnld_pkg_actions(hw);

	ice_release_global_cfg_lock(hw);

	return state;
}
730 
731 /**
732  * ice_download_pkg_without_sig_seg
733  * @hw: pointer to the hardware structure
734  * @ice_seg: pointer to the segment of the package to be downloaded
735  *
736  * Handles the download of a complete package without signature segment.
737  */
738 static enum ice_ddp_state
739 ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
740 {
741 	struct ice_buf_table *ice_buf_tbl;
742 	enum ice_ddp_state state;
743 
744 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
745 		  ice_seg->hdr.seg_format_ver.major,
746 		  ice_seg->hdr.seg_format_ver.minor,
747 		  ice_seg->hdr.seg_format_ver.update,
748 		  ice_seg->hdr.seg_format_ver.draft);
749 
750 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
751 		  LE32_TO_CPU(ice_seg->hdr.seg_type),
752 		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
753 
754 	ice_buf_tbl = ice_find_buf_table(ice_seg);
755 
756 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
757 		  LE32_TO_CPU(ice_buf_tbl->buf_count));
758 
759 	state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
760 				   LE32_TO_CPU(ice_buf_tbl->buf_count));
761 
762 	return state;
763 }
764 
765 /**
766  * ice_download_pkg
767  * @hw: pointer to the hardware structure
768  * @pkg_hdr: pointer to package header
769  * @ice_seg: pointer to the segment of the package to be downloaded
770  *
771  * Handles the download of a complete package.
772  */
773 static enum ice_ddp_state
774 ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
775 		 struct ice_seg *ice_seg)
776 {
777 	enum ice_ddp_state state;
778 
779 	if (hw->pkg_has_signing_seg)
780 		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
781 	else
782 		state = ice_download_pkg_without_sig_seg(hw, ice_seg);
783 
784 	ice_post_pkg_dwnld_vlan_mode_cfg(hw);
785 
786 	return state;
787 }
788 
/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 *
 * Records whether the package has a signing segment, the required segment
 * id/sign type, and the package/segment version and name from the ICE
 * segment's metadata section. Returns ICE_DDP_PKG_INVALID_FILE if the ICE
 * segment or its metadata section is missing.
 */
static enum ice_ddp_state
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_DDP_PKG_ERR;

	/* Determine signing requirements before looking up the segment,
	 * since the lookup uses hw->pkg_seg_id set here.
	 */
	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
	ice_get_signing_req(hw);

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		/* Enumeration state must start zeroed */
		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		/* Get package information from the Metadata Section */
		meta = (struct ice_meta_sect *)
			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					     ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_DDP_PKG_INVALID_FILE;
		}

		hw->pkg_ver = meta->ver;
		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
			   ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}
852 
/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 *
 * Queries firmware for the package info list, records the active package's
 * version/name/track id in @hw, and logs every entry with a flag string
 * (A=active, B=active at boot, M=modified, N=in NVM).
 */
enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	u16 size;
	u32 i;

	/* Response buffer sized for up to ICE_PKG_CNT package records */
	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_DDP_PKG_ERR;

	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
		state = ICE_DDP_PKG_ERR;
		goto init_pkg_free_alloc;
	}

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			/* Save the active package's identity into hw */
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(pkg_info->pkg_info[i].name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return state;
}
912 
913 /**
914  * ice_label_enum_handler
915  * @sect_type: section type
916  * @section: pointer to section
917  * @index: index of the label entry to be returned
918  * @offset: pointer to receive absolute offset, always zero for label sections
919  *
920  * This is a callback function that can be passed to ice_pkg_enum_entry.
921  * Handles enumeration of individual label entries.
922  */
923 static void *
924 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
925 		       u32 *offset)
926 {
927 	struct ice_label_section *labels;
928 
929 	if (!section)
930 		return NULL;
931 
932 	if (index > ICE_MAX_LABELS_IN_BUF)
933 		return NULL;
934 
935 	if (offset)
936 		*offset = 0;
937 
938 	labels = (struct ice_label_section *)section;
939 	if (index >= LE16_TO_CPU(labels->count))
940 		return NULL;
941 
942 	return labels->label + index;
943 }
944 
945 /**
946  * ice_enum_labels
947  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
948  * @type: the section type that will contain the label (0 on subsequent calls)
949  * @state: ice_pkg_enum structure that will hold the state of the enumeration
950  * @value: pointer to a value that will return the label's value if found
951  *
952  * Enumerates a list of labels in the package. The caller will call
953  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
954  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
955  * the end of the list has been reached.
956  */
957 static char *
958 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
959 		u16 *value)
960 {
961 	struct ice_label *label;
962 
963 	/* Check for valid label section on first call */
964 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
965 		return NULL;
966 
967 	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
968 						       NULL,
969 						       ice_label_enum_handler);
970 	if (!label)
971 		return NULL;
972 
973 	*value = LE16_TO_CPU(label->value);
974 	return label->name;
975 }
976 
977 /**
978  * ice_find_label_value
979  * @ice_seg: pointer to the ice segment (non-NULL)
980  * @name: name of the label to search for
981  * @type: the section type that will contain the label
982  * @value: pointer to a value that will return the label's value if found
983  *
984  * Finds a label's value given the label name and the section type to search.
985  * The ice_seg parameter must not be NULL since the first call to
986  * ice_enum_labels requires a pointer to an actual ice_seg structure.
987  */
988 enum ice_status
989 ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
990 		     u16 *value)
991 {
992 	struct ice_pkg_enum state;
993 	char *label_name;
994 	u16 val;
995 
996 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
997 
998 	if (!ice_seg)
999 		return ICE_ERR_PARAM;
1000 
1001 	do {
1002 		label_name = ice_enum_labels(ice_seg, type, &state, &val);
1003 		if (label_name && !strcmp(label_name, name)) {
1004 			*value = val;
1005 			return ICE_SUCCESS;
1006 		}
1007 
1008 		ice_seg = NULL;
1009 	} while (label_name);
1010 
1011 	return ICE_ERR_CFG;
1012 }
1013 
1014 /**
1015  * ice_verify_pkg - verify package
1016  * @pkg: pointer to the package buffer
1017  * @len: size of the package buffer
1018  *
1019  * Verifies various attributes of the package file, including length, format
1020  * version, and the requirement of at least one segment.
1021  */
1022 enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1023 {
1024 	u32 seg_count;
1025 	u32 i;
1026 
1027 	if (len < ice_struct_size(pkg, seg_offset, 1))
1028 		return ICE_DDP_PKG_INVALID_FILE;
1029 
1030 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1031 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1032 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1033 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1034 		return ICE_DDP_PKG_INVALID_FILE;
1035 
1036 	/* pkg must have at least one segment */
1037 	seg_count = LE32_TO_CPU(pkg->seg_count);
1038 	if (seg_count < 1)
1039 		return ICE_DDP_PKG_INVALID_FILE;
1040 
1041 	/* make sure segment array fits in package length */
1042 	if (len < ice_struct_size(pkg, seg_offset, seg_count))
1043 		return ICE_DDP_PKG_INVALID_FILE;
1044 
1045 	/* all segments must fit within length */
1046 	for (i = 0; i < seg_count; i++) {
1047 		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1048 		struct ice_generic_seg_hdr *seg;
1049 
1050 		/* segment header must fit */
1051 		if (len < off + sizeof(*seg))
1052 			return ICE_DDP_PKG_INVALID_FILE;
1053 
1054 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1055 
1056 		/* segment body must fit */
1057 		if (len < off + LE32_TO_CPU(seg->seg_size))
1058 			return ICE_DDP_PKG_INVALID_FILE;
1059 	}
1060 
1061 	return ICE_DDP_PKG_SUCCESS;
1062 }
1063 
1064 /**
1065  * ice_free_seg - free package segment pointer
1066  * @hw: pointer to the hardware structure
1067  *
1068  * Frees the package segment pointer in the proper manner, depending on if the
1069  * segment was allocated or just the passed in pointer was stored.
1070  */
1071 void ice_free_seg(struct ice_hw *hw)
1072 {
1073 	if (hw->pkg_copy) {
1074 		ice_free(hw, hw->pkg_copy);
1075 		hw->pkg_copy = NULL;
1076 		hw->pkg_size = 0;
1077 	}
1078 	hw->seg = NULL;
1079 }
1080 
1081 /**
1082  * ice_chk_pkg_version - check package version for compatibility with driver
1083  * @pkg_ver: pointer to a version structure to check
1084  *
1085  * Check to make sure that the package about to be downloaded is compatible with
1086  * the driver. To be compatible, the major and minor components of the package
1087  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1088  * definitions.
1089  */
1090 static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1091 {
1092 	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
1093 	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1094 	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
1095 		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
1096 	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
1097 		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1098 		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
1099 		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
1100 
1101 	return ICE_DDP_PKG_SUCCESS;
1102 }
1103 
1104 /**
1105  * ice_chk_pkg_compat
1106  * @hw: pointer to the hardware structure
1107  * @ospkg: pointer to the package hdr
1108  * @seg: pointer to the package segment hdr
1109  *
1110  * This function checks the package version compatibility with driver and NVM
1111  */
1112 static enum ice_ddp_state
1113 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1114 		   struct ice_seg **seg)
1115 {
1116 	struct ice_aqc_get_pkg_info_resp *pkg;
1117 	enum ice_ddp_state state;
1118 	u16 size;
1119 	u32 i;
1120 
1121 	/* Check package version compatibility */
1122 	state = ice_chk_pkg_version(&hw->pkg_ver);
1123 	if (state) {
1124 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1125 		return state;
1126 	}
1127 
1128 	/* find ICE segment in given package */
1129 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
1130 						     ospkg);
1131 	if (!*seg) {
1132 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1133 		return ICE_DDP_PKG_INVALID_FILE;
1134 	}
1135 
1136 	/* Check if FW is compatible with the OS package */
1137 	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1138 	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1139 	if (!pkg)
1140 		return ICE_DDP_PKG_ERR;
1141 
1142 	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1143 		state = ICE_DDP_PKG_ERR;
1144 		goto fw_ddp_compat_free_alloc;
1145 	}
1146 
1147 	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1148 		/* loop till we find the NVM package */
1149 		if (!pkg->pkg_info[i].is_in_nvm)
1150 			continue;
1151 		if ((*seg)->hdr.seg_format_ver.major !=
1152 			pkg->pkg_info[i].ver.major ||
1153 		    (*seg)->hdr.seg_format_ver.minor >
1154 			pkg->pkg_info[i].ver.minor) {
1155 			state = ICE_DDP_PKG_FW_MISMATCH;
1156 			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1157 		}
1158 		/* done processing NVM package so break */
1159 		break;
1160 	}
1161 fw_ddp_compat_free_alloc:
1162 	ice_free(hw, pkg);
1163 	return state;
1164 }
1165 
1166 /**
1167  * ice_sw_fv_handler
1168  * @sect_type: section type
1169  * @section: pointer to section
1170  * @index: index of the field vector entry to be returned
1171  * @offset: ptr to variable that receives the offset in the field vector table
1172  *
1173  * This is a callback function that can be passed to ice_pkg_enum_entry.
1174  * This function treats the given section as of type ice_sw_fv_section and
1175  * enumerates offset field. "offset" is an index into the field vector table.
1176  */
1177 static void *
1178 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1179 {
1180 	struct ice_sw_fv_section *fv_section =
1181 		(struct ice_sw_fv_section *)section;
1182 
1183 	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1184 		return NULL;
1185 	if (index >= LE16_TO_CPU(fv_section->count))
1186 		return NULL;
1187 	if (offset)
1188 		/* "index" passed in to this function is relative to a given
1189 		 * 4k block. To get to the true index into the field vector
1190 		 * table need to add the relative index to the base_offset
1191 		 * field of this section
1192 		 */
1193 		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
1194 	return fv_section->fv + index;
1195 }
1196 
1197 /**
1198  * ice_get_prof_index_max - get the max profile index for used profile
1199  * @hw: pointer to the HW struct
1200  *
1201  * Calling this function will get the max profile index for used profile
1202  * and store the index number in struct ice_switch_info *switch_info
1203  * in hw for following use.
1204  */
1205 static int ice_get_prof_index_max(struct ice_hw *hw)
1206 {
1207 	u16 prof_index = 0, j, max_prof_index = 0;
1208 	struct ice_pkg_enum state;
1209 	struct ice_seg *ice_seg;
1210 	bool flag = false;
1211 	struct ice_fv *fv;
1212 	u32 offset;
1213 
1214 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1215 
1216 	if (!hw->seg)
1217 		return ICE_ERR_PARAM;
1218 
1219 	ice_seg = hw->seg;
1220 
1221 	do {
1222 		fv = (struct ice_fv *)
1223 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1224 					   &offset, ice_sw_fv_handler);
1225 		if (!fv)
1226 			break;
1227 		ice_seg = NULL;
1228 
1229 		/* in the profile that not be used, the prot_id is set to 0xff
1230 		 * and the off is set to 0x1ff for all the field vectors.
1231 		 */
1232 		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1233 			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1234 			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1235 				flag = true;
1236 		if (flag && prof_index > max_prof_index)
1237 			max_prof_index = prof_index;
1238 
1239 		prof_index++;
1240 		flag = false;
1241 	} while (fv);
1242 
1243 	hw->switch_info->max_used_prof_index = max_prof_index;
1244 
1245 	return ICE_SUCCESS;
1246 }
1247 
1248 /**
1249  * ice_get_ddp_pkg_state - get DDP pkg state after download
1250  * @hw: pointer to the HW struct
1251  * @already_loaded: indicates if pkg was already loaded onto the device
1252  *
1253  */
1254 static enum ice_ddp_state
1255 ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
1256 {
1257 	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
1258 	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
1259 	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
1260 	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
1261 	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
1262 		if (already_loaded)
1263 			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
1264 		else
1265 			return ICE_DDP_PKG_SUCCESS;
1266 	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
1267 		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
1268 		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
1269 	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
1270 		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
1271 		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
1272 	} else {
1273 		return ICE_DDP_PKG_ERR;
1274 	}
1275 }
1276 
1277 /**
1278  * ice_init_pkg_regs - initialize additional package registers
1279  * @hw: pointer to the hardware structure
1280  */
1281 static void ice_init_pkg_regs(struct ice_hw *hw)
1282 {
1283 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1284 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1285 #define ICE_SW_BLK_IDX	0
1286 
1287 	/* setup Switch block input mask, which is 48-bits in two parts */
1288 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1289 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1290 }
1291 
1292 /**
1293  * ice_init_pkg - initialize/download package
1294  * @hw: pointer to the hardware structure
1295  * @buf: pointer to the package buffer
1296  * @len: size of the package buffer
1297  *
1298  * This function initializes a package. The package contains HW tables
1299  * required to do packet processing. First, the function extracts package
1300  * information such as version. Then it finds the ice configuration segment
1301  * within the package; this function then saves a copy of the segment pointer
1302  * within the supplied package buffer. Next, the function will cache any hints
1303  * from the package, followed by downloading the package itself. Note, that if
1304  * a previous PF driver has already downloaded the package successfully, then
1305  * the current driver will not have to download the package again.
1306  *
1307  * The local package contents will be used to query default behavior and to
1308  * update specific sections of the HW's version of the package (e.g. to update
1309  * the parse graph to understand new protocols).
1310  *
1311  * This function stores a pointer to the package buffer memory, and it is
1312  * expected that the supplied buffer will not be freed immediately. If the
1313  * package buffer needs to be freed, such as when read from a file, use
1314  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1315  * case.
1316  */
1317 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1318 {
1319 	bool already_loaded = false;
1320 	enum ice_ddp_state state;
1321 	struct ice_pkg_hdr *pkg;
1322 	struct ice_seg *seg;
1323 
1324 	if (!buf || !len)
1325 		return ICE_DDP_PKG_ERR;
1326 
1327 	pkg = (struct ice_pkg_hdr *)buf;
1328 	state = ice_verify_pkg(pkg, len);
1329 	if (state) {
1330 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1331 			  state);
1332 		return state;
1333 	}
1334 
1335 	/* initialize package info */
1336 	state = ice_init_pkg_info(hw, pkg);
1337 	if (state)
1338 		return state;
1339 
1340 	/* For packages with signing segments, must be a matching segment */
1341 	if (hw->pkg_has_signing_seg)
1342 		if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
1343 					   hw->pkg_sign_type))
1344 			return ICE_DDP_PKG_ERR;
1345 
1346 	/* before downloading the package, check package version for
1347 	 * compatibility with driver
1348 	 */
1349 	state = ice_chk_pkg_compat(hw, pkg, &seg);
1350 	if (state)
1351 		return state;
1352 
1353 	/* initialize package hints and then download package */
1354 	ice_init_pkg_hints(hw, seg);
1355 	state = ice_download_pkg(hw, pkg, seg);
1356 
1357 	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1358 		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1359 		already_loaded = true;
1360 	}
1361 
1362 	/* Get information on the package currently loaded in HW, then make sure
1363 	 * the driver is compatible with this version.
1364 	 */
1365 	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1366 		state = ice_get_pkg_info(hw);
1367 		if (!state)
1368 			state = ice_get_ddp_pkg_state(hw, already_loaded);
1369 	}
1370 
1371 	if (ice_is_init_pkg_successful(state)) {
1372 		hw->seg = seg;
1373 		/* on successful package download update other required
1374 		 * registers to support the package and fill HW tables
1375 		 * with package content.
1376 		 */
1377 		ice_init_pkg_regs(hw);
1378 		ice_fill_blk_tbls(hw);
1379 		ice_get_prof_index_max(hw);
1380 	} else {
1381 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1382 			  state);
1383 	}
1384 
1385 	return state;
1386 }
1387 
1388 /**
1389  * ice_copy_and_init_pkg - initialize/download a copy of the package
1390  * @hw: pointer to the hardware structure
1391  * @buf: pointer to the package buffer
1392  * @len: size of the package buffer
1393  *
1394  * This function copies the package buffer, and then calls ice_init_pkg() to
1395  * initialize the copied package contents.
1396  *
1397  * The copying is necessary if the package buffer supplied is constant, or if
1398  * the memory may disappear shortly after calling this function.
1399  *
1400  * If the package buffer resides in the data segment and can be modified, the
1401  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1402  *
1403  * However, if the package buffer needs to be copied first, such as when being
1404  * read from a file, the caller should use ice_copy_and_init_pkg().
1405  *
1406  * This function will first copy the package buffer, before calling
1407  * ice_init_pkg(). The caller is free to immediately destroy the original
1408  * package buffer, as the new copy will be managed by this function and
1409  * related routines.
1410  */
1411 enum ice_ddp_state
1412 ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1413 {
1414 	enum ice_ddp_state state;
1415 	u8 *buf_copy;
1416 
1417 	if (!buf || !len)
1418 		return ICE_DDP_PKG_ERR;
1419 
1420 	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1421 
1422 	state = ice_init_pkg(hw, buf_copy, len);
1423 	if (!ice_is_init_pkg_successful(state)) {
1424 		/* Free the copy, since we failed to initialize the package */
1425 		ice_free(hw, buf_copy);
1426 	} else {
1427 		/* Track the copied pkg so we can free it later */
1428 		hw->pkg_copy = buf_copy;
1429 		hw->pkg_size = len;
1430 	}
1431 
1432 	return state;
1433 }
1434 
1435 /**
1436  * ice_is_init_pkg_successful - check if DDP init was successful
1437  * @state: state of the DDP pkg after download
1438  */
1439 bool ice_is_init_pkg_successful(enum ice_ddp_state state)
1440 {
1441 	switch (state) {
1442 	case ICE_DDP_PKG_SUCCESS:
1443 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
1444 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
1445 		return true;
1446 	default:
1447 		return false;
1448 	}
1449 }
1450 
1451 /**
1452  * ice_pkg_buf_alloc
1453  * @hw: pointer to the HW structure
1454  *
1455  * Allocates a package buffer and returns a pointer to the buffer header.
1456  * Note: all package contents must be in Little Endian form.
1457  */
1458 struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1459 {
1460 	struct ice_buf_build *bld;
1461 	struct ice_buf_hdr *buf;
1462 
1463 	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1464 	if (!bld)
1465 		return NULL;
1466 
1467 	buf = (struct ice_buf_hdr *)bld;
1468 	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1469 					     section_entry));
1470 	return bld;
1471 }
1472 
1473 static bool ice_is_gtp_u_profile(u32 prof_idx)
1474 {
1475 	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
1476 		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
1477 	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
1478 }
1479 
1480 static bool ice_is_gtp_c_profile(u32 prof_idx)
1481 {
1482 	switch (prof_idx) {
1483 	case ICE_PROFID_IPV4_GTPC_TEID:
1484 	case ICE_PROFID_IPV4_GTPC_NO_TEID:
1485 	case ICE_PROFID_IPV6_GTPC_TEID:
1486 	case ICE_PROFID_IPV6_GTPC_NO_TEID:
1487 		return true;
1488 	default:
1489 		return false;
1490 	}
1491 }
1492 
1493 /**
1494  * ice_get_sw_prof_type - determine switch profile type
1495  * @hw: pointer to the HW structure
1496  * @fv: pointer to the switch field vector
1497  * @prof_idx: profile index to check
1498  */
1499 static enum ice_prof_type
1500 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
1501 {
1502 	bool valid_prof = false;
1503 	u16 i;
1504 
1505 	if (ice_is_gtp_c_profile(prof_idx))
1506 		return ICE_PROF_TUN_GTPC;
1507 
1508 	if (ice_is_gtp_u_profile(prof_idx))
1509 		return ICE_PROF_TUN_GTPU;
1510 
1511 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1512 		if (fv->ew[i].off != ICE_NAN_OFFSET)
1513 			valid_prof = true;
1514 
1515 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1516 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1517 		    fv->ew[i].off == ICE_VNI_OFFSET)
1518 			return ICE_PROF_TUN_UDP;
1519 
1520 		/* GRE tunnel will have GRE protocol */
1521 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1522 			return ICE_PROF_TUN_GRE;
1523 	}
1524 
1525 	return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
1526 }
1527 
1528 /**
1529  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1530  * @hw: pointer to hardware structure
1531  * @req_profs: type of profiles requested
1532  * @bm: pointer to memory for returning the bitmap of field vectors
1533  */
1534 void
1535 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1536 		     ice_bitmap_t *bm)
1537 {
1538 	struct ice_pkg_enum state;
1539 	struct ice_seg *ice_seg;
1540 	struct ice_fv *fv;
1541 
1542 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1543 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1544 	ice_seg = hw->seg;
1545 	do {
1546 		enum ice_prof_type prof_type;
1547 		u32 offset;
1548 
1549 		fv = (struct ice_fv *)
1550 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1551 					   &offset, ice_sw_fv_handler);
1552 		ice_seg = NULL;
1553 
1554 		if (fv) {
1555 			/* Determine field vector type */
1556 			prof_type = ice_get_sw_prof_type(hw, fv, offset);
1557 
1558 			if (req_profs & prof_type)
1559 				ice_set_bit((u16)offset, bm);
1560 		}
1561 	} while (fv);
1562 }
1563 
1564 /**
1565  * ice_get_sw_fv_list
1566  * @hw: pointer to the HW structure
1567  * @lkups: lookup elements or match criteria for the advanced recipe, one
1568  *	   structure per protocol header
1569  * @bm: bitmap of field vectors to consider
1570  * @fv_list: Head of a list
1571  *
1572  * Finds all the field vector entries from switch block that contain
1573  * a given protocol ID and offset and returns a list of structures of type
1574  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1575  * definition and profile ID information
1576  * NOTE: The caller of the function is responsible for freeing the memory
1577  * allocated for every list entry.
1578  */
1579 enum ice_status
1580 ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
1581 		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1582 {
1583 	struct ice_sw_fv_list_entry *fvl;
1584 	struct ice_sw_fv_list_entry *tmp;
1585 	struct ice_pkg_enum state;
1586 	struct ice_seg *ice_seg;
1587 	struct ice_fv *fv;
1588 	u32 offset;
1589 
1590 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1591 
1592 	if (!lkups->n_val_words || !hw->seg)
1593 		return ICE_ERR_PARAM;
1594 
1595 	ice_seg = hw->seg;
1596 	do {
1597 		u16 i;
1598 
1599 		fv = (struct ice_fv *)
1600 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1601 					   &offset, ice_sw_fv_handler);
1602 		if (!fv)
1603 			break;
1604 		ice_seg = NULL;
1605 
1606 		/* If field vector is not in the bitmap list, then skip this
1607 		 * profile.
1608 		 */
1609 		if (!ice_is_bit_set(bm, (u16)offset))
1610 			continue;
1611 
1612 		for (i = 0; i < lkups->n_val_words; i++) {
1613 			int j;
1614 
1615 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1616 				if (fv->ew[j].prot_id ==
1617 				    lkups->fv_words[i].prot_id &&
1618 				    fv->ew[j].off == lkups->fv_words[i].off)
1619 					break;
1620 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1621 				break;
1622 			if (i + 1 == lkups->n_val_words) {
1623 				fvl = (struct ice_sw_fv_list_entry *)
1624 					ice_malloc(hw, sizeof(*fvl));
1625 				if (!fvl)
1626 					goto err;
1627 				fvl->fv_ptr = fv;
1628 				fvl->profile_id = offset;
1629 				LIST_ADD(&fvl->list_entry, fv_list);
1630 				break;
1631 			}
1632 		}
1633 	} while (fv);
1634 	if (LIST_EMPTY(fv_list)) {
1635 		ice_warn(hw, "Required profiles not found in currently loaded DDP package");
1636 		return ICE_ERR_CFG;
1637 	}
1638 	return ICE_SUCCESS;
1639 
1640 err:
1641 	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1642 				 list_entry) {
1643 		LIST_DEL(&fvl->list_entry);
1644 		ice_free(hw, fvl);
1645 	}
1646 
1647 	return ICE_ERR_NO_MEMORY;
1648 }
1649 
1650 /**
1651  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1652  * @hw: pointer to hardware structure
1653  */
1654 void ice_init_prof_result_bm(struct ice_hw *hw)
1655 {
1656 	struct ice_pkg_enum state;
1657 	struct ice_seg *ice_seg;
1658 	struct ice_fv *fv;
1659 
1660 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1661 
1662 	if (!hw->seg)
1663 		return;
1664 
1665 	ice_seg = hw->seg;
1666 	do {
1667 		u32 off;
1668 		u16 i;
1669 
1670 		fv = (struct ice_fv *)
1671 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1672 					   &off, ice_sw_fv_handler);
1673 		ice_seg = NULL;
1674 		if (!fv)
1675 			break;
1676 
1677 		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1678 				ICE_MAX_FV_WORDS);
1679 
1680 		/* Determine empty field vector indices, these can be
1681 		 * used for recipe results. Skip index 0, since it is
1682 		 * always used for Switch ID.
1683 		 */
1684 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1685 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1686 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1687 				ice_set_bit(i,
1688 					    hw->switch_info->prof_res_bm[off]);
1689 	} while (fv);
1690 }
1691 
1692 /**
1693  * ice_pkg_buf_free
1694  * @hw: pointer to the HW structure
1695  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1696  *
1697  * Frees a package buffer
1698  */
1699 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1700 {
1701 	ice_free(hw, bld);
1702 }
1703 
1704 /**
1705  * ice_pkg_buf_reserve_section
1706  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1707  * @count: the number of sections to reserve
1708  *
1709  * Reserves one or more section table entries in a package buffer. This routine
1710  * can be called multiple times as long as they are made before calling
1711  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1712  * is called once, the number of sections that can be allocated will not be able
1713  * to be increased; not using all reserved sections is fine, but this will
1714  * result in some wasted space in the buffer.
1715  * Note: all package contents must be in Little Endian form.
1716  */
1717 enum ice_status
1718 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1719 {
1720 	struct ice_buf_hdr *buf;
1721 	u16 section_count;
1722 	u16 data_end;
1723 
1724 	if (!bld)
1725 		return ICE_ERR_PARAM;
1726 
1727 	buf = (struct ice_buf_hdr *)&bld->buf;
1728 
1729 	/* already an active section, can't increase table size */
1730 	section_count = LE16_TO_CPU(buf->section_count);
1731 	if (section_count > 0)
1732 		return ICE_ERR_CFG;
1733 
1734 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1735 		return ICE_ERR_CFG;
1736 	bld->reserved_section_table_entries += count;
1737 
1738 	data_end = LE16_TO_CPU(buf->data_end) +
1739 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1740 	buf->data_end = CPU_TO_LE16(data_end);
1741 
1742 	return ICE_SUCCESS;
1743 }
1744 
1745 /**
1746  * ice_pkg_buf_alloc_section
1747  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1748  * @type: the section type value
1749  * @size: the size of the section to reserve (in bytes)
1750  *
1751  * Reserves memory in the buffer for a section's content and updates the
1752  * buffers' status accordingly. This routine returns a pointer to the first
1753  * byte of the section start within the buffer, which is used to fill in the
1754  * section contents.
1755  * Note: all package contents must be in Little Endian form.
1756  */
1757 void *
1758 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1759 {
1760 	struct ice_buf_hdr *buf;
1761 	u16 sect_count;
1762 	u16 data_end;
1763 
1764 	if (!bld || !type || !size)
1765 		return NULL;
1766 
1767 	buf = (struct ice_buf_hdr *)&bld->buf;
1768 
1769 	/* check for enough space left in buffer */
1770 	data_end = LE16_TO_CPU(buf->data_end);
1771 
1772 	/* section start must align on 4 byte boundary */
1773 	data_end = ICE_ALIGN(data_end, 4);
1774 
1775 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1776 		return NULL;
1777 
1778 	/* check for more available section table entries */
1779 	sect_count = LE16_TO_CPU(buf->section_count);
1780 	if (sect_count < bld->reserved_section_table_entries) {
1781 		void *section_ptr = ((u8 *)buf) + data_end;
1782 
1783 		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1784 		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1785 		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1786 
1787 		data_end += size;
1788 		buf->data_end = CPU_TO_LE16(data_end);
1789 
1790 		buf->section_count = CPU_TO_LE16(sect_count + 1);
1791 		return section_ptr;
1792 	}
1793 
1794 	/* no free section table entries */
1795 	return NULL;
1796 }
1797 
1798 /**
1799  * ice_pkg_buf_alloc_single_section
1800  * @hw: pointer to the HW structure
1801  * @type: the section type value
1802  * @size: the size of the section to reserve (in bytes)
1803  * @section: returns pointer to the section
1804  *
1805  * Allocates a package buffer with a single section.
1806  * Note: all package contents must be in Little Endian form.
1807  */
1808 struct ice_buf_build *
1809 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1810 				 void **section)
1811 {
1812 	struct ice_buf_build *buf;
1813 
1814 	if (!section)
1815 		return NULL;
1816 
1817 	buf = ice_pkg_buf_alloc(hw);
1818 	if (!buf)
1819 		return NULL;
1820 
1821 	if (ice_pkg_buf_reserve_section(buf, 1))
1822 		goto ice_pkg_buf_alloc_single_section_err;
1823 
1824 	*section = ice_pkg_buf_alloc_section(buf, type, size);
1825 	if (!*section)
1826 		goto ice_pkg_buf_alloc_single_section_err;
1827 
1828 	return buf;
1829 
1830 ice_pkg_buf_alloc_single_section_err:
1831 	ice_pkg_buf_free(hw, buf);
1832 	return NULL;
1833 }
1834 
1835 /**
1836  * ice_pkg_buf_unreserve_section
1837  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1838  * @count: the number of sections to unreserve
1839  *
1840  * Unreserves one or more section table entries in a package buffer, releasing
1841  * space that can be used for section data. This routine can be called
1842  * multiple times as long as they are made before calling
1843  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1844  * is called once, the number of sections that can be allocated will not be able
1845  * to be increased; not using all reserved sections is fine, but this will
1846  * result in some wasted space in the buffer.
1847  * Note: all package contents must be in Little Endian form.
1848  */
1849 enum ice_status
1850 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
1851 {
1852 	struct ice_buf_hdr *buf;
1853 	u16 section_count;
1854 	u16 data_end;
1855 
1856 	if (!bld)
1857 		return ICE_ERR_PARAM;
1858 
1859 	buf = (struct ice_buf_hdr *)&bld->buf;
1860 
1861 	/* already an active section, can't decrease table size */
1862 	section_count = LE16_TO_CPU(buf->section_count);
1863 	if (section_count > 0)
1864 		return ICE_ERR_CFG;
1865 
1866 	if (count > bld->reserved_section_table_entries)
1867 		return ICE_ERR_CFG;
1868 	bld->reserved_section_table_entries -= count;
1869 
1870 	data_end = LE16_TO_CPU(buf->data_end) -
1871 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1872 	buf->data_end = CPU_TO_LE16(data_end);
1873 
1874 	return ICE_SUCCESS;
1875 }
1876 
1877 /**
1878  * ice_pkg_buf_get_free_space
1879  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1880  *
1881  * Returns the number of free bytes remaining in the buffer.
1882  * Note: all package contents must be in Little Endian form.
1883  */
1884 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
1885 {
1886 	struct ice_buf_hdr *buf;
1887 
1888 	if (!bld)
1889 		return 0;
1890 
1891 	buf = (struct ice_buf_hdr *)&bld->buf;
1892 	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
1893 }
1894 
1895 /**
1896  * ice_pkg_buf_get_active_sections
1897  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1898  *
1899  * Returns the number of active sections. Before using the package buffer
1900  * in an update package command, the caller should make sure that there is at
1901  * least one active section - otherwise, the buffer is not legal and should
1902  * not be used.
1903  * Note: all package contents must be in Little Endian form.
1904  */
1905 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1906 {
1907 	struct ice_buf_hdr *buf;
1908 
1909 	if (!bld)
1910 		return 0;
1911 
1912 	buf = (struct ice_buf_hdr *)&bld->buf;
1913 	return LE16_TO_CPU(buf->section_count);
1914 }
1915 
1916 /**
1917  * ice_pkg_buf
1918  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1919  *
1920  * Return a pointer to the buffer's header
1921  */
1922 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1923 {
1924 	if (bld)
1925 		return &bld->buf;
1926 
1927 	return NULL;
1928 }
1929 
1930 /**
1931  * ice_find_buf_table
1932  * @ice_seg: pointer to the ice segment
1933  *
1934  * Returns the address of the buffer table within the ice segment.
1935  */
1936 struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
1937 {
1938 	struct ice_nvm_table *nvms;
1939 
1940 	nvms = (struct ice_nvm_table *)
1941 		(ice_seg->device_table +
1942 		 LE32_TO_CPU(ice_seg->device_table_count));
1943 
1944 	return (_FORCE_ struct ice_buf_table *)
1945 		(nvms->vers + LE32_TO_CPU(nvms->table_count));
1946 }
1947 
1948 /**
1949  * ice_pkg_val_buf
1950  * @buf: pointer to the ice buffer
1951  *
1952  * This helper function validates a buffer's header.
1953  */
1954 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
1955 {
1956 	struct ice_buf_hdr *hdr;
1957 	u16 section_count;
1958 	u16 data_end;
1959 
1960 	hdr = (struct ice_buf_hdr *)buf->buf;
1961 	/* verify data */
1962 	section_count = LE16_TO_CPU(hdr->section_count);
1963 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
1964 		return NULL;
1965 
1966 	data_end = LE16_TO_CPU(hdr->data_end);
1967 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
1968 		return NULL;
1969 
1970 	return hdr;
1971 }
1972 
1973 /**
1974  * ice_pkg_enum_buf
1975  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
1976  * @state: pointer to the enum state
1977  *
1978  * This function will enumerate all the buffers in the ice segment. The first
1979  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
1980  * ice_seg is set to NULL which continues the enumeration. When the function
1981  * returns a NULL pointer, then the end of the buffers has been reached, or an
1982  * unexpected value has been detected (for example an invalid section count or
1983  * an invalid buffer end value).
1984  */
1985 struct ice_buf_hdr *
1986 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
1987 {
1988 	if (ice_seg) {
1989 		state->buf_table = ice_find_buf_table(ice_seg);
1990 		if (!state->buf_table)
1991 			return NULL;
1992 
1993 		state->buf_idx = 0;
1994 		return ice_pkg_val_buf(state->buf_table->buf_array);
1995 	}
1996 
1997 	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
1998 		return ice_pkg_val_buf(state->buf_table->buf_array +
1999 				       state->buf_idx);
2000 	else
2001 		return NULL;
2002 }
2003 
2004 /**
2005  * ice_pkg_advance_sect
2006  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2007  * @state: pointer to the enum state
2008  *
2009  * This helper function will advance the section within the ice segment,
2010  * also advancing the buffer if needed.
2011  */
2012 bool
2013 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
2014 {
2015 	if (!ice_seg && !state->buf)
2016 		return false;
2017 
2018 	if (!ice_seg && state->buf)
2019 		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
2020 			return true;
2021 
2022 	state->buf = ice_pkg_enum_buf(ice_seg, state);
2023 	if (!state->buf)
2024 		return false;
2025 
2026 	/* start of new buffer, reset section index */
2027 	state->sect_idx = 0;
2028 	return true;
2029 }
2030 
2031 /**
2032  * ice_pkg_enum_section
2033  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2034  * @state: pointer to the enum state
2035  * @sect_type: section type to enumerate
2036  *
2037  * This function will enumerate all the sections of a particular type in the
2038  * ice segment. The first call is made with the ice_seg parameter non-NULL;
2039  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2040  * When the function returns a NULL pointer, then the end of the matching
2041  * sections has been reached.
2042  */
2043 void *
2044 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2045 		     u32 sect_type)
2046 {
2047 	u16 offset, size;
2048 
2049 	if (ice_seg)
2050 		state->type = sect_type;
2051 
2052 	if (!ice_pkg_advance_sect(ice_seg, state))
2053 		return NULL;
2054 
2055 	/* scan for next matching section */
2056 	while (state->buf->section_entry[state->sect_idx].type !=
2057 	       CPU_TO_LE32(state->type))
2058 		if (!ice_pkg_advance_sect(NULL, state))
2059 			return NULL;
2060 
2061 	/* validate section */
2062 	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2063 	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
2064 		return NULL;
2065 
2066 	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
2067 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
2068 		return NULL;
2069 
2070 	/* make sure the section fits in the buffer */
2071 	if (offset + size > ICE_PKG_BUF_SIZE)
2072 		return NULL;
2073 
2074 	state->sect_type =
2075 		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
2076 
2077 	/* calc pointer to this section */
2078 	state->sect = ((u8 *)state->buf) +
2079 		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
2080 
2081 	return state->sect;
2082 }
2083 
2084 /**
2085  * ice_pkg_enum_entry
2086  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2087  * @state: pointer to the enum state
2088  * @sect_type: section type to enumerate
2089  * @offset: pointer to variable that receives the offset in the table (optional)
2090  * @handler: function that handles access to the entries into the section type
2091  *
2092  * This function will enumerate all the entries in particular section type in
2093  * the ice segment. The first call is made with the ice_seg parameter non-NULL;
2094  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2095  * When the function returns a NULL pointer, then the end of the entries has
2096  * been reached.
2097  *
2098  * Since each section may have a different header and entry size, the handler
2099  * function is needed to determine the number and location entries in each
2100  * section.
2101  *
2102  * The offset parameter is optional, but should be used for sections that
2103  * contain an offset for each section table. For such cases, the section handler
2104  * function must return the appropriate offset + index to give the absolution
2105  * offset for each entry. For example, if the base for a section's header
2106  * indicates a base offset of 10, and the index for the entry is 2, then
2107  * section handler function should set the offset to 10 + 2 = 12.
2108  */
2109 void *
2110 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
2111 		   u32 sect_type, u32 *offset,
2112 		   void *(*handler)(u32 sect_type, void *section,
2113 				    u32 index, u32 *offset))
2114 {
2115 	void *entry;
2116 
2117 	if (ice_seg) {
2118 		if (!handler)
2119 			return NULL;
2120 
2121 		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
2122 			return NULL;
2123 
2124 		state->entry_idx = 0;
2125 		state->handler = handler;
2126 	} else {
2127 		state->entry_idx++;
2128 	}
2129 
2130 	if (!state->handler)
2131 		return NULL;
2132 
2133 	/* get entry */
2134 	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
2135 			       offset);
2136 	if (!entry) {
2137 		/* end of a section, look for another section of this type */
2138 		if (!ice_pkg_enum_section(NULL, state, 0))
2139 			return NULL;
2140 
2141 		state->entry_idx = 0;
2142 		entry = state->handler(state->sect_type, state->sect,
2143 				       state->entry_idx, offset);
2144 	}
2145 
2146 	return entry;
2147 }
2148 
2149 /**
2150  * ice_boost_tcam_handler
2151  * @sect_type: section type
2152  * @section: pointer to section
2153  * @index: index of the boost TCAM entry to be returned
2154  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
2155  *
2156  * This is a callback function that can be passed to ice_pkg_enum_entry.
2157  * Handles enumeration of individual boost TCAM entries.
2158  */
2159 static void *
2160 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
2161 {
2162 	struct ice_boost_tcam_section *boost;
2163 
2164 	if (!section)
2165 		return NULL;
2166 
2167 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
2168 		return NULL;
2169 
2170 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
2171 		return NULL;
2172 
2173 	if (offset)
2174 		*offset = 0;
2175 
2176 	boost = (struct ice_boost_tcam_section *)section;
2177 	if (index >= LE16_TO_CPU(boost->count))
2178 		return NULL;
2179 
2180 	return boost->tcam + index;
2181 }
2182 
2183 /**
2184  * ice_find_boost_entry
2185  * @ice_seg: pointer to the ice segment (non-NULL)
2186  * @addr: Boost TCAM address of entry to search for
2187  * @entry: returns pointer to the entry
2188  *
2189  * Finds a particular Boost TCAM entry and returns a pointer to that entry
2190  * if it is found. The ice_seg parameter must not be NULL since the first call
2191  * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
2192  */
2193 static enum ice_status
2194 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
2195 		     struct ice_boost_tcam_entry **entry)
2196 {
2197 	struct ice_boost_tcam_entry *tcam;
2198 	struct ice_pkg_enum state;
2199 
2200 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2201 
2202 	if (!ice_seg)
2203 		return ICE_ERR_PARAM;
2204 
2205 	do {
2206 		tcam = (struct ice_boost_tcam_entry *)
2207 		       ice_pkg_enum_entry(ice_seg, &state,
2208 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
2209 					  ice_boost_tcam_handler);
2210 		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
2211 			*entry = tcam;
2212 			return ICE_SUCCESS;
2213 		}
2214 
2215 		ice_seg = NULL;
2216 	} while (tcam);
2217 
2218 	*entry = NULL;
2219 	return ICE_ERR_CFG;
2220 }
2221 
2222 /**
2223  * ice_init_pkg_hints
2224  * @hw: pointer to the HW structure
2225  * @ice_seg: pointer to the segment of the package scan (non-NULL)
2226  *
2227  * This function will scan the package and save off relevant information
2228  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
2229  * since the first call to ice_enum_labels requires a pointer to an actual
2230  * ice_seg structure.
2231  */
2232 void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
2233 {
2234 	struct ice_pkg_enum state;
2235 	char *label_name;
2236 	u16 val;
2237 	int i;
2238 
2239 	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
2240 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2241 
2242 	if (!ice_seg)
2243 		return;
2244 
2245 	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
2246 				     &val);
2247 
2248 	while (label_name) {
2249 /* TODO: Replace !strnsmp() with wrappers like match_some_pre() */
2250 		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
2251 			/* check for a tunnel entry */
2252 			ice_add_tunnel_hint(hw, label_name, val);
2253 
2254 		label_name = ice_enum_labels(NULL, 0, &state, &val);
2255 	}
2256 
2257 	/* Cache the appropriate boost TCAM entry pointers for tunnels */
2258 	for (i = 0; i < hw->tnl.count; i++) {
2259 		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
2260 				     &hw->tnl.tbl[i].boost_entry);
2261 		if (hw->tnl.tbl[i].boost_entry)
2262 			hw->tnl.tbl[i].valid = true;
2263 	}
2264 }
2265 
2266 /**
2267  * ice_acquire_global_cfg_lock
2268  * @hw: pointer to the HW structure
2269  * @access: access type (read or write)
2270  *
2271  * This function will request ownership of the global config lock for reading
2272  * or writing of the package. When attempting to obtain write access, the
2273  * caller must check for the following two return values:
2274  *
2275  * ICE_SUCCESS        - Means the caller has acquired the global config lock
2276  *                      and can perform writing of the package.
2277  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
2278  *                      package or has found that no update was necessary; in
2279  *                      this case, the caller can just skip performing any
2280  *                      update of the package.
2281  */
2282 enum ice_status
2283 ice_acquire_global_cfg_lock(struct ice_hw *hw,
2284 			    enum ice_aq_res_access_type access)
2285 {
2286 	enum ice_status status;
2287 
2288 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
2289 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2290 
2291 	if (status == ICE_ERR_AQ_NO_WORK)
2292 		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
2293 
2294 	return status;
2295 }
2296 
2297 /**
2298  * ice_release_global_cfg_lock
2299  * @hw: pointer to the HW structure
2300  *
2301  * This function will release the global config lock.
2302  */
2303 void ice_release_global_cfg_lock(struct ice_hw *hw)
2304 {
2305 	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
2306 }
2307 
2308 /**
2309  * ice_acquire_change_lock
2310  * @hw: pointer to the HW structure
2311  * @access: access type (read or write)
2312  *
2313  * This function will request ownership of the change lock.
2314  */
2315 enum ice_status
2316 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
2317 {
2318 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
2319 			       ICE_CHANGE_LOCK_TIMEOUT);
2320 }
2321 
2322 /**
2323  * ice_release_change_lock
2324  * @hw: pointer to the HW structure
2325  *
2326  * This function will release the change lock using the proper Admin Command.
2327  */
2328 void ice_release_change_lock(struct ice_hw *hw)
2329 {
2330 	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
2331 }
2332 
2333 /**
2334  * ice_get_set_tx_topo - get or set tx topology
2335  * @hw: pointer to the HW struct
2336  * @buf: pointer to tx topology buffer
2337  * @buf_size: buffer size
2338  * @cd: pointer to command details structure or NULL
2339  * @flags: pointer to descriptor flags
2340  * @set: 0-get, 1-set topology
2341  *
2342  * The function will get or set tx topology
2343  */
2344 static enum ice_status
2345 ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
2346 		    struct ice_sq_cd *cd, u8 *flags, bool set)
2347 {
2348 	struct ice_aqc_get_set_tx_topo *cmd;
2349 	struct ice_aq_desc desc;
2350 	enum ice_status status;
2351 
2352 	cmd = &desc.params.get_set_tx_topo;
2353 	if (set) {
2354 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
2355 		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
2356 		/* requested to update a new topology, not a default topolgy */
2357 		if (buf)
2358 			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
2359 					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
2360 	} else {
2361 		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
2362 		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
2363 	}
2364 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2365 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2366 	if (status)
2367 		return status;
2368 	/* read the return flag values (first byte) for get operation */
2369 	if (!set && flags)
2370 		*flags = desc.params.get_set_tx_topo.set_flags;
2371 
2372 	return ICE_SUCCESS;
2373 }
2374 
2375 /**
2376  * ice_cfg_tx_topo - Initialize new tx topology if available
2377  * @hw: pointer to the HW struct
2378  * @buf: pointer to Tx topology buffer
2379  * @len: buffer size
2380  *
2381  * The function will apply the new Tx topology from the package buffer
2382  * if available.
2383  */
2384 enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
2385 {
2386 	u8 *current_topo, *new_topo = NULL;
2387 	struct ice_run_time_cfg_seg *seg;
2388 	struct ice_buf_hdr *section;
2389 	struct ice_pkg_hdr *pkg_hdr;
2390 	enum ice_ddp_state state;
2391 	u16 i, size = 0, offset;
2392 	enum ice_status status;
2393 	u32 reg = 0;
2394 	u8 flags;
2395 
2396 	if (!buf || !len)
2397 		return ICE_ERR_PARAM;
2398 
2399 	/* Does FW support new Tx topology mode ? */
2400 	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
2401 		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
2402 		return ICE_ERR_NOT_SUPPORTED;
2403 	}
2404 
2405 	current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2406 	if (!current_topo)
2407 		return ICE_ERR_NO_MEMORY;
2408 
2409 	/* get the current Tx topology */
2410 	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
2411 				     &flags, false);
2412 	ice_free(hw, current_topo);
2413 
2414 	if (status) {
2415 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
2416 		return status;
2417 	}
2418 
2419 	/* Is default topology already applied ? */
2420 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2421 	    hw->num_tx_sched_layers == 9) {
2422 		ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
2423 		/* Already default topology is loaded */
2424 		return ICE_ERR_ALREADY_EXISTS;
2425 	}
2426 
2427 	/* Is new topology already applied ? */
2428 	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2429 	    hw->num_tx_sched_layers == 5) {
2430 		ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
2431 		/* Already new topology is loaded */
2432 		return ICE_ERR_ALREADY_EXISTS;
2433 	}
2434 
2435 	/* Is set topology issued already ? */
2436 	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
2437 		ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n");
2438 		/* add a small delay before exiting */
2439 		for (i = 0; i < 20; i++)
2440 			ice_msec_delay(100, true);
2441 		return ICE_ERR_ALREADY_EXISTS;
2442 	}
2443 
2444 	/* Change the topology from new to default (5 to 9) */
2445 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2446 	    hw->num_tx_sched_layers == 5) {
2447 		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
2448 		goto update_topo;
2449 	}
2450 
2451 	pkg_hdr = (struct ice_pkg_hdr *)buf;
2452 	state = ice_verify_pkg(pkg_hdr, len);
2453 	if (state) {
2454 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
2455 			  state);
2456 		return ICE_ERR_CFG;
2457 	}
2458 
2459 	/* find run time configuration segment */
2460 	seg = (struct ice_run_time_cfg_seg *)
2461 		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
2462 	if (!seg) {
2463 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
2464 		return ICE_ERR_CFG;
2465 	}
2466 
2467 	if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
2468 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
2469 			  seg->buf_table.buf_count);
2470 		return ICE_ERR_CFG;
2471 	}
2472 
2473 	section = ice_pkg_val_buf(seg->buf_table.buf_array);
2474 
2475 	if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
2476 		ICE_SID_TX_5_LAYER_TOPO) {
2477 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
2478 		return ICE_ERR_CFG;
2479 	}
2480 
2481 	size = LE16_TO_CPU(section->section_entry[0].size);
2482 	offset = LE16_TO_CPU(section->section_entry[0].offset);
2483 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
2484 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
2485 		return ICE_ERR_CFG;
2486 	}
2487 
2488 	/* make sure the section fits in the buffer */
2489 	if (offset + size > ICE_PKG_BUF_SIZE) {
2490 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
2491 		return ICE_ERR_CFG;
2492 	}
2493 
2494 	/* Get the new topology buffer */
2495 	new_topo = ((u8 *)section) + offset;
2496 
2497 update_topo:
2498 	/* acquire global lock to make sure that set topology issued
2499 	 * by one PF
2500 	 */
2501 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
2502 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2503 	if (status) {
2504 		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
2505 		return status;
2506 	}
2507 
2508 	/* check reset was triggered already or not */
2509 	reg = rd32(hw, GLGEN_RSTAT);
2510 	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
2511 		/* Reset is in progress, re-init the hw again */
2512 		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
2513 		ice_check_reset(hw);
2514 		return ICE_SUCCESS;
2515 	}
2516 
2517 	/* set new topology */
2518 	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
2519 	if (status) {
2520 		ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
2521 		return status;
2522 	}
2523 
2524 	/* new topology is updated, delay 1 second before issuing the CORRER */
2525 	for (i = 0; i < 10; i++)
2526 		ice_msec_delay(100, true);
2527 	ice_reset(hw, ICE_RESET_CORER);
2528 	/* CORER will clear the global lock, so no explicit call
2529 	 * required for release
2530 	 */
2531 	return ICE_SUCCESS;
2532 }
2533