Lines matching "tcam-based" in drivers/net/ethernet/intel/ice/ice_ddp.c
1 // SPDX-License-Identifier: GPL-2.0
10 * boost tcam entries. The metadata label names that match the following
27 * ice_verify_pkg - verify package
42 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || in ice_verify_pkg()
43 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || in ice_verify_pkg()
44 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || in ice_verify_pkg()
45 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) in ice_verify_pkg()
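
The four-field comparison above rejects any package whose format version deviates at all. A minimal userspace sketch of the same exact-match test, with illustrative struct and constant names (the real definitions live in the driver):

#include <stdbool.h>

struct pkg_fmt_ver { unsigned char major, minor, update, draft; };

/* Assumed supported format version, stand-in for the ICE_PKG_FMT_VER_* set. */
static const struct pkg_fmt_ver supported = { 1, 0, 0, 0 };

static bool pkg_format_ok(const struct pkg_fmt_ver *v)
{
	/* Any mismatch in any of the four fields rejects the package. */
	return v->major == supported.major && v->minor == supported.minor &&
	       v->update == supported.update && v->draft == supported.draft;
}
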
49 seg_count = le32_to_cpu(pkg->seg_count); in ice_verify_pkg()
59 u32 off = le32_to_cpu(pkg->seg_offset[i]); in ice_verify_pkg()
69 if (len < off + le32_to_cpu(seg->seg_size)) in ice_verify_pkg()
77 * ice_free_seg - free package segment pointer
85 if (hw->pkg_copy) { in ice_free_seg()
86 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); in ice_free_seg()
87 hw->pkg_copy = NULL; in ice_free_seg()
88 hw->pkg_size = 0; in ice_free_seg()
90 hw->seg = NULL; in ice_free_seg()
94 * ice_chk_pkg_version - check package version for compatibility with driver
104 if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || in ice_chk_pkg_version()
105 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && in ice_chk_pkg_version()
106 pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) in ice_chk_pkg_version()
108 else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || in ice_chk_pkg_version()
109 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && in ice_chk_pkg_version()
110 pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) in ice_chk_pkg_version()
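
Compatibility here is a three-way decision on (major, minor) only: a package newer than the driver supports and one older than it supports are distinct failures, and the update/draft fields are never compared. A condensed sketch with placeholder result names:

enum pkg_compat { PKG_COMPAT_OK, PKG_TOO_NEW, PKG_TOO_OLD };

static enum pkg_compat chk_version(unsigned int maj, unsigned int min,
				   unsigned int supp_maj, unsigned int supp_min)
{
	if (maj > supp_maj || (maj == supp_maj && min > supp_min))
		return PKG_TOO_NEW;	/* update the driver */
	if (maj < supp_maj || (maj == supp_maj && min < supp_min))
		return PKG_TOO_OLD;	/* update the DDP package */
	return PKG_COMPAT_OK;
}
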
128 hdr = (const struct ice_buf_hdr *)buf->buf; in ice_pkg_val_buf()
130 section_count = le16_to_cpu(hdr->section_count); in ice_pkg_val_buf()
134 data_end = le16_to_cpu(hdr->data_end); in ice_pkg_val_buf()
150 (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count)); in ice_find_buf_table()
152 return (__force struct ice_buf_table *)(nvms->vers + in ice_find_buf_table()
153 le32_to_cpu(nvms->table_count)); in ice_find_buf_table()
162 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
172 state->buf_table = ice_find_buf_table(ice_seg); in ice_pkg_enum_buf()
173 if (!state->buf_table) in ice_pkg_enum_buf()
176 state->buf_idx = 0; in ice_pkg_enum_buf()
177 return ice_pkg_val_buf(state->buf_table->buf_array); in ice_pkg_enum_buf()
180 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) in ice_pkg_enum_buf()
181 return ice_pkg_val_buf(state->buf_table->buf_array + in ice_pkg_enum_buf()
182 state->buf_idx); in ice_pkg_enum_buf()
198 if (!ice_seg && !state->buf) in ice_pkg_advance_sect()
201 if (!ice_seg && state->buf) in ice_pkg_advance_sect()
202 if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) in ice_pkg_advance_sect()
205 state->buf = ice_pkg_enum_buf(ice_seg, state); in ice_pkg_advance_sect()
206 if (!state->buf) in ice_pkg_advance_sect()
210 state->sect_idx = 0; in ice_pkg_advance_sect()
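
ice_pkg_enum_buf() and ice_pkg_advance_sect() share one calling convention: the first call passes the segment and primes the state, every later call passes NULL and resumes from the stored cursor. A generic sketch of that convention, with illustrative types:

struct enum_state { const int *items; int count; int idx; };

static const int *enum_next(const int *items, int count,
			    struct enum_state *st)
{
	if (items) {		/* first call: prime the cursor */
		st->items = items;
		st->count = count;
		st->idx = 0;
	} else {		/* later calls: advance */
		st->idx++;
	}
	if (!st->items || st->idx >= st->count)
		return NULL;	/* enumeration exhausted */
	return &st->items[st->idx];
}
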
221 * ice segment. The first call is made with the ice_seg parameter non-NULL;
232 state->type = sect_type; in ice_pkg_enum_section()
238 while (state->buf->section_entry[state->sect_idx].type != in ice_pkg_enum_section()
239 cpu_to_le32(state->type)) in ice_pkg_enum_section()
244 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); in ice_pkg_enum_section()
248 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); in ice_pkg_enum_section()
256 state->sect_type = in ice_pkg_enum_section()
257 le32_to_cpu(state->buf->section_entry[state->sect_idx].type); in ice_pkg_enum_section()
260 state->sect = in ice_pkg_enum_section()
261 ((u8 *)state->buf) + in ice_pkg_enum_section()
262 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); in ice_pkg_enum_section()
264 return state->sect; in ice_pkg_enum_section()
276 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
307 state->entry_idx = 0; in ice_pkg_enum_entry()
308 state->handler = handler; in ice_pkg_enum_entry()
310 state->entry_idx++; in ice_pkg_enum_entry()
313 if (!state->handler) in ice_pkg_enum_entry()
317 entry = state->handler(state->sect_type, state->sect, state->entry_idx, in ice_pkg_enum_entry()
324 state->entry_idx = 0; in ice_pkg_enum_entry()
325 entry = state->handler(state->sect_type, state->sect, in ice_pkg_enum_entry()
326 state->entry_idx, offset); in ice_pkg_enum_entry()
350 if (index >= le16_to_cpu(fv_section->count)) in ice_sw_fv_handler()
358 *offset = le16_to_cpu(fv_section->base_offset) + index; in ice_sw_fv_handler()
359 return fv_section->fv + index; in ice_sw_fv_handler()
363 * ice_get_prof_index_max - get the max profile index in use
381 if (!hw->seg) in ice_get_prof_index_max()
382 return -EINVAL; in ice_get_prof_index_max()
384 ice_seg = hw->seg; in ice_get_prof_index_max()
396 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) in ice_get_prof_index_max()
397 if (fv->ew[j].prot_id != ICE_PROT_INVALID || in ice_get_prof_index_max()
398 fv->ew[j].off != ICE_FV_OFFSET_INVAL) in ice_get_prof_index_max()
407 hw->switch_info->max_used_prof_index = max_prof_index; in ice_get_prof_index_max()
413 * ice_get_ddp_pkg_state - get DDP pkg state after download
420 if (hw->pkg_ver.major == hw->active_pkg_ver.major && in ice_get_ddp_pkg_state()
421 hw->pkg_ver.minor == hw->active_pkg_ver.minor && in ice_get_ddp_pkg_state()
422 hw->pkg_ver.update == hw->active_pkg_ver.update && in ice_get_ddp_pkg_state()
423 hw->pkg_ver.draft == hw->active_pkg_ver.draft && in ice_get_ddp_pkg_state()
424 !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { in ice_get_ddp_pkg_state()
429 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || in ice_get_ddp_pkg_state()
430 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { in ice_get_ddp_pkg_state()
432 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && in ice_get_ddp_pkg_state()
433 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { in ice_get_ddp_pkg_state()
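
The chain above classifies the result of a download: an exact version-and-name match means this package is now the active one, a supported (major, minor) match means a different but compatible package is active, and anything else is incompatible. Condensed, with placeholder state names:

enum ddp_state { DDP_LOADED, DDP_COMPAT_ALT_LOADED, DDP_INCOMPATIBLE };

static enum ddp_state pkg_state(int exact_match, unsigned int act_maj,
				unsigned int act_min, unsigned int supp_maj,
				unsigned int supp_min)
{
	if (exact_match)
		return DDP_LOADED;
	if (act_maj == supp_maj && act_min == supp_min)
		return DDP_COMPAT_ALT_LOADED;
	return DDP_INCOMPATIBLE;
}
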
441 * ice_init_pkg_regs - initialize additional package registers
450 /* setup Switch block input mask, which is 48-bits in two parts */ in ice_init_pkg_regs()
459 * @index: index of the Marker PType TCAM entry to be returned
460 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
463 * Handles enumeration of individual Marker PType TCAM entries.
480 if (index >= le16_to_cpu(marker_ptype->count)) in ice_marker_ptype_tcam_handler()
483 return marker_ptype->tcam + index; in ice_marker_ptype_tcam_handler()
494 if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { in ice_add_dvm_hint()
495 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; in ice_add_dvm_hint()
496 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; in ice_add_dvm_hint()
497 hw->dvm_upd.count++; in ice_add_dvm_hint()
509 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { in ice_add_tunnel_hint()
520 * character ('0' - '7') will be located where our in ice_add_tunnel_hint()
523 if ((label_name[len] - '0') == hw->pf_id) { in ice_add_tunnel_hint()
524 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; in ice_add_tunnel_hint()
525 hw->tnl.tbl[hw->tnl.count].valid = false; in ice_add_tunnel_hint()
526 hw->tnl.tbl[hw->tnl.count].boost_addr = val; in ice_add_tunnel_hint()
527 hw->tnl.tbl[hw->tnl.count].port = 0; in ice_add_tunnel_hint()
528 hw->tnl.count++; in ice_add_tunnel_hint()
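
Each tunnel label name ends in a single digit naming the owning PF, checked immediately after the matched prefix. A standalone sketch of that test (the prefix string passed in is hypothetical):

#include <stdbool.h>
#include <string.h>

static bool label_matches_pf(const char *label, const char *prefix,
			     unsigned int pf_id)
{
	size_t len = strlen(prefix);

	if (strncmp(label, prefix, len))
		return false;
	/* The PF-owner digit ('0'-'7') sits right after the prefix. */
	return (unsigned int)(label[len] - '0') == pf_id;
}
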
560 if (index >= le16_to_cpu(labels->count)) in ice_label_enum_handler()
563 return labels->label + index; in ice_label_enum_handler()
592 *value = le16_to_cpu(label->value); in ice_enum_labels()
593 return label->name; in ice_enum_labels()
600 * @index: index of the boost TCAM entry to be returned
601 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
604 * Handles enumeration of individual boost TCAM entries.
624 if (index >= le16_to_cpu(boost->count)) in ice_boost_tcam_handler()
627 return boost->tcam + index; in ice_boost_tcam_handler()
632 * @ice_seg: pointer to the ice segment (non-NULL)
633 * @addr: Boost TCAM address of entry to search for
636 * Finds a particular Boost TCAM entry and returns a pointer to that entry
643 struct ice_boost_tcam_entry *tcam; in ice_find_boost_entry() local
649 return -EINVAL; in ice_find_boost_entry()
652 tcam = ice_pkg_enum_entry(ice_seg, &state, in ice_find_boost_entry()
655 if (tcam && le16_to_cpu(tcam->addr) == addr) { in ice_find_boost_entry()
656 *entry = tcam; in ice_find_boost_entry()
661 } while (tcam); in ice_find_boost_entry()
664 return -EIO; in ice_find_boost_entry()
668 * ice_is_init_pkg_successful - check if DDP init was successful
700 buf->data_end = in ice_pkg_buf_alloc()
732 * ice_get_sw_prof_type - determine switch profile type
751 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { in ice_get_sw_prof_type()
753 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && in ice_get_sw_prof_type()
754 fv->ew[i].off == ICE_VNI_OFFSET) in ice_get_sw_prof_type()
758 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) in ice_get_sw_prof_type()
766 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
785 ice_seg = hw->seg; in ice_get_sw_fv_bitmap()
830 if (!lkups->n_val_words || !hw->seg) in ice_get_sw_fv_list()
831 return -EINVAL; in ice_get_sw_fv_list()
833 ice_seg = hw->seg; in ice_get_sw_fv_list()
849 for (i = 0; i < lkups->n_val_words; i++) { in ice_get_sw_fv_list()
852 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) in ice_get_sw_fv_list()
853 if (fv->ew[j].prot_id == in ice_get_sw_fv_list()
854 lkups->fv_words[i].prot_id && in ice_get_sw_fv_list()
855 fv->ew[j].off == lkups->fv_words[i].off) in ice_get_sw_fv_list()
857 if (j >= hw->blk[ICE_BLK_SW].es.fvw) in ice_get_sw_fv_list()
859 if (i + 1 == lkups->n_val_words) { in ice_get_sw_fv_list()
864 fvl->fv_ptr = fv; in ice_get_sw_fv_list()
865 fvl->profile_id = offset; in ice_get_sw_fv_list()
866 list_add(&fvl->list_entry, fv_list); in ice_get_sw_fv_list()
874 return -EIO; in ice_get_sw_fv_list()
881 list_del(&fvl->list_entry); in ice_get_sw_fv_list()
885 return -ENOMEM; in ice_get_sw_fv_list()
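
On allocation failure the function unwinds everything it already linked into fv_list before returning -ENOMEM, so callers never see a partial list. The same unwind expressed with a plain singly-linked list instead of the kernel's list_head:

#include <stdlib.h>

struct fv_node { struct fv_node *next; };

static void free_fv_list(struct fv_node **head)
{
	while (*head) {
		struct fv_node *n = *head;

		*head = n->next;	/* unlink first, then free */
		free(n);
	}
}
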
889 * ice_init_prof_result_bm - Initialize the profile result index bitmap
900 if (!hw->seg) in ice_init_prof_result_bm()
903 ice_seg = hw->seg; in ice_init_prof_result_bm()
914 bitmap_zero(hw->switch_info->prof_res_bm[off], in ice_init_prof_result_bm()
922 if (fv->ew[i].prot_id == ICE_PROT_INVALID && in ice_init_prof_result_bm()
923 fv->ew[i].off == ICE_FV_OFFSET_INVAL) in ice_init_prof_result_bm()
924 set_bit(i, hw->switch_info->prof_res_bm[off]); in ice_init_prof_result_bm()
960 return -EINVAL; in ice_pkg_buf_reserve_section()
962 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_reserve_section()
965 section_count = le16_to_cpu(buf->section_count); in ice_pkg_buf_reserve_section()
967 return -EIO; in ice_pkg_buf_reserve_section()
969 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) in ice_pkg_buf_reserve_section()
970 return -EIO; in ice_pkg_buf_reserve_section()
971 bld->reserved_section_table_entries += count; in ice_pkg_buf_reserve_section()
973 data_end = le16_to_cpu(buf->data_end) + in ice_pkg_buf_reserve_section()
975 buf->data_end = cpu_to_le16(data_end); in ice_pkg_buf_reserve_section()
1001 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_alloc_section()
1004 data_end = le16_to_cpu(buf->data_end); in ice_pkg_buf_alloc_section()
1013 sect_count = le16_to_cpu(buf->section_count); in ice_pkg_buf_alloc_section()
1014 if (sect_count < bld->reserved_section_table_entries) { in ice_pkg_buf_alloc_section()
1017 buf->section_entry[sect_count].offset = cpu_to_le16(data_end); in ice_pkg_buf_alloc_section()
1018 buf->section_entry[sect_count].size = cpu_to_le16(size); in ice_pkg_buf_alloc_section()
1019 buf->section_entry[sect_count].type = cpu_to_le32(type); in ice_pkg_buf_alloc_section()
1022 buf->data_end = cpu_to_le16(data_end); in ice_pkg_buf_alloc_section()
1024 buf->section_count = cpu_to_le16(sect_count + 1); in ice_pkg_buf_alloc_section()
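
Together, reserve and alloc implement a two-step build: reserving entries moves data_end past the grown section table, and each allocation then claims the next table slot, records offset/size/type, and advances data_end. A simplified userspace model (structures are stand-ins and offsets are relative to the payload area):

#include <stdint.h>

#define MAX_SECTS 8

struct sect_entry { uint32_t type; uint16_t offset, size; };

struct buf_model {
	uint16_t section_count, reserved, data_end;
	struct sect_entry tab[MAX_SECTS];
	uint8_t data[256];
};

static void *alloc_section(struct buf_model *b, uint32_t type, uint16_t size)
{
	if (b->section_count >= b->reserved ||
	    b->data_end + size > sizeof(b->data))
		return NULL;		/* no reserved slot or no room left */

	struct sect_entry *e = &b->tab[b->section_count++];

	e->type = type;
	e->offset = b->data_end;
	e->size = size;
	b->data_end += size;		/* next section starts here */
	return b->data + e->offset;
}
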
1075 * least one active section - otherwise, the buffer is not legal and should
1086 buf = (struct ice_buf_hdr *)&bld->buf; in ice_pkg_buf_get_active_sections()
1087 return le16_to_cpu(buf->section_count); in ice_pkg_buf_get_active_sections()
1101 return &bld->buf; in ice_pkg_buf()
1129 * 0 - Means the caller has acquired the global config lock
1131 * -EALREADY - Indicates another driver has already written the
1146 else if (status == -EALREADY) in ice_acquire_global_cfg_lock()
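
A caller must treat the two lock outcomes documented above differently: 0 obliges it to download and then release the lock, while -EALREADY means another PF already wrote the package and only profile data needs loading. A sketch with hypothetical stub helpers:

#include <errno.h>

/* Hypothetical stubs standing in for the real lock and download calls. */
static int acquire_global_cfg_lock(void) { return 0; }
static void release_global_cfg_lock(void) { }
static int do_download(void) { return 0; }

static int try_download(void)
{
	int status = acquire_global_cfg_lock();

	if (status == -EALREADY)
		return 0;	/* package already written: not an error */
	if (status)
		return status;	/* genuine lock failure */
	status = do_download();
	release_global_cfg_lock();
	return status;
}
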
1196 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; in ice_aq_download_pkg()
1199 if (status == -EIO) { in ice_aq_download_pkg()
1205 *error_offset = le32_to_cpu(resp->error_offset); in ice_aq_download_pkg()
1207 *error_info = le32_to_cpu(resp->error_info); in ice_aq_download_pkg()
1214 * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
1220 return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF; in ice_is_buffer_metadata()
1224 * struct ice_ddp_send_ctx - sending context of current DDP segment
1239 ctx->err = err; in ice_ddp_send_ctx_set_err()
1243 * ice_ddp_send_hunk - send one hunk of data to FW
1259 struct ice_buf_hdr *prev_hunk = ctx->hdr; in ice_ddp_send_hunk()
1260 struct ice_hw *hw = ctx->hw; in ice_ddp_send_hunk()
1266 if (ctx->err) in ice_ddp_send_hunk()
1267 return ctx->err; in ice_ddp_send_hunk()
1269 ctx->hdr = hunk; in ice_ddp_send_hunk()
1280 aq_err = hw->adminq.sq_last_status; in ice_ddp_send_hunk()
1288 ctx->err = ice_map_aq_err_to_ddp_state(aq_err); in ice_ddp_send_hunk()
1294 return ctx->err; in ice_ddp_send_hunk()
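
The sending context makes errors sticky: once ctx->err is set, every later hunk call is a no-op that returns the first failure, so the download loop can run to completion and report a single error. A reduced sketch:

struct send_ctx { int err; };

static int send_hunk(struct send_ctx *ctx, const void *hunk, int len)
{
	(void)hunk;
	(void)len;

	if (ctx->err)
		return ctx->err;	/* earlier failure: skip silently */
	/* ... transmit the hunk here; on failure set ctx->err ... */
	return ctx->err;
}
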
1346 if (idx < le32_to_cpu(pkg_hdr->seg_count)) in ice_get_pkg_seg_by_idx()
1349 le32_to_cpu(pkg_hdr->seg_offset[idx])); in ice_get_pkg_seg_by_idx()
1355 * ice_is_signing_seg_at_idx - determine if segment is a signing segment
1367 return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; in ice_is_signing_seg_at_idx()
1390 if (seg && le32_to_cpu(seg->seg_id) == seg_id && in ice_is_signing_seg_type_at_idx()
1391 le32_to_cpu(seg->sign_type) == sign_type) in ice_is_signing_seg_type_at_idx()
1398 * ice_download_pkg_sig_seg - download a signature segment
1405 return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0, in ice_download_pkg_sig_seg()
1406 le32_to_cpu(seg->buf_tbl.buf_count)); in ice_download_pkg_sig_seg()
1410 * ice_download_pkg_config_seg - download a config segment
1433 buf_count = le32_to_cpu(bufs->buf_count); in ice_download_pkg_config_seg()
1438 return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count); in ice_download_pkg_config_seg()
1448 * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
1470 count = le32_to_cpu(seg->signed_buf_count); in ice_dwnld_sign_and_cfg_segs()
1475 conf_idx = le32_to_cpu(seg->signed_seg_idx); in ice_dwnld_sign_and_cfg_segs()
1476 start = le32_to_cpu(seg->signed_buf_start); in ice_dwnld_sign_and_cfg_segs()
1484 flags = le32_to_cpu(seg->flags); in ice_dwnld_sign_and_cfg_segs()
1492 * ice_match_signing_seg - determine if a matching signing segment exists
1502 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_match_signing_seg()
1512 * ice_post_dwnld_pkg_actions - perform post download package actions
1540 enum ice_aq_err aq_err = hw->adminq.sq_last_status; in ice_download_pkg_with_sig_seg()
1546 ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); in ice_download_pkg_with_sig_seg()
1547 ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); in ice_download_pkg_with_sig_seg()
1551 if (status == -EALREADY) in ice_download_pkg_with_sig_seg()
1558 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_download_pkg_with_sig_seg()
1559 if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, in ice_download_pkg_with_sig_seg()
1560 hw->pkg_sign_type)) in ice_download_pkg_with_sig_seg()
1606 if (status == -EALREADY) in ice_dwnld_cfg_bufs()
1608 return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); in ice_dwnld_cfg_bufs()
1635 ice_seg->hdr.seg_format_ver.major, in ice_download_pkg_without_sig_seg()
1636 ice_seg->hdr.seg_format_ver.minor, in ice_download_pkg_without_sig_seg()
1637 ice_seg->hdr.seg_format_ver.update, in ice_download_pkg_without_sig_seg()
1638 ice_seg->hdr.seg_format_ver.draft); in ice_download_pkg_without_sig_seg()
1641 le32_to_cpu(ice_seg->hdr.seg_type), in ice_download_pkg_without_sig_seg()
1642 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); in ice_download_pkg_without_sig_seg()
1647 le32_to_cpu(ice_buf_tbl->buf_count)); in ice_download_pkg_without_sig_seg()
1649 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, in ice_download_pkg_without_sig_seg()
1650 le32_to_cpu(ice_buf_tbl->buf_count)); in ice_download_pkg_without_sig_seg()
1667 if (hw->pkg_has_signing_seg) in ice_download_pkg()
1727 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; in ice_aq_update_pkg()
1730 if (status == -EIO) { in ice_aq_update_pkg()
1736 *error_offset = le32_to_cpu(resp->error_offset); in ice_aq_update_pkg()
1738 *error_info = le32_to_cpu(resp->error_info); in ice_aq_update_pkg()
1780 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), in ice_update_pkg_no_lock()
1834 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, in ice_find_seg_in_pkg()
1835 pkg_hdr->pkg_format_ver.update, in ice_find_seg_in_pkg()
1836 pkg_hdr->pkg_format_ver.draft); in ice_find_seg_in_pkg()
1839 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { in ice_find_seg_in_pkg()
1842 seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]); in ice_find_seg_in_pkg()
1844 if (le32_to_cpu(seg->seg_type) == seg_type) in ice_find_seg_in_pkg()
1852 * ice_has_signing_seg - determine if package has a signing segment
1867 * ice_get_pkg_segment_id - get correct package segment id, based on device
1889 * ice_get_pkg_sign_type - get package segment sign type, based on device
1913 * ice_get_signing_req - get correct package requirements, based on device
1918 hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); in ice_get_signing_req()
1919 hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); in ice_get_signing_req()
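
The device's MAC type selects both which segment ID to look for in the package and which signature type that segment must carry. A sketch of the mapping with placeholder values (the real IDs are driver constants):

enum mac_kind { MAC_GEN_A, MAC_GEN_B };	/* illustrative generations */

static void get_signing_req(enum mac_kind mac, unsigned int *seg_id,
			    unsigned int *sign_type)
{
	switch (mac) {
	case MAC_GEN_B:
		*seg_id = 2;		/* placeholder segment ID */
		*sign_type = 2;		/* placeholder signature type */
		break;
	default:
		*seg_id = 1;
		*sign_type = 1;
		break;
	}
}
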
1937 hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); in ice_init_pkg_info()
1941 hw->pkg_seg_id); in ice_init_pkg_info()
1944 ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); in ice_init_pkg_info()
1960 hw->pkg_ver = meta->ver; in ice_init_pkg_info()
1961 memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); in ice_init_pkg_info()
1964 meta->ver.major, meta->ver.minor, meta->ver.update, in ice_init_pkg_info()
1965 meta->ver.draft, meta->name); in ice_init_pkg_info()
1967 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; in ice_init_pkg_info()
1968 memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id)); in ice_init_pkg_info()
1971 seg_hdr->seg_format_ver.major, in ice_init_pkg_info()
1972 seg_hdr->seg_format_ver.minor, in ice_init_pkg_info()
1973 seg_hdr->seg_format_ver.update, in ice_init_pkg_info()
1974 seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); in ice_init_pkg_info()
2000 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { in ice_get_pkg_info()
2005 if (pkg_info->pkg_info[i].is_active) { in ice_get_pkg_info()
2007 hw->active_pkg_ver = pkg_info->pkg_info[i].ver; in ice_get_pkg_info()
2008 hw->active_track_id = in ice_get_pkg_info()
2009 le32_to_cpu(pkg_info->pkg_info[i].track_id); in ice_get_pkg_info()
2010 memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, in ice_get_pkg_info()
2011 sizeof(pkg_info->pkg_info[i].name)); in ice_get_pkg_info()
2012 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; in ice_get_pkg_info()
2014 if (pkg_info->pkg_info[i].is_active_at_boot) in ice_get_pkg_info()
2016 if (pkg_info->pkg_info[i].is_modified) in ice_get_pkg_info()
2018 if (pkg_info->pkg_info[i].is_in_nvm) in ice_get_pkg_info()
2022 pkg_info->pkg_info[i].ver.major, in ice_get_pkg_info()
2023 pkg_info->pkg_info[i].ver.minor, in ice_get_pkg_info()
2024 pkg_info->pkg_info[i].ver.update, in ice_get_pkg_info()
2025 pkg_info->pkg_info[i].ver.draft, in ice_get_pkg_info()
2026 pkg_info->pkg_info[i].name, flags); in ice_get_pkg_info()
2051 state = ice_chk_pkg_version(&hw->pkg_ver); in ice_chk_pkg_compat()
2058 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, in ice_chk_pkg_compat()
2069 for (i = 0; i < le32_to_cpu(pkg->count); i++) { in ice_chk_pkg_compat()
2071 if (!pkg->pkg_info[i].is_in_nvm) in ice_chk_pkg_compat()
2073 if ((*seg)->hdr.seg_format_ver.major != in ice_chk_pkg_compat()
2074 pkg->pkg_info[i].ver.major || in ice_chk_pkg_compat()
2075 (*seg)->hdr.seg_format_ver.minor > in ice_chk_pkg_compat()
2076 pkg->pkg_info[i].ver.minor) { in ice_chk_pkg_compat()
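
The test above encodes the NVM compatibility rule: the segment's major version must equal the NVM package's, and its minor version must not be newer. As a predicate:

#include <stdbool.h>

static bool seg_compat_with_nvm(unsigned int seg_maj, unsigned int seg_min,
				unsigned int nvm_maj, unsigned int nvm_min)
{
	return seg_maj == nvm_maj && seg_min <= nvm_min;
}
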
2091 * @ice_seg: pointer to the segment of the package scan (non-NULL)
2105 memset(&hw->tnl, 0, sizeof(hw->tnl)); in ice_init_pkg_hints()
2130 /* Cache the appropriate boost TCAM entry pointers for tunnels */ in ice_init_pkg_hints()
2131 for (i = 0; i < hw->tnl.count; i++) { in ice_init_pkg_hints()
2132 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, in ice_init_pkg_hints()
2133 &hw->tnl.tbl[i].boost_entry); in ice_init_pkg_hints()
2134 if (hw->tnl.tbl[i].boost_entry) { in ice_init_pkg_hints()
2135 hw->tnl.tbl[i].valid = true; in ice_init_pkg_hints()
2136 if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) in ice_init_pkg_hints()
2137 hw->tnl.valid_count[hw->tnl.tbl[i].type]++; in ice_init_pkg_hints()
2141 /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ in ice_init_pkg_hints()
2142 for (i = 0; i < hw->dvm_upd.count; i++) in ice_init_pkg_hints()
2143 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, in ice_init_pkg_hints()
2144 &hw->dvm_upd.tbl[i].boost_entry); in ice_init_pkg_hints()
2148 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
2153 struct ice_marker_ptype_tcam_entry *tcam; in ice_fill_hw_ptype() local
2154 struct ice_seg *seg = hw->seg; in ice_fill_hw_ptype()
2157 bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); in ice_fill_hw_ptype()
2164 tcam = ice_pkg_enum_entry(seg, &state, in ice_fill_hw_ptype()
2167 if (tcam && in ice_fill_hw_ptype()
2168 le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && in ice_fill_hw_ptype()
2169 le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) in ice_fill_hw_ptype()
2170 set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); in ice_fill_hw_ptype()
2173 } while (tcam); in ice_fill_hw_ptype()
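
Each in-range Marker PType TCAM entry contributes one bit to the HW ptype bitmap; entries with an out-of-range address or ptype are skipped. A sketch of the per-entry step, with a plain uint64_t array in place of the kernel's set_bit():

#include <stdint.h>

#define PTYPE_MAX 1024	/* illustrative bound */

static void mark_ptype(uint64_t *bm, uint16_t addr, uint16_t ptype,
		       uint16_t addr_max)
{
	if (addr < addr_max && ptype < PTYPE_MAX)
		bm[ptype / 64] |= 1ULL << (ptype % 64);
}
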
2177 * ice_init_pkg - initialize/download package
2225 if (hw->pkg_has_signing_seg && in ice_init_pkg()
2226 !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type)) in ice_init_pkg()
2241 "package previously loaded - no work.\n"); in ice_init_pkg()
2255 hw->seg = seg; in ice_init_pkg()
2272 * ice_copy_and_init_pkg - initialize/download a copy of the package
2311 hw->pkg_copy = buf_copy; in ice_copy_and_init_pkg()
2312 hw->pkg_size = len; in ice_copy_and_init_pkg()
2319 * ice_get_set_tx_topo - get or set Tx topology
2325 * @set: 0-get, 1-set topology
2342 cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; in ice_get_set_tx_topo()
2345 cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | in ice_get_set_tx_topo()
2352 cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; in ice_get_set_tx_topo()
2369 * ice_cfg_tx_topo - Initialize new Tx topology if available
2392 return -EINVAL; in ice_cfg_tx_topo()
2395 if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { in ice_cfg_tx_topo()
2397 return -EOPNOTSUPP; in ice_cfg_tx_topo()
2402 return -ENOMEM; in ice_cfg_tx_topo()
2415 hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { in ice_cfg_tx_topo()
2417 return -EEXIST; in ice_cfg_tx_topo()
2422 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { in ice_cfg_tx_topo()
2424 return -EEXIST; in ice_cfg_tx_topo()
2432 return -EEXIST; in ice_cfg_tx_topo()
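
The -EEXIST returns above are a no-op guard: if the scheduler already runs the requested number of layers, there is no topology change to apply. Condensed:

#include <errno.h>
#include <stdbool.h>

static int check_topo_change(unsigned int cur_layers, bool want_5_layer)
{
	unsigned int want = want_5_layer ? 5 : 9;	/* 5- or 9-layer tree */

	return cur_layers == want ? -EEXIST : 0;
}
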
2437 hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { in ice_cfg_tx_topo()
2447 return -EIO; in ice_cfg_tx_topo()
2455 return -EIO; in ice_cfg_tx_topo()
2458 if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { in ice_cfg_tx_topo()
2460 seg->buf_table.buf_count); in ice_cfg_tx_topo()
2461 return -EIO; in ice_cfg_tx_topo()
2464 section = ice_pkg_val_buf(seg->buf_table.buf_array); in ice_cfg_tx_topo()
2465 if (!section || le32_to_cpu(section->section_entry[0].type) != in ice_cfg_tx_topo()
2468 return -EIO; in ice_cfg_tx_topo()
2471 size = le16_to_cpu(section->section_entry[0].size); in ice_cfg_tx_topo()
2472 offset = le16_to_cpu(section->section_entry[0].offset); in ice_cfg_tx_topo()
2475 return -EIO; in ice_cfg_tx_topo()
2481 return -EIO; in ice_cfg_tx_topo()
2503 /* Reset is in progress, re-init the HW again */ in ice_cfg_tx_topo()