Lines Matching +full:fw +full:- +full:cfg
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
11 #include "iwl-drv.h"
15 #include "iwl-io.h"
16 #include "iwl-prph.h"
17 #include "iwl-csr.h"
18 #include "iwl-fh.h"
20 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
37 u8 *pos = (void *)(*dump_data)->data;
42 if (!iwl_trans_grab_nic_access(fwrt->trans))
45 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
46 (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
52 iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
53 *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
60 iwl_trans_release_nic_access(fwrt->trans);
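The radio-register fragment above shows the driver's recurring access pattern: grab NIC access, loop over PRPH reads, release access. A minimal userspace sketch of that shape, with hypothetical mock_grab_nic_access()/mock_read_prph()/mock_release_nic_access() helpers and a placeholder length standing in for the real transport calls:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for iwl_trans_grab_nic_access()/release and the
 * PRPH read; they only model the call shape, not real hardware access. */
static bool mock_grab_nic_access(void) { return true; }
static void mock_release_nic_access(void) { }
static uint32_t mock_read_prph(uint32_t addr) { return addr & 0xff; }

#define RADIO_REG_DUMP_LEN 0x2c   /* placeholder size for the sketch */

static int dump_radio_regs(uint8_t *out)
{
	if (!mock_grab_nic_access())     /* bail out if the NIC is unreachable */
		return -1;

	for (uint32_t i = 0; i < RADIO_REG_DUMP_LEN; i++)
		out[i] = (uint8_t)mock_read_prph(i);   /* one byte per register read */

	mock_release_nic_access();       /* always paired with the grab above */
	return 0;
}

int main(void)
{
	uint8_t buf[RADIO_REG_DUMP_LEN];

	if (dump_radio_regs(buf) == 0)
		printf("first radio byte: 0x%02x\n", buf[0]);
	return 0;
}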
72 fifo_hdr = (void *)(*dump_data)->data;
73 fifo_data = (void *)fifo_hdr->data;
81 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
82 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
84 fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
85 fifo_hdr->available_bytes =
86 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
88 fifo_hdr->wr_ptr =
89 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
91 fifo_hdr->rd_ptr =
92 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
94 fifo_hdr->fence_ptr =
95 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
97 fifo_hdr->fence_mode =
98 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
102 iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
104 iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
106 iwl_trans_write_prph(fwrt->trans,
112 fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
127 fifo_hdr = (void *)(*dump_data)->data;
128 fifo_data = (void *)fifo_hdr->data;
136 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
137 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
139 fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
140 fifo_hdr->available_bytes =
141 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
143 fifo_hdr->wr_ptr =
144 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
146 fifo_hdr->rd_ptr =
147 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
149 fifo_hdr->fence_ptr =
150 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
152 fifo_hdr->fence_mode =
153 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
157 iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
160 /* Dummy-read to advance the read pointer to the head */
161 iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
165 fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
169 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
170 fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
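The TXF dump above drains the FIFO by programming TXF_READ_MODIFY_ADDR, issuing one dummy read, then reading TXF_READ_MODIFY_DATA word by word. A toy sketch of that drain loop against a plain array, with fifo_seek_start()/fifo_pop() as invented stand-ins for the PRPH accesses:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy FIFO model; the real code goes through the TXF_READ_MODIFY_ADDR and
 * TXF_READ_MODIFY_DATA PRPH registers instead of a plain array. */
static uint32_t fifo_words[64];
static size_t rd_idx;

static void fifo_seek_start(void) { rd_idx = 0; }
static uint32_t fifo_pop(void) { return fifo_words[rd_idx++ % 64]; }

/* Drain a TX FIFO the way the dump code does: point the read address at the
 * start, issue one dummy read to advance to the head, then read the data. */
static void drain_txf(uint32_t *out, size_t n_words)
{
	fifo_seek_start();               /* like writing TXF_READ_MODIFY_ADDR */
	(void)fifo_pop();                /* dummy read advances the pointer */

	for (size_t i = 0; i < n_words; i++)
		out[i] = fifo_pop();     /* like reading TXF_READ_MODIFY_DATA */
}

int main(void)
{
	uint32_t buf[8];

	drain_txf(buf, 8);
	printf("drained %zu words\n", sizeof(buf) / sizeof(buf[0]));
	return 0;
}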
179 struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
183 if (!iwl_trans_grab_nic_access(fwrt->trans))
189 cfg->lmac[0].rxfifo1_size, 0, 0);
191 iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
193 fwrt->trans->trans_cfg->umac_prph_offset, 1);
195 if (fwrt->smem_cfg.num_lmacs > 1)
197 cfg->lmac[1].rxfifo1_size,
201 iwl_trans_release_nic_access(fwrt->trans);
208 struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
215 if (!iwl_trans_grab_nic_access(fwrt->trans))
220 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
222 iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
224 cfg->lmac[0].txfifo_size[i], 0, i);
228 if (fwrt->smem_cfg.num_lmacs > 1) {
229 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
232 iwl_trans_write_prph(fwrt->trans,
236 cfg->lmac[1].txfifo_size[i],
238 i + cfg->num_txfifo_entries);
244 fw_has_capa(&fwrt->fw->ucode_capa,
248 i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
250 fifo_hdr = (void *)(*dump_data)->data;
251 fifo_data = (void *)fifo_hdr->data;
252 fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
259 (*dump_data)->type =
261 (*dump_data)->len =
264 fifo_hdr->fifo_num = cpu_to_le32(i);
267 iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
268 fwrt->smem_cfg.num_txfifo_entries);
270 fifo_hdr->available_bytes =
271 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
273 fifo_hdr->wr_ptr =
274 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
276 fifo_hdr->rd_ptr =
277 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
279 fifo_hdr->fence_ptr =
280 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
282 fifo_hdr->fence_mode =
283 cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
287 iwl_trans_write_prph(fwrt->trans,
291 /* Dummy-read to advance the read pointer to the head */
292 iwl_trans_read_prph(fwrt->trans,
299 iwl_trans_read_prph(fwrt->trans,
305 iwl_trans_release_nic_access(fwrt->trans);
527 struct iwl_trans *trans = fwrt->trans;
542 int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
545 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
546 (*data)->len = cpu_to_le32(sizeof(*prph) +
548 prph = (void *)(*data)->data;
549 prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
553 iwl_prph_dump_addr[i].end -
555 (void *)prph->data);
564 * alloc_sgtable - allocates a scatterlist table of the given size,
595 size -= PAGE_SIZE;
614 iwl_prph_dump_addr[i].end -
630 if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
633 } else if (fwrt->trans->trans_cfg->device_family >=
641 if (fwrt->trans->trans_cfg->mq_rx_supported) {
657 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
658 (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
659 dump_mem = (void *)(*dump_data)->data;
660 dump_mem->type = cpu_to_le32(type);
661 dump_mem->offset = cpu_to_le32(ofs);
662 iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
665 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
666 fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs,
667 dump_mem->data, len);
669 IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
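Each legacy dump chunk is written as a (type, len, data) record with little-endian fields, as the dump_data/dump_mem lines above show. A self-contained sketch of emitting one such record, using a hypothetical put_le32() in place of the kernel's cpu_to_le32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical little-endian store; the driver uses cpu_to_le32() for this. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* Append one (type, len, payload) record and return the bytes consumed,
 * mirroring how dump_data->type/len precede the copied memory chunk. */
static size_t append_record(uint8_t *out, uint32_t type,
			    const void *payload, uint32_t len)
{
	put_le32(out, type);
	put_le32(out + 4, len);
	memcpy(out + 8, payload, len);
	return 8 + len;
}

int main(void)
{
	uint8_t buf[64];
	const char chunk[] = "sram bytes";
	size_t n = append_record(buf, 1 /* placeholder type id */, chunk, sizeof(chunk));

	printf("record size: %zu bytes\n", n);
	return 0;
}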
688 ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
691 if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
692 mem_cfg->num_lmacs = MAX_NUM_LMAC;
694 for (i = 0; i < mem_cfg->num_lmacs; i++)
695 ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
712 if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
713 mem_cfg->num_lmacs = MAX_NUM_LMAC;
715 for (i = 0; i < mem_cfg->num_lmacs; i++) {
718 for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
719 ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
725 fw_has_capa(&fwrt->fw->ucode_capa,
729 for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
730 ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
742 for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
745 fwrt->fw_paging_db[i].fw_paging_block;
746 dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
748 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
749 (*data)->len = cpu_to_le32(sizeof(*paging) +
751 paging = (void *)(*data)->data;
752 paging->index = cpu_to_le32(i);
753 dma_sync_single_for_cpu(fwrt->trans->dev, addr,
756 memcpy(paging->data, page_address(pages),
758 dma_sync_single_for_device(fwrt->trans->dev, addr,
763 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
764 fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
765 fwrt->fw_paging_db[i].fw_offs,
766 paging->data,
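Both the memory and paging dumps finish by calling an optional sanitize hook, guarded by a check on the ops table and on the individual callback. A small runnable sketch of that optional-callback pattern, with a toy sanitize_ops table and redact() hook (names invented for the example):

#include <stddef.h>
#include <stdio.h>

/* Optional callback table, mirroring the fwrt->sanitize_ops checks above:
 * the caller tests both the table pointer and the individual hook. */
struct sanitize_ops {
	void (*frob_mem)(void *ctx, unsigned char *buf, size_t len);
};

static void redact(void *ctx, unsigned char *buf, size_t len)
{
	(void)ctx;
	for (size_t i = 0; i < len; i++)
		buf[i] = 0;                      /* toy sanitizer: zero the dump */
}

int main(void)
{
	struct sanitize_ops ops = { .frob_mem = redact };
	const struct sanitize_ops *sanitize_ops = &ops;   /* may be NULL */
	unsigned char page[16] = { 0xde, 0xad };

	if (sanitize_ops && sanitize_ops->frob_mem)       /* same guard as the driver */
		sanitize_ops->frob_mem(NULL, page, sizeof(page));

	printf("first byte after frob: 0x%02x\n", page[0]);
	return 0;
}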
782 const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
783 struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
785 u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
786 u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
787 0 : fwrt->trans->cfg->dccm2_len;
790 /* SRAM - include stack CCM if driver knows the values for it */
791 if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
794 if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
796 img = &fwrt->fw->img[fwrt->cur_fw_img];
797 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
798 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
800 sram_ofs = fwrt->trans->cfg->dccm_offset;
801 sram_len = fwrt->trans->cfg->dccm_len;
805 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
814 if (fwrt->trans->trans_cfg->device_family ==
832 if (!fwrt->fw->dbg.n_mem_tlv)
839 for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
843 /* Make room for fw's virtual image pages, if they exist */
845 file_len += fwrt->num_of_paging_blk *
850 if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
852 fwrt->trans->cfg->d3_debug_data_length * 2;
856 if (data->monitor_only) {
862 data->desc)
864 data->desc->len;
870 fw_error_dump->fwrt_ptr = dump_file;
872 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
873 dump_data = (void *)dump_file->data;
876 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
877 dump_data->len = cpu_to_le32(sizeof(*dump_info));
878 dump_info = (void *)dump_data->data;
879 dump_info->hw_type =
880 cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
881 dump_info->hw_step =
882 cpu_to_le32(fwrt->trans->hw_rev_step);
883 memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
884 sizeof(dump_info->fw_human_readable));
885 strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name,
886 sizeof(dump_info->dev_human_readable));
888 strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name,
889 sizeof(dump_info->bus_human_readable));
891 strscpy_pad(dump_info->bus_human_readable, "<bus>",
892 sizeof(dump_info->bus_human_readable));
894 dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
895 dump_info->lmac_err_id[0] =
896 cpu_to_le32(fwrt->dump.lmac_err_id[0]);
897 if (fwrt->smem_cfg.num_lmacs > 1)
898 dump_info->lmac_err_id[1] =
899 cpu_to_le32(fwrt->dump.lmac_err_id[1]);
900 dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
907 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
908 dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
909 dump_smem_cfg = (void *)dump_data->data;
910 dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
911 dump_smem_cfg->num_txfifo_entries =
912 cpu_to_le32(mem_cfg->num_txfifo_entries);
915 u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
918 dump_smem_cfg->lmac[i].txfifo_size[j] =
920 dump_smem_cfg->lmac[i].rxfifo1_size =
921 cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
923 dump_smem_cfg->rxfifo2_size =
924 cpu_to_le32(mem_cfg->rxfifo2_size);
925 dump_smem_cfg->internal_txfifo_addr =
926 cpu_to_le32(mem_cfg->internal_txfifo_addr);
928 dump_smem_cfg->internal_txfifo_size[i] =
929 cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
935 /* We only dump the FIFOs if the FW is in error state */
945 data->desc) {
946 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
947 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
948 data->desc->len);
949 dump_trig = (void *)dump_data->data;
950 memcpy(dump_trig, &data->desc->trig_desc,
951 sizeof(*dump_trig) + data->desc->len);
957 if (data->monitor_only)
962 fwrt->fw->dbg.mem_tlv;
964 if (!fwrt->fw->dbg.n_mem_tlv)
968 for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
977 fwrt->trans->cfg->smem_offset,
981 fwrt->trans->cfg->dccm2_offset,
985 if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
986 u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
987 size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
989 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
990 dump_data->len = cpu_to_le32(data_size * 2);
992 memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
994 kfree(fwrt->dump.d3_debug_data);
995 fwrt->dump.d3_debug_data = NULL;
997 iwl_trans_read_mem_bytes(fwrt->trans, addr,
998 dump_data->data + data_size,
1001 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
1002 fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr,
1003 dump_data->data + data_size,
1009 /* Dump fw's virtual image */
1017 dump_file->file_len = cpu_to_le32(file_len);
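The _iwl_fw_error_dump path above sizes the whole file first, allocates it, fills the sections, and only then stores the final length in the header. A compact sketch of that two-pass assembly with an invented dump_header layout and a placeholder magic value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative layout only; the real header is struct iwl_fw_error_dump_file. */
struct dump_header {
	uint32_t barker;
	uint32_t file_len;
	uint8_t data[];
};

int main(void)
{
	const uint32_t section_len[] = { 16, 32, 8 };
	uint32_t file_len = sizeof(struct dump_header);

	for (size_t i = 0; i < 3; i++)            /* pass 1: size accounting */
		file_len += section_len[i];

	struct dump_header *hdr = calloc(1, file_len);
	if (!hdr)
		return 1;

	uint8_t *pos = hdr->data;
	for (size_t i = 0; i < 3; i++) {          /* pass 2: fill the sections */
		memset(pos, (int)i, section_len[i]);
		pos += section_len[i];
	}

	hdr->barker = 0x48454144;                 /* placeholder magic value */
	hdr->file_len = file_len;                 /* the driver stores this as le32 */
	printf("dump file length: %u bytes\n", hdr->file_len);

	free(hdr);
	return 0;
}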
1022 * struct iwl_dump_ini_region_data - region data
1036 __le32 *val = range->data;
1039 range->internal_base_addr = cpu_to_le32(addr);
1040 range->range_data_size = size;
1042 *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i));
1044 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1052 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1053 u32 addr = le32_to_cpu(reg->addrs[idx]) +
1054 le32_to_cpu(reg->dev_addr.offset);
1057 reg->dev_addr.size);
1065 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1066 struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
1067 u32 addr = le32_to_cpu(reg->dev_addr_range.offset) +
1079 __le32 *val = range->data;
1087 range->internal_base_addr = cpu_to_le32(addr);
1088 range->range_data_size = size;
1090 if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
1096 if (!iwl_trans_grab_nic_access(fwrt->trans))
1097 return -EBUSY;
1100 dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr);
1110 iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr,
1112 prph_val = iwl_read_prph_no_grab(fwrt->trans,
1117 iwl_trans_release_nic_access(fwrt->trans);
1118 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1126 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1127 u32 addr = le32_to_cpu(reg->addrs[idx]);
1130 reg->dev_addr.size,
1131 reg->dev_addr.offset);
1139 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1140 struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
1145 reg->dev_addr_range.offset);
1152 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1154 __le32 *val = range->data;
1155 u32 addr = le32_to_cpu(reg->addrs[idx]) +
1156 le32_to_cpu(reg->dev_addr.offset);
1159 range->internal_base_addr = cpu_to_le32(addr);
1160 range->range_data_size = reg->dev_addr.size;
1161 for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4)
1162 *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
1164 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1171 struct iwl_trans *trans = fwrt->trans;
1172 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1174 __le32 *val = range->data;
1175 u32 addr = le32_to_cpu(reg->addrs[idx]) +
1176 le32_to_cpu(reg->dev_addr.offset);
1179 range->internal_base_addr = cpu_to_le32(addr);
1180 range->range_data_size = reg->dev_addr.size;
1181 for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
1192 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1199 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1201 u32 addr = le32_to_cpu(reg->addrs[idx]) +
1202 le32_to_cpu(reg->dev_addr.offset);
1204 range->internal_base_addr = cpu_to_le32(addr);
1205 range->range_data_size = reg->dev_addr.size;
1206 iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
1207 le32_to_cpu(reg->dev_addr.size));
1209 if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM &&
1210 fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
1211 fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
1212 range->data,
1213 le32_to_cpu(reg->dev_addr.size));
1215 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1221 struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
1223 dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
1224 u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
1226 range->page_num = cpu_to_le32(idx);
1227 range->range_data_size = cpu_to_le32(page_size);
1228 dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
1230 memcpy(range->data, page_address(page), page_size);
1231 dma_sync_single_for_device(fwrt->trans->dev, addr, page_size,
1234 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1247 if (!fwrt->trans->trans_cfg->gen2)
1251 page_size = fwrt->trans->init_dram.paging[idx].size;
1253 range->page_num = cpu_to_le32(idx);
1254 range->range_data_size = cpu_to_le32(page_size);
1255 memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
1258 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1266 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1269 u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
1271 frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx];
1273 range->dram_base_addr = cpu_to_le64(frag->physical);
1274 range->range_data_size = cpu_to_le32(frag->size);
1276 memcpy(range->data, frag->block, frag->size);
1278 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1285 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1287 u32 addr = le32_to_cpu(reg->internal_buffer.base_addr);
1289 range->internal_base_addr = cpu_to_le32(addr);
1290 range->range_data_size = reg->internal_buffer.size;
1291 iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
1292 le32_to_cpu(reg->internal_buffer.size));
1294 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1300 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1301 struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
1302 struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
1303 int txf_num = cfg->num_txfifo_entries;
1304 int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
1305 u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]);
1308 if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) {
1310 le32_to_cpu(reg->fifos.offset));
1314 iter->internal_txf = 0;
1315 iter->fifo_size = 0;
1316 iter->fifo = -1;
1317 if (le32_to_cpu(reg->fifos.offset))
1318 iter->lmac = 1;
1320 iter->lmac = 0;
1323 if (!iter->internal_txf) {
1324 for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
1325 iter->fifo_size =
1326 cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
1327 if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
1330 iter->fifo--;
1333 iter->internal_txf = 1;
1335 if (!fw_has_capa(&fwrt->fw->ucode_capa,
1339 for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) {
1340 iter->fifo_size =
1341 cfg->internal_txfifo_size[iter->fifo - txf_num];
1342 if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
1353 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1355 struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
1356 struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
1357 u32 offs = le32_to_cpu(reg->fifos.offset), addr;
1358 u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
1364 return -EIO;
1366 if (!iwl_trans_grab_nic_access(fwrt->trans))
1367 return -EBUSY;
1369 range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
1370 range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
1371 range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
1373 iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
1380 addr = le32_to_cpu(reg->addrs[i]) + offs;
1382 reg_dump->addr = cpu_to_le32(addr);
1383 reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
1389 if (reg->fifos.hdr_only) {
1390 range->range_data_size = cpu_to_le32(registers_size);
1395 iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs,
1398 /* Dummy-read to advance the read pointer to the head */
1399 iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs);
1404 for (i = 0; i < iter->fifo_size; i += sizeof(*data))
1405 *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
1407 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
1408 fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
1409 reg_dump, iter->fifo_size);
1412 iwl_trans_release_nic_access(fwrt->trans);
1414 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1422 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1424 __le32 *val = range->data;
1425 __le32 offset = reg->dev_addr.offset;
1427 u32 addr = le32_to_cpu(reg->addrs[idx]);
1431 range->internal_base_addr = cpu_to_le32(addr);
1432 range->range_data_size = reg->dev_addr.size;
1434 if (!iwl_trans_grab_nic_access(fwrt->trans))
1435 return -EBUSY;
1440 dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr);
1442 for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
1450 iwl_write_prph_no_grab(fwrt->trans, indirect_rd_wr_addr,
1454 prph_val = iwl_read_prph_no_grab(fwrt->trans,
1460 iwl_trans_release_nic_access(fwrt->trans);
1461 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1474 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1475 u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
1476 u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
1490 fifo_idx = ffs(fid1) - 1;
1495 data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size;
1496 data->fifo_num = fifo_idx;
1500 fifo_idx = ffs(fid2) - 1;
1501 if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
1514 data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT;
1518 data->size = fwrt->smem_cfg.rxfifo2_size;
1519 data->offset = iwl_umac_prph(fwrt->trans,
1523 data->size = fwrt->smem_cfg.rxfifo2_control_size;
1524 data->offset = iwl_umac_prph(fwrt->trans,
1535 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1538 struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
1539 u32 offs = le32_to_cpu(reg->fifos.offset), addr;
1540 u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
1550 return -EIO;
1552 if (!iwl_trans_grab_nic_access(fwrt->trans))
1553 return -EBUSY;
1555 range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
1556 range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
1557 range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
1564 addr = le32_to_cpu(reg->addrs[i]) + offs;
1566 reg_dump->addr = cpu_to_le32(addr);
1567 reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
1573 if (reg->fifos.hdr_only) {
1574 range->range_data_size = cpu_to_le32(registers_size);
1581 iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
1583 iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1);
1585 iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs,
1592 *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
1595 iwl_trans_release_nic_access(fwrt->trans);
1597 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1605 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1606 struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
1608 u32 addr = le32_to_cpu(err_table->base_addr) +
1609 le32_to_cpu(err_table->offset);
1611 range->internal_base_addr = cpu_to_le32(addr);
1612 range->range_data_size = err_table->size;
1613 iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
1614 le32_to_cpu(err_table->size));
1616 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1624 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1626 &reg->special_mem;
1629 u32 addr = le32_to_cpu(special_mem->base_addr) +
1630 le32_to_cpu(special_mem->offset);
1632 range->internal_base_addr = cpu_to_le32(addr);
1633 range->range_data_size = special_mem->size;
1634 iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
1635 le32_to_cpu(special_mem->size));
1637 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1645 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1647 __le32 *val = range->data;
1651 if (!iwl_trans_grab_nic_access(fwrt->trans))
1652 return -EBUSY;
1654 range->range_data_size = reg->dev_addr.size;
1655 for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
1656 prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
1660 iwl_trans_release_nic_access(fwrt->trans);
1661 return -EBUSY;
1665 iwl_trans_release_nic_access(fwrt->trans);
1666 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1674 struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
1678 return -EIO;
1682 memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
1683 range->range_data_size = cpu_to_le32(pkt_len);
1685 memcpy(range->data, pkt->data, pkt_len);
1687 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1696 u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
1697 u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
1698 u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
1699 u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
1702 range->range_data_size = cpu_to_le32(size_to_dump);
1703 if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
1706 return -1;
1709 fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
1710 fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
1712 iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
1714 return sizeof(*range) + le32_to_cpu(range->range_data_size);
1724 dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
1726 return dump->data;
1730 * mask_apply_and_normalize - applies mask on val and normalizes the result
1741 return (val & mask) >> (ffs(mask) - 1);
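The mask-and-shift above extracts a register field by keeping the masked bits and shifting them down by ffs(mask) - 1. A short worked example of the same computation:

#include <strings.h>   /* ffs() */
#include <stdint.h>
#include <stdio.h>

/* Same shape as mask_apply_and_normalize(): keep the masked bits and
 * shift them down so the field starts at bit 0. */
static uint32_t mask_norm(uint32_t val, uint32_t mask)
{
	return (val & mask) >> (ffs(mask) - 1);
}

int main(void)
{
	/* 0xABCD1234 masked with 0x0000FF00 keeps 0x1200, shifted down to 0x12. */
	printf("0x%x\n", mask_norm(0xABCD1234u, 0x0000FF00u));
	return 0;
}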
1752 offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
1754 if (!reg_info || !reg_info->addr || !reg_info->mask)
1757 val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs);
1759 return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask));
1767 if (!iwl_trans_grab_nic_access(fwrt->trans)) {
1772 data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
1773 &addrs->write_ptr);
1774 if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1775 u32 wrt_ptr = le32_to_cpu(data->write_ptr);
1777 data->write_ptr = cpu_to_le32(wrt_ptr >> 2);
1779 data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id,
1780 &addrs->cycle_cnt);
1781 data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
1782 &addrs->cur_frag);
1784 iwl_trans_release_nic_access(fwrt->trans);
1786 data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
1788 return data->data;
1797 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1798 u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
1801 &fwrt->trans->cfg->mon_dram_regs);
1810 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1811 u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
1814 &fwrt->trans->cfg->mon_smem_regs);
1828 &fwrt->trans->cfg->mon_dbgi_regs);
1836 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1839 dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
1840 dump->version = reg->err_table.version;
1842 return dump->data;
1850 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1853 dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
1854 dump->type = reg->special_mem.type;
1855 dump->version = reg->special_mem.version;
1857 return dump->data;
1867 dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
1869 return dump->data;
1875 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1877 return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
1884 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1887 return iwl_tlv_array_len_with_size(reg_data->reg_tlv, reg, size);
1893 if (fwrt->trans->trans_cfg->gen2) {
1894 if (fwrt->trans->init_dram.paging_cnt)
1895 return fwrt->trans->init_dram.paging_cnt - 1;
1900 return fwrt->num_of_paging_blk;
1907 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1909 u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
1912 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
1914 for (i = 0; i < fw_mon->num_frags; i++) {
1915 if (!fw_mon->frags[i].size)
1947 u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
1948 u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
1949 u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
1964 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1965 u32 size = le32_to_cpu(reg->dev_addr.size);
1979 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
1980 struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
2005 if (fwrt->trans->trans_cfg->gen2)
2006 size += fwrt->trans->init_dram.paging[i].size;
2008 size += fwrt->fw_paging_db[i].fw_paging_size;
2018 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2020 u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
2023 fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
2025 for (i = 0; i < fw_mon->num_frags; i++) {
2026 struct iwl_dram_data *frag = &fw_mon->frags[i];
2028 if (!frag->size)
2031 size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size;
2044 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2047 size = le32_to_cpu(reg->internal_buffer.size);
2060 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2061 u32 size = le32_to_cpu(reg->dev_addr.size);
2074 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2075 struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
2076 u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
2084 if (!reg->fifos.hdr_only)
2085 size += iter->fifo_size;
2097 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2099 u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
2104 if (reg->fifos.hdr_only)
2120 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2121 u32 size = le32_to_cpu(reg->err_table.size);
2134 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2135 u32 size = le32_to_cpu(reg->special_mem.size);
2150 if (!reg_data->dump_data->fw_pkt)
2153 size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt);
2166 u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
2167 u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
2168 u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
2187 * struct iwl_dump_ini_mem_ops - ini memory dump operations
2209 * iwl_dump_ini_mem - dump memory region
2211 * @fwrt: fw runtime struct
2224 struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
2228 u32 type = reg->type;
2229 u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
2239 if (le32_to_cpu(reg->hdr.version) >= 2) {
2240 u32 dp = le32_get_bits(reg->id,
2246 "WRT: no dump - type %d and policy mismatch=%d\n",
2252 "WRT: no dump - type %d and policy mismatch=%d\n",
2258 "WRT: no dump - type %d and policy mismatch=%d\n",
2264 if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
2265 !ops->fill_range) {
2270 size = ops->get_size(fwrt, reg_data);
2281 entry->size = sizeof(*tlv) + size;
2283 tlv = (void *)entry->data;
2284 tlv->type = reg->type;
2285 tlv->sub_type = reg->sub_type;
2286 tlv->sub_type_ver = reg->sub_type_ver;
2287 tlv->reserved = reg->reserved;
2288 tlv->len = cpu_to_le32(size);
2290 num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
2292 header = (void *)tlv->data;
2293 header->region_id = cpu_to_le32(id);
2294 header->num_of_ranges = cpu_to_le32(num_of_ranges);
2295 header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
2296 memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
2299 range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
2307 header_size = range - (u8 *)header;
2322 free_size -= header_size;
2325 int range_size = ops->fill_range(fwrt, reg_data, range,
2342 free_size -= range_size;
2346 list_add_tail(&entry->list, list);
2348 return entry->size;
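iwl_dump_ini_mem above loops over the region's ranges, letting each fill_range callback append its data while free_size tracks the space left in the entry. A simplified sketch of that bookkeeping loop, with memset() standing in for ops->fill_range():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fill up to n_ranges fixed-size ranges into buf, tracking the space left
 * the way iwl_dump_ini_mem() does with free_size. */
static int fill_ranges(uint8_t *buf, uint32_t free_size,
		       uint32_t n_ranges, uint32_t range_size)
{
	uint8_t *pos = buf;

	for (uint32_t i = 0; i < n_ranges; i++) {
		if (range_size > free_size)      /* would overflow the entry */
			return -1;
		memset(pos, (int)i, range_size); /* stand-in for ops->fill_range() */
		pos += range_size;
		free_size -= range_size;
	}
	return (int)(pos - buf);
}

int main(void)
{
	uint8_t buf[64];

	printf("filled %d bytes\n", fill_ranges(buf, sizeof(buf), 4, 16));
	return 0;
}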
2369 list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
2378 entry->size = size;
2380 tlv = (void *)entry->data;
2381 tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
2382 tlv->len = cpu_to_le32(size - sizeof(*tlv));
2384 dump = (void *)tlv->data;
2386 dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
2387 dump->time_point = trigger->time_point;
2388 dump->trigger_reason = trigger->trigger_reason;
2389 dump->external_cfg_state =
2390 cpu_to_le32(fwrt->trans->dbg.external_ini_cfg);
2392 dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
2393 dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
2395 dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
2401 hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
2403 u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
2415 dump->hw_type = cpu_to_le32(hw_type);
2417 dump->rf_id_flavor =
2418 cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
2419 dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id));
2420 dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id));
2421 dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id));
2423 dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major);
2424 dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor);
2425 dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
2426 dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
2428 dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
2429 dump->regions_mask = trigger->regions_mask &
2430 ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk);
2432 dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
2433 memcpy(dump->build_tag, fwrt->fw->human_readable,
2434 sizeof(dump->build_tag));
2436 cfg_name = dump->cfg_names;
2437 dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names);
2438 list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
2440 (void *)node->tlv.data;
2442 BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
2443 sizeof(debug_info->debug_cfg_name));
2445 cfg_name->image_type = debug_info->image_type;
2446 cfg_name->cfg_name_len =
2447 cpu_to_le32(sizeof(cfg_name->cfg_name));
2448 memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
2449 sizeof(cfg_name->cfg_name));
2456 list_add(&entry->list, list);
2458 return entry->size;
2466 u32 len = strnlen(fwrt->trans->dbg.dump_file_name_ext,
2469 if (!fwrt->trans->dbg.dump_file_name_ext_valid)
2476 entry->size = sizeof(*tlv) + len;
2478 tlv = (void *)entry->data;
2479 tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE);
2480 tlv->len = cpu_to_le32(len);
2481 memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len);
2484 list_add_tail(&entry->list, list);
2486 fwrt->trans->dbg.dump_file_name_ext_valid = false;
2488 return entry->size;
2614 struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
2615 enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
2624 u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
2625 ~(fwrt->trans->dbg.unsupported_region_msk);
2627 BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
2628 BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
2629 ARRAY_SIZE(fwrt->trans->dbg.active_regions));
2631 for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
2638 reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
2645 reg = (void *)reg_data.reg_tlv->data;
2646 reg_type = reg->type;
2660 * DRAM_IMR can be collected only for FW/HW error timepoint
2661 * when fw is not alive. In addition, it must be collected
2668 imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
2697 enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
2698 u32 usec = le32_to_cpu(trig->ignore_consec);
2700 if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
2713 struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
2719 !le64_to_cpu(trigger->regions_mask))
2726 entry->size = sizeof(*hdr);
2734 hdr = (void *)entry->data;
2735 hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER);
2736 hdr->file_len = cpu_to_le32(size + entry->size);
2738 list_add(&entry->list, list);
2740 return le32_to_cpu(hdr->file_len);
2749 fwrt->dump.lmac_err_id[0] = 0;
2750 if (fwrt->smem_cfg.num_lmacs > 1)
2751 fwrt->dump.lmac_err_id[1] = 0;
2752 fwrt->dump.umac_err_id = 0;
2762 u32 dump_mask = fwrt->fw->dbg.dump_mask;
2768 if (dump_data->monitor_only)
2771 fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask,
2772 fwrt->sanitize_ops,
2773 fwrt->sanitize_ctx);
2774 file_len = le32_to_cpu(dump_file->file_len);
2778 file_len += fw_error_dump.trans_ptr->len;
2779 dump_file->file_len = cpu_to_le32(file_len);
2791 fw_error_dump.trans_ptr->data,
2792 fw_error_dump.trans_ptr->len,
2794 dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
2805 list_entry(list->next, typeof(*entry), list);
2807 list_del(&entry->list);
2814 dump_data->trig = NULL;
2815 kfree(dump_data->fw_pkt);
2816 dump_data->fw_pkt = NULL;
2841 entry->data, entry->size, offs);
2842 offs += entry->size;
2844 dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
2865 if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
2875 if (fwrt->dump.active_wks == ~0UL)
2876 return -EBUSY;
2878 idx = ffz(fwrt->dump.active_wks);
2881 test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
2882 return -EBUSY;
2884 wk_data = &fwrt->dump.wks[idx];
2886 if (WARN_ON(wk_data->dump_data.desc))
2887 iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc);
2889 wk_data->dump_data.desc = desc;
2890 wk_data->dump_data.monitor_only = monitor_only;
2893 le32_to_cpu(desc->trig_desc.type));
2895 queue_delayed_work(system_unbound_wq, &wk_data->wk,
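The dump scheduling code above picks a free worker with ffz() on active_wks and claims it with test_and_set_bit(). A minimal userspace equivalent of that find-first-zero slot allocation (non-atomic, unlike the real bit ops):

#include <stdio.h>

/* Find the first clear bit in a small bitmap, the way the dump code picks
 * a free worker slot; claiming it here is not atomic. */
static int claim_slot(unsigned long *bitmap, int n_slots)
{
	for (int i = 0; i < n_slots; i++) {
		if (!(*bitmap & (1UL << i))) {
			*bitmap |= 1UL << i;     /* mark the slot busy */
			return i;
		}
	}
	return -1;                               /* all slots busy: -EBUSY */
}

int main(void)
{
	unsigned long active = 0x5;              /* slots 0 and 2 busy */

	printf("claimed slot %d\n", claim_slot(&active, 8));  /* prints 1 */
	return 0;
}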
2905 if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
2906 return -EIO;
2908 if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
2911 return -EIO;
2924 return -ENOMEM;
2926 iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
2927 iwl_dump_error_desc->len = 0;
2937 iwl_trans_sync_nmi(fwrt->trans);
2953 u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
2955 if (!le16_to_cpu(trigger->occurrences))
2958 if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
2961 iwl_force_nmi(fwrt->trans);
2965 trigger->occurrences = cpu_to_le16(occurrences);
2966 monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
2969 delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
2974 return -ENOMEM;
2977 desc->len = len;
2978 desc->trig_desc.type = cpu_to_le32(trig);
2979 memcpy(desc->trig_desc.data, str, len);
2992 if (iwl_trans_dbg_ini_valid(fwrt->trans))
2998 buf[sizeof(buf) - 1] = '\0';
3005 if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
3006 buf[sizeof(buf) - 1] = '\0';
3011 ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
3027 if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
3029 return -EINVAL;
3031 /* EARLY START - firmware's configuration is hard coded */
3032 if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
3033 !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
3037 if (!fwrt->fw->dbg.conf_tlv[conf_id])
3038 return -EINVAL;
3040 if (fwrt->dump.conf != FW_DBG_INVALID)
3041 IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n",
3042 fwrt->dump.conf);
3044 /* Send all HCMDs for configuring the FW debug */
3045 ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
3046 for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
3049 .id = cmd->id,
3050 .len = { le16_to_cpu(cmd->len), },
3051 .data = { cmd->data, },
3054 ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
3059 ptr += le16_to_cpu(cmd->len);
3062 fwrt->dump.conf = conf_id;
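iwl_fw_start_dbg_conf above walks the conf TLV's packed host commands, advancing the pointer by each command's le16 length after sending it. A sketch of that variable-length record walk over a byte stream, with a hypothetical get_le16() in place of le16_to_cpu() and an invented record layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical little-endian 16-bit load; the kernel uses le16_to_cpu(). */
static uint16_t get_le16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }

/* Walk a stream of { u8 id; u8 pad; le16 len; u8 data[len]; } records,
 * advancing by header plus payload, like ptr += le16_to_cpu(cmd->len). */
static void walk_cmds(const uint8_t *ptr, unsigned int n_cmds)
{
	for (unsigned int i = 0; i < n_cmds; i++) {
		uint16_t len = get_le16(ptr + 2);

		printf("cmd %u: id=0x%02x len=%u\n", i, (unsigned)ptr[0], (unsigned)len);
		ptr += 4 + len;                  /* header plus payload */
	}
}

int main(void)
{
	const uint8_t stream[] = {
		0x10, 0, 2, 0, 0xaa, 0xbb,       /* cmd 0: 2-byte payload */
		0x20, 0, 1, 0, 0xcc,             /* cmd 1: 1-byte payload */
	};

	walk_cmds(stream, 2);
	return 0;
}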
3079 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
3082 if (fw_has_capa(&fwrt->fw->ucode_capa,
3086 iwl_trans_send_cmd(fwrt->trans, &hcmd);
3097 &fwrt->dump.wks[wk_idx].dump_data;
3098 if (!test_bit(wk_idx, &fwrt->dump.active_wks))
3101 /* also checks 'desc' for pre-ini mode, since that shadows in union */
3102 if (!dump_data->trig) {
3107 if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
3108 IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
3112 /* there's no point in fw dump if the bus is dead */
3113 if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
3114 IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
3121 if (iwl_trans_dbg_ini_valid(fwrt->trans))
3122 iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
3124 iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
3129 if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
3130 u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
3131 u32 time_point = le32_to_cpu(dump_data->trig->time_point);
3139 if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
3140 iwl_force_nmi(fwrt->trans);
3143 if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
3146 iwl_fw_free_dump_desc(fwrt, dump_data->desc);
3147 dump_data->desc = NULL;
3150 clear_bit(wk_idx, &fwrt->dump.active_wks);
3157 struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
3158 enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
3165 return -EINVAL;
3168 delay = le32_to_cpu(trig->dump_delay);
3169 occur = le32_to_cpu(trig->occurrences);
3173 trig->occurrences = cpu_to_le32(--occur);
3179 if (fwrt->dump.active_wks == ~0UL)
3180 return -EBUSY;
3182 idx = ffz(fwrt->dump.active_wks);
3185 test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
3186 return -EBUSY;
3188 fwrt->dump.wks[idx].dump_data = *dump_data;
3201 &fwrt->dump.wks[idx].wk,
3212 container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);
3217 if (fwrt->ops && fwrt->ops->dump_start)
3218 fwrt->ops->dump_start(fwrt->ops_ctx);
3220 iwl_fw_dbg_collect_sync(fwrt, wks->idx);
3222 if (fwrt->ops && fwrt->ops->dump_end)
3223 fwrt->ops->dump_end(fwrt->ops_ctx);
3228 const struct iwl_cfg *cfg = fwrt->trans->cfg;
3233 if (!fwrt->dump.d3_debug_data) {
3234 fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
3236 if (!fwrt->dump.d3_debug_data) {
3244 iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
3245 fwrt->dump.d3_debug_data,
3246 cfg->d3_debug_data_length);
3248 if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
3249 fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
3250 cfg->d3_debug_data_base_addr,
3251 fwrt->dump.d3_debug_data,
3252 cfg->d3_debug_data_length);
3260 iwl_dbg_tlv_del_timers(fwrt->trans);
3287 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
3293 params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE);
3294 params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL);
3309 return -EIO;
3311 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
3316 iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
3317 iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
3335 int cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
3342 * this is for finding the match between fw and kernel logs
3344 marker.timestamp = cpu_to_le64(fwrt->timestamp.seq++);
3351 return -EINVAL;
3357 ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
3360 resp = (void *)hcmd.resp_pkt->data;
3361 IWL_DEBUG_INFO(fwrt, "FW GP2 time: %u\n",
3362 le32_to_cpu(resp->gp2));
3374 if (!iwl_trans_fw_running(fwrt->trans))
3377 if (fw_has_capa(&fwrt->fw->ucode_capa,
3381 ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop);
3383 iwl_fw_dbg_stop_recording(fwrt->trans, params);
3385 ret = iwl_fw_dbg_restart_recording(fwrt->trans, params);
3390 fwrt->trans->dbg.rec_on = false;
3409 u32 preset = u32_get_bits(fwrt->trans->dbg.domains_bitmap,
3413 if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
3416 if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1))
3419 iwl_trans_send_cmd(fwrt->trans, &hcmd);
3429 if (fw_has_api(&fwrt->fw->ucode_capa,
3434 iwl_trans_send_cmd(fwrt->trans, &hcmd);