1bfcc09ddSBjoern A. Zeeb // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2bfcc09ddSBjoern A. Zeeb /* 3*a4128aadSBjoern A. Zeeb * Copyright (C) 2018-2024 Intel Corporation 4bfcc09ddSBjoern A. Zeeb */ 5bfcc09ddSBjoern A. Zeeb #include <linux/firmware.h> 6bfcc09ddSBjoern A. Zeeb #include "iwl-drv.h" 7bfcc09ddSBjoern A. Zeeb #include "iwl-trans.h" 8bfcc09ddSBjoern A. Zeeb #include "iwl-dbg-tlv.h" 9bfcc09ddSBjoern A. Zeeb #include "fw/dbg.h" 10bfcc09ddSBjoern A. Zeeb #include "fw/runtime.h" 11bfcc09ddSBjoern A. Zeeb 12bfcc09ddSBjoern A. Zeeb /** 13bfcc09ddSBjoern A. Zeeb * enum iwl_dbg_tlv_type - debug TLV types 14bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV 15bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV 16bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_HCMD: host command TLV 17bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_REGION: region TLV 18bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV 19bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV 20bfcc09ddSBjoern A. Zeeb * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs 21bfcc09ddSBjoern A. Zeeb */ 22bfcc09ddSBjoern A. Zeeb enum iwl_dbg_tlv_type { 23bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_DEBUG_INFO = 24bfcc09ddSBjoern A. Zeeb IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE, 25bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_BUF_ALLOC, 26bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_HCMD, 27bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_REGION, 28bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_TRIGGER, 29bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_CONF_SET, 30bfcc09ddSBjoern A. Zeeb IWL_DBG_TLV_TYPE_NUM, 31bfcc09ddSBjoern A. Zeeb }; 32bfcc09ddSBjoern A. Zeeb 33bfcc09ddSBjoern A. Zeeb /** 34bfcc09ddSBjoern A. Zeeb * struct iwl_dbg_tlv_ver_data - debug TLV version struct 35bfcc09ddSBjoern A. Zeeb * @min_ver: min version supported 36bfcc09ddSBjoern A. Zeeb * @max_ver: max version supported 37bfcc09ddSBjoern A. Zeeb */ 38bfcc09ddSBjoern A. 
Zeeb struct iwl_dbg_tlv_ver_data { 39bfcc09ddSBjoern A. Zeeb int min_ver; 40bfcc09ddSBjoern A. Zeeb int max_ver; 41bfcc09ddSBjoern A. Zeeb }; 42bfcc09ddSBjoern A. Zeeb 43bfcc09ddSBjoern A. Zeeb /** 44bfcc09ddSBjoern A. Zeeb * struct iwl_dbg_tlv_timer_node - timer node struct 45bfcc09ddSBjoern A. Zeeb * @list: list of &struct iwl_dbg_tlv_timer_node 46bfcc09ddSBjoern A. Zeeb * @timer: timer 47bfcc09ddSBjoern A. Zeeb * @fwrt: &struct iwl_fw_runtime 48bfcc09ddSBjoern A. Zeeb * @tlv: TLV attach to the timer node 49bfcc09ddSBjoern A. Zeeb */ 50bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_timer_node { 51bfcc09ddSBjoern A. Zeeb struct list_head list; 52bfcc09ddSBjoern A. Zeeb struct timer_list timer; 53bfcc09ddSBjoern A. Zeeb struct iwl_fw_runtime *fwrt; 54bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv *tlv; 55bfcc09ddSBjoern A. Zeeb }; 56bfcc09ddSBjoern A. Zeeb 57bfcc09ddSBjoern A. Zeeb static const struct iwl_dbg_tlv_ver_data 58bfcc09ddSBjoern A. Zeeb dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { 59bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,}, 60bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,}, 61bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,}, 62d9836fb4SBjoern A. Zeeb [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,}, 63bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,}, 64bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,}, 65bfcc09ddSBjoern A. Zeeb }; 66bfcc09ddSBjoern A. Zeeb 67*a4128aadSBjoern A. Zeeb /* add a new TLV node, returning it so it can be modified */ 68*a4128aadSBjoern A. Zeeb static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv, 69bfcc09ddSBjoern A. Zeeb struct list_head *list) 70bfcc09ddSBjoern A. Zeeb { 71bfcc09ddSBjoern A. Zeeb u32 len = le32_to_cpu(tlv->length); 72bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 73bfcc09ddSBjoern A. 
Zeeb 74*a4128aadSBjoern A. Zeeb node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL); 75bfcc09ddSBjoern A. Zeeb if (!node) 76*a4128aadSBjoern A. Zeeb return NULL; 77bfcc09ddSBjoern A. Zeeb 78d9836fb4SBjoern A. Zeeb memcpy(&node->tlv, tlv, sizeof(node->tlv)); 79d9836fb4SBjoern A. Zeeb memcpy(node->tlv.data, tlv->data, len); 80bfcc09ddSBjoern A. Zeeb list_add_tail(&node->list, list); 81bfcc09ddSBjoern A. Zeeb 82*a4128aadSBjoern A. Zeeb return &node->tlv; 83bfcc09ddSBjoern A. Zeeb } 84bfcc09ddSBjoern A. Zeeb 85bfcc09ddSBjoern A. Zeeb static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv) 86bfcc09ddSBjoern A. Zeeb { 87bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; 88bfcc09ddSBjoern A. Zeeb u32 type = le32_to_cpu(tlv->type); 89bfcc09ddSBjoern A. Zeeb u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; 90bfcc09ddSBjoern A. Zeeb u32 ver = le32_to_cpu(hdr->version); 91bfcc09ddSBjoern A. Zeeb 92bfcc09ddSBjoern A. Zeeb if (ver < dbg_ver_table[tlv_idx].min_ver || 93bfcc09ddSBjoern A. Zeeb ver > dbg_ver_table[tlv_idx].max_ver) 94bfcc09ddSBjoern A. Zeeb return false; 95bfcc09ddSBjoern A. Zeeb 96bfcc09ddSBjoern A. Zeeb return true; 97bfcc09ddSBjoern A. Zeeb } 98bfcc09ddSBjoern A. Zeeb 99bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans, 100bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 101bfcc09ddSBjoern A. Zeeb { 102bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data; 103bfcc09ddSBjoern A. Zeeb 104bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(tlv->length) != sizeof(*debug_info)) 105bfcc09ddSBjoern A. Zeeb return -EINVAL; 106bfcc09ddSBjoern A. Zeeb 107*a4128aadSBjoern A. Zeeb /* we use this as a string, ensure input was NUL terminated */ 108*a4128aadSBjoern A. Zeeb if (strnlen(debug_info->debug_cfg_name, 109*a4128aadSBjoern A. Zeeb sizeof(debug_info->debug_cfg_name)) == 110*a4128aadSBjoern A. 
Zeeb sizeof(debug_info->debug_cfg_name)) 111*a4128aadSBjoern A. Zeeb return -EINVAL; 112*a4128aadSBjoern A. Zeeb 113bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n", 114bfcc09ddSBjoern A. Zeeb debug_info->debug_cfg_name); 115bfcc09ddSBjoern A. Zeeb 116*a4128aadSBjoern A. Zeeb if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list)) 117*a4128aadSBjoern A. Zeeb return -ENOMEM; 118*a4128aadSBjoern A. Zeeb return 0; 119bfcc09ddSBjoern A. Zeeb } 120bfcc09ddSBjoern A. Zeeb 121bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, 122bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 123bfcc09ddSBjoern A. Zeeb { 124bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data; 125bfcc09ddSBjoern A. Zeeb u32 buf_location; 126bfcc09ddSBjoern A. Zeeb u32 alloc_id; 127bfcc09ddSBjoern A. Zeeb 128bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(tlv->length) != sizeof(*alloc)) 129bfcc09ddSBjoern A. Zeeb return -EINVAL; 130bfcc09ddSBjoern A. Zeeb 131bfcc09ddSBjoern A. Zeeb buf_location = le32_to_cpu(alloc->buf_location); 132bfcc09ddSBjoern A. Zeeb alloc_id = le32_to_cpu(alloc->alloc_id); 133bfcc09ddSBjoern A. Zeeb 134bfcc09ddSBjoern A. Zeeb if (buf_location == IWL_FW_INI_LOCATION_INVALID || 135bfcc09ddSBjoern A. Zeeb buf_location >= IWL_FW_INI_LOCATION_NUM) 136bfcc09ddSBjoern A. Zeeb goto err; 137bfcc09ddSBjoern A. Zeeb 138bfcc09ddSBjoern A. Zeeb if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID || 139bfcc09ddSBjoern A. Zeeb alloc_id >= IWL_FW_INI_ALLOCATION_NUM) 140bfcc09ddSBjoern A. Zeeb goto err; 141bfcc09ddSBjoern A. Zeeb 142bfcc09ddSBjoern A. Zeeb if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH && 143bfcc09ddSBjoern A. Zeeb alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) 144bfcc09ddSBjoern A. Zeeb goto err; 145bfcc09ddSBjoern A. Zeeb 146bfcc09ddSBjoern A. Zeeb if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH && 147bfcc09ddSBjoern A. 
Zeeb alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) 148bfcc09ddSBjoern A. Zeeb goto err; 149bfcc09ddSBjoern A. Zeeb 1509af1bba4SBjoern A. Zeeb if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH && 1519af1bba4SBjoern A. Zeeb alloc->req_size == 0) { 1529af1bba4SBjoern A. Zeeb IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n"); 1539af1bba4SBjoern A. Zeeb return -EINVAL; 1549af1bba4SBjoern A. Zeeb } 1559af1bba4SBjoern A. Zeeb 156bfcc09ddSBjoern A. Zeeb trans->dbg.fw_mon_cfg[alloc_id] = *alloc; 157bfcc09ddSBjoern A. Zeeb 158bfcc09ddSBjoern A. Zeeb return 0; 159bfcc09ddSBjoern A. Zeeb err: 160bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, 161bfcc09ddSBjoern A. Zeeb "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n", 162bfcc09ddSBjoern A. Zeeb alloc_id, buf_location); 163bfcc09ddSBjoern A. Zeeb return -EINVAL; 164bfcc09ddSBjoern A. Zeeb } 165bfcc09ddSBjoern A. Zeeb 166bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans, 167bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 168bfcc09ddSBjoern A. Zeeb { 169bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data; 170bfcc09ddSBjoern A. Zeeb u32 tp = le32_to_cpu(hcmd->time_point); 171bfcc09ddSBjoern A. Zeeb 172bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(tlv->length) <= sizeof(*hcmd)) 173bfcc09ddSBjoern A. Zeeb return -EINVAL; 174bfcc09ddSBjoern A. Zeeb 175bfcc09ddSBjoern A. Zeeb /* Host commands can not be sent in early time point since the FW 176bfcc09ddSBjoern A. Zeeb * is not ready 177bfcc09ddSBjoern A. Zeeb */ 178bfcc09ddSBjoern A. Zeeb if (tp == IWL_FW_INI_TIME_POINT_INVALID || 179bfcc09ddSBjoern A. Zeeb tp >= IWL_FW_INI_TIME_POINT_NUM || 180bfcc09ddSBjoern A. Zeeb tp == IWL_FW_INI_TIME_POINT_EARLY) { 181bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, 182bfcc09ddSBjoern A. Zeeb "WRT: Invalid time point %u for host command TLV\n", 183bfcc09ddSBjoern A. Zeeb tp); 184bfcc09ddSBjoern A. Zeeb return -EINVAL; 185bfcc09ddSBjoern A. 
Zeeb } 186bfcc09ddSBjoern A. Zeeb 187*a4128aadSBjoern A. Zeeb if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list)) 188*a4128aadSBjoern A. Zeeb return -ENOMEM; 189*a4128aadSBjoern A. Zeeb return 0; 190bfcc09ddSBjoern A. Zeeb } 191bfcc09ddSBjoern A. Zeeb 192bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, 193bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 194bfcc09ddSBjoern A. Zeeb { 195bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data; 196bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv **active_reg; 197bfcc09ddSBjoern A. Zeeb u32 id = le32_to_cpu(reg->id); 198d9836fb4SBjoern A. Zeeb u8 type = reg->type; 199bfcc09ddSBjoern A. Zeeb u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length); 200bfcc09ddSBjoern A. Zeeb 201bfcc09ddSBjoern A. Zeeb /* 202d9836fb4SBjoern A. Zeeb * The higher part of the ID from version 2 is debug policy. 203d9836fb4SBjoern A. Zeeb * The id will be only lsb 16 bits, so mask it out. 204bfcc09ddSBjoern A. Zeeb */ 205d9836fb4SBjoern A. Zeeb if (le32_to_cpu(reg->hdr.version) >= 2) 206d9836fb4SBjoern A. Zeeb id &= IWL_FW_INI_REGION_ID_MASK; 207bfcc09ddSBjoern A. Zeeb 208bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(tlv->length) < sizeof(*reg)) 209bfcc09ddSBjoern A. Zeeb return -EINVAL; 210bfcc09ddSBjoern A. Zeeb 211bfcc09ddSBjoern A. Zeeb /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */ 212bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n", 213bfcc09ddSBjoern A. Zeeb IWL_FW_INI_MAX_NAME, reg->name); 214bfcc09ddSBjoern A. Zeeb 215bfcc09ddSBjoern A. Zeeb if (id >= IWL_FW_INI_MAX_REGION_ID) { 216bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, "WRT: Invalid region id %u\n", id); 217bfcc09ddSBjoern A. Zeeb return -EINVAL; 218bfcc09ddSBjoern A. Zeeb } 219bfcc09ddSBjoern A. Zeeb 220bfcc09ddSBjoern A. Zeeb if (type <= IWL_FW_INI_REGION_INVALID || 221bfcc09ddSBjoern A. Zeeb type >= IWL_FW_INI_REGION_NUM) { 222bfcc09ddSBjoern A. 
Zeeb IWL_ERR(trans, "WRT: Invalid region type %u\n", type); 223bfcc09ddSBjoern A. Zeeb return -EINVAL; 224bfcc09ddSBjoern A. Zeeb } 225bfcc09ddSBjoern A. Zeeb 226d9836fb4SBjoern A. Zeeb if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) { 227d9836fb4SBjoern A. Zeeb trans->dbg.imr_data.sram_addr = 228d9836fb4SBjoern A. Zeeb le32_to_cpu(reg->internal_buffer.base_addr); 229d9836fb4SBjoern A. Zeeb trans->dbg.imr_data.sram_size = 230d9836fb4SBjoern A. Zeeb le32_to_cpu(reg->internal_buffer.size); 231d9836fb4SBjoern A. Zeeb } 232d9836fb4SBjoern A. Zeeb 233d9836fb4SBjoern A. Zeeb 234bfcc09ddSBjoern A. Zeeb active_reg = &trans->dbg.active_regions[id]; 235bfcc09ddSBjoern A. Zeeb if (*active_reg) { 236bfcc09ddSBjoern A. Zeeb IWL_WARN(trans, "WRT: Overriding region id %u\n", id); 237bfcc09ddSBjoern A. Zeeb 238bfcc09ddSBjoern A. Zeeb kfree(*active_reg); 239bfcc09ddSBjoern A. Zeeb } 240bfcc09ddSBjoern A. Zeeb 241bfcc09ddSBjoern A. Zeeb *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL); 242bfcc09ddSBjoern A. Zeeb if (!*active_reg) 243bfcc09ddSBjoern A. Zeeb return -ENOMEM; 244bfcc09ddSBjoern A. Zeeb 245bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type); 246bfcc09ddSBjoern A. Zeeb 247bfcc09ddSBjoern A. Zeeb return 0; 248bfcc09ddSBjoern A. Zeeb } 249bfcc09ddSBjoern A. Zeeb 250bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, 251bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 252bfcc09ddSBjoern A. Zeeb { 253bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data; 254bfcc09ddSBjoern A. Zeeb u32 tp = le32_to_cpu(trig->time_point); 255d9836fb4SBjoern A. Zeeb u32 rf = le32_to_cpu(trig->reset_fw); 256*a4128aadSBjoern A. Zeeb struct iwl_ucode_tlv *new_tlv; 257bfcc09ddSBjoern A. Zeeb 258bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(tlv->length) < sizeof(*trig)) 259bfcc09ddSBjoern A. Zeeb return -EINVAL; 260bfcc09ddSBjoern A. Zeeb 261bfcc09ddSBjoern A. 
Zeeb if (tp <= IWL_FW_INI_TIME_POINT_INVALID || 262bfcc09ddSBjoern A. Zeeb tp >= IWL_FW_INI_TIME_POINT_NUM) { 263bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, 264bfcc09ddSBjoern A. Zeeb "WRT: Invalid time point %u for trigger TLV\n", 265bfcc09ddSBjoern A. Zeeb tp); 266bfcc09ddSBjoern A. Zeeb return -EINVAL; 267bfcc09ddSBjoern A. Zeeb } 268bfcc09ddSBjoern A. Zeeb 269d9836fb4SBjoern A. Zeeb IWL_DEBUG_FW(trans, 270d9836fb4SBjoern A. Zeeb "WRT: time point %u for trigger TLV with reset_fw %u\n", 271d9836fb4SBjoern A. Zeeb tp, rf); 272d9836fb4SBjoern A. Zeeb trans->dbg.last_tp_resetfw = 0xFF; 273*a4128aadSBjoern A. Zeeb 274*a4128aadSBjoern A. Zeeb new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); 275*a4128aadSBjoern A. Zeeb if (!new_tlv) 276bfcc09ddSBjoern A. Zeeb return -ENOMEM; 277*a4128aadSBjoern A. Zeeb 278*a4128aadSBjoern A. Zeeb if (!le32_to_cpu(trig->occurrences)) { 279*a4128aadSBjoern A. Zeeb struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data; 280*a4128aadSBjoern A. Zeeb 281*a4128aadSBjoern A. Zeeb new_trig->occurrences = cpu_to_le32(-1); 282bfcc09ddSBjoern A. Zeeb } 283bfcc09ddSBjoern A. Zeeb 284*a4128aadSBjoern A. Zeeb return 0; 285bfcc09ddSBjoern A. Zeeb } 286bfcc09ddSBjoern A. Zeeb 287bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_config_set(struct iwl_trans *trans, 288bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) 289bfcc09ddSBjoern A. Zeeb { 290bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data; 291bfcc09ddSBjoern A. Zeeb u32 tp = le32_to_cpu(conf_set->time_point); 292bfcc09ddSBjoern A. Zeeb u32 type = le32_to_cpu(conf_set->set_type); 293bfcc09ddSBjoern A. Zeeb 294bfcc09ddSBjoern A. Zeeb if (tp <= IWL_FW_INI_TIME_POINT_INVALID || 295bfcc09ddSBjoern A. Zeeb tp >= IWL_FW_INI_TIME_POINT_NUM) { 296bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, 297bfcc09ddSBjoern A. Zeeb "WRT: Invalid time point %u for config set TLV\n", tp); 298bfcc09ddSBjoern A. 
Zeeb return -EINVAL; 299bfcc09ddSBjoern A. Zeeb } 300bfcc09ddSBjoern A. Zeeb 301bfcc09ddSBjoern A. Zeeb if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID || 302bfcc09ddSBjoern A. Zeeb type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) { 303bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, 304bfcc09ddSBjoern A. Zeeb "WRT: Invalid config set type %u for config set TLV\n", type); 305bfcc09ddSBjoern A. Zeeb return -EINVAL; 306bfcc09ddSBjoern A. Zeeb } 307bfcc09ddSBjoern A. Zeeb 308*a4128aadSBjoern A. Zeeb if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list)) 309*a4128aadSBjoern A. Zeeb return -ENOMEM; 310*a4128aadSBjoern A. Zeeb return 0; 311bfcc09ddSBjoern A. Zeeb } 312bfcc09ddSBjoern A. Zeeb 313bfcc09ddSBjoern A. Zeeb static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, 314bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv) = { 315bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info, 316bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc, 317bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd, 318bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region, 319bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger, 320bfcc09ddSBjoern A. Zeeb [IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set, 321bfcc09ddSBjoern A. Zeeb }; 322bfcc09ddSBjoern A. Zeeb 323bfcc09ddSBjoern A. Zeeb void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, 324bfcc09ddSBjoern A. Zeeb bool ext) 325bfcc09ddSBjoern A. Zeeb { 326bfcc09ddSBjoern A. Zeeb enum iwl_ini_cfg_state *cfg_state = ext ? 327bfcc09ddSBjoern A. Zeeb &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg; 328d9836fb4SBjoern A. Zeeb const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; 329d9836fb4SBjoern A. Zeeb u32 type; 330d9836fb4SBjoern A. Zeeb u32 tlv_idx; 331d9836fb4SBjoern A. Zeeb u32 domain; 332bfcc09ddSBjoern A. Zeeb int ret; 333bfcc09ddSBjoern A. 
Zeeb 334d9836fb4SBjoern A. Zeeb if (le32_to_cpu(tlv->length) < sizeof(*hdr)) 335d9836fb4SBjoern A. Zeeb return; 336d9836fb4SBjoern A. Zeeb 337d9836fb4SBjoern A. Zeeb type = le32_to_cpu(tlv->type); 338d9836fb4SBjoern A. Zeeb tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; 339d9836fb4SBjoern A. Zeeb domain = le32_to_cpu(hdr->domain); 340d9836fb4SBjoern A. Zeeb 341bfcc09ddSBjoern A. Zeeb if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON && 342bfcc09ddSBjoern A. Zeeb !(domain & trans->dbg.domains_bitmap)) { 343bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, 344bfcc09ddSBjoern A. Zeeb "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n", 345bfcc09ddSBjoern A. Zeeb domain, trans->dbg.domains_bitmap); 346bfcc09ddSBjoern A. Zeeb return; 347bfcc09ddSBjoern A. Zeeb } 348bfcc09ddSBjoern A. Zeeb 349bfcc09ddSBjoern A. Zeeb if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) { 350bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type); 351bfcc09ddSBjoern A. Zeeb goto out_err; 352bfcc09ddSBjoern A. Zeeb } 353bfcc09ddSBjoern A. Zeeb 354bfcc09ddSBjoern A. Zeeb if (!iwl_dbg_tlv_ver_support(tlv)) { 355bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type, 356bfcc09ddSBjoern A. Zeeb le32_to_cpu(hdr->version)); 357bfcc09ddSBjoern A. Zeeb goto out_err; 358bfcc09ddSBjoern A. Zeeb } 359bfcc09ddSBjoern A. Zeeb 360bfcc09ddSBjoern A. Zeeb ret = dbg_tlv_alloc[tlv_idx](trans, tlv); 361bfcc09ddSBjoern A. Zeeb if (ret) { 3629af1bba4SBjoern A. Zeeb IWL_WARN(trans, 363bfcc09ddSBjoern A. Zeeb "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n", 364bfcc09ddSBjoern A. Zeeb type, ret, ext); 365bfcc09ddSBjoern A. Zeeb goto out_err; 366bfcc09ddSBjoern A. Zeeb } 367bfcc09ddSBjoern A. Zeeb 368bfcc09ddSBjoern A. Zeeb if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED) 369bfcc09ddSBjoern A. Zeeb *cfg_state = IWL_INI_CFG_STATE_LOADED; 370bfcc09ddSBjoern A. Zeeb 371bfcc09ddSBjoern A. Zeeb return; 372bfcc09ddSBjoern A. Zeeb 373bfcc09ddSBjoern A. 
Zeeb out_err: 374bfcc09ddSBjoern A. Zeeb *cfg_state = IWL_INI_CFG_STATE_CORRUPTED; 375bfcc09ddSBjoern A. Zeeb } 376bfcc09ddSBjoern A. Zeeb 377bfcc09ddSBjoern A. Zeeb void iwl_dbg_tlv_del_timers(struct iwl_trans *trans) 378bfcc09ddSBjoern A. Zeeb { 379bfcc09ddSBjoern A. Zeeb struct list_head *timer_list = &trans->dbg.periodic_trig_list; 380bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_timer_node *node, *tmp; 381bfcc09ddSBjoern A. Zeeb 382bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(node, tmp, timer_list, list) { 3839af1bba4SBjoern A. Zeeb timer_shutdown_sync(&node->timer); 384bfcc09ddSBjoern A. Zeeb list_del(&node->list); 385bfcc09ddSBjoern A. Zeeb kfree(node); 386bfcc09ddSBjoern A. Zeeb } 387bfcc09ddSBjoern A. Zeeb } 388bfcc09ddSBjoern A. Zeeb IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers); 389bfcc09ddSBjoern A. Zeeb 390bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans, 391bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_allocation_id alloc_id) 392bfcc09ddSBjoern A. Zeeb { 393bfcc09ddSBjoern A. Zeeb struct iwl_fw_mon *fw_mon; 394bfcc09ddSBjoern A. Zeeb int i; 395bfcc09ddSBjoern A. Zeeb 396bfcc09ddSBjoern A. Zeeb if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID || 397bfcc09ddSBjoern A. Zeeb alloc_id >= IWL_FW_INI_ALLOCATION_NUM) 398bfcc09ddSBjoern A. Zeeb return; 399bfcc09ddSBjoern A. Zeeb 400bfcc09ddSBjoern A. Zeeb fw_mon = &trans->dbg.fw_mon_ini[alloc_id]; 401bfcc09ddSBjoern A. Zeeb 402bfcc09ddSBjoern A. Zeeb for (i = 0; i < fw_mon->num_frags; i++) { 403bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *frag = &fw_mon->frags[i]; 404bfcc09ddSBjoern A. Zeeb 405bfcc09ddSBjoern A. Zeeb dma_free_coherent(trans->dev, frag->size, frag->block, 406bfcc09ddSBjoern A. Zeeb frag->physical); 407bfcc09ddSBjoern A. Zeeb 408bfcc09ddSBjoern A. Zeeb frag->physical = 0; 409bfcc09ddSBjoern A. Zeeb frag->block = NULL; 410bfcc09ddSBjoern A. Zeeb frag->size = 0; 411bfcc09ddSBjoern A. Zeeb } 412bfcc09ddSBjoern A. Zeeb 413bfcc09ddSBjoern A. 
Zeeb kfree(fw_mon->frags); 414bfcc09ddSBjoern A. Zeeb fw_mon->frags = NULL; 415bfcc09ddSBjoern A. Zeeb fw_mon->num_frags = 0; 416bfcc09ddSBjoern A. Zeeb } 417bfcc09ddSBjoern A. Zeeb 418bfcc09ddSBjoern A. Zeeb void iwl_dbg_tlv_free(struct iwl_trans *trans) 419bfcc09ddSBjoern A. Zeeb { 420bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp; 421bfcc09ddSBjoern A. Zeeb int i; 422bfcc09ddSBjoern A. Zeeb 423bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_del_timers(trans); 424bfcc09ddSBjoern A. Zeeb 425bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) { 426bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv **active_reg = 427bfcc09ddSBjoern A. Zeeb &trans->dbg.active_regions[i]; 428bfcc09ddSBjoern A. Zeeb 429bfcc09ddSBjoern A. Zeeb kfree(*active_reg); 430bfcc09ddSBjoern A. Zeeb *active_reg = NULL; 431bfcc09ddSBjoern A. Zeeb } 432bfcc09ddSBjoern A. Zeeb 433bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(tlv_node, tlv_node_tmp, 434bfcc09ddSBjoern A. Zeeb &trans->dbg.debug_info_tlv_list, list) { 435bfcc09ddSBjoern A. Zeeb list_del(&tlv_node->list); 436bfcc09ddSBjoern A. Zeeb kfree(tlv_node); 437bfcc09ddSBjoern A. Zeeb } 438bfcc09ddSBjoern A. Zeeb 439bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) { 440bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_time_point_data *tp = 441bfcc09ddSBjoern A. Zeeb &trans->dbg.time_point[i]; 442bfcc09ddSBjoern A. Zeeb 443bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list, 444bfcc09ddSBjoern A. Zeeb list) { 445bfcc09ddSBjoern A. Zeeb list_del(&tlv_node->list); 446bfcc09ddSBjoern A. Zeeb kfree(tlv_node); 447bfcc09ddSBjoern A. Zeeb } 448bfcc09ddSBjoern A. Zeeb 449bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list, 450bfcc09ddSBjoern A. Zeeb list) { 451bfcc09ddSBjoern A. Zeeb list_del(&tlv_node->list); 452bfcc09ddSBjoern A. Zeeb kfree(tlv_node); 453bfcc09ddSBjoern A. Zeeb } 454bfcc09ddSBjoern A. 
Zeeb 455bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(tlv_node, tlv_node_tmp, 456bfcc09ddSBjoern A. Zeeb &tp->active_trig_list, list) { 457bfcc09ddSBjoern A. Zeeb list_del(&tlv_node->list); 458bfcc09ddSBjoern A. Zeeb kfree(tlv_node); 459bfcc09ddSBjoern A. Zeeb } 460bfcc09ddSBjoern A. Zeeb 461bfcc09ddSBjoern A. Zeeb list_for_each_entry_safe(tlv_node, tlv_node_tmp, 462bfcc09ddSBjoern A. Zeeb &tp->config_list, list) { 463bfcc09ddSBjoern A. Zeeb list_del(&tlv_node->list); 464bfcc09ddSBjoern A. Zeeb kfree(tlv_node); 465bfcc09ddSBjoern A. Zeeb } 466bfcc09ddSBjoern A. Zeeb 467bfcc09ddSBjoern A. Zeeb } 468bfcc09ddSBjoern A. Zeeb 469bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++) 470bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_fragments_free(trans, i); 471bfcc09ddSBjoern A. Zeeb } 472bfcc09ddSBjoern A. Zeeb 473bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data, 474bfcc09ddSBjoern A. Zeeb size_t len) 475bfcc09ddSBjoern A. Zeeb { 476bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *tlv; 477bfcc09ddSBjoern A. Zeeb u32 tlv_len; 478bfcc09ddSBjoern A. Zeeb 479bfcc09ddSBjoern A. Zeeb while (len >= sizeof(*tlv)) { 480bfcc09ddSBjoern A. Zeeb len -= sizeof(*tlv); 481bfcc09ddSBjoern A. Zeeb tlv = (const void *)data; 482bfcc09ddSBjoern A. Zeeb 483bfcc09ddSBjoern A. Zeeb tlv_len = le32_to_cpu(tlv->length); 484bfcc09ddSBjoern A. Zeeb 485bfcc09ddSBjoern A. Zeeb if (len < tlv_len) { 486bfcc09ddSBjoern A. Zeeb IWL_ERR(trans, "invalid TLV len: %zd/%u\n", 487bfcc09ddSBjoern A. Zeeb len, tlv_len); 488bfcc09ddSBjoern A. Zeeb return -EINVAL; 489bfcc09ddSBjoern A. Zeeb } 490bfcc09ddSBjoern A. Zeeb len -= ALIGN(tlv_len, 4); 491bfcc09ddSBjoern A. Zeeb data += sizeof(*tlv) + ALIGN(tlv_len, 4); 492bfcc09ddSBjoern A. Zeeb 493bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_alloc(trans, tlv, true); 494bfcc09ddSBjoern A. Zeeb } 495bfcc09ddSBjoern A. Zeeb 496bfcc09ddSBjoern A. Zeeb return 0; 497bfcc09ddSBjoern A. Zeeb } 498bfcc09ddSBjoern A. 
Zeeb 499bfcc09ddSBjoern A. Zeeb void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) 500bfcc09ddSBjoern A. Zeeb { 501bfcc09ddSBjoern A. Zeeb const struct firmware *fw; 502bfcc09ddSBjoern A. Zeeb const char *yoyo_bin = "iwl-debug-yoyo.bin"; 503bfcc09ddSBjoern A. Zeeb int res; 504bfcc09ddSBjoern A. Zeeb 505bfcc09ddSBjoern A. Zeeb if (!iwlwifi_mod_params.enable_ini || 506d9836fb4SBjoern A. Zeeb trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) 507bfcc09ddSBjoern A. Zeeb return; 508bfcc09ddSBjoern A. Zeeb 509bfcc09ddSBjoern A. Zeeb res = firmware_request_nowarn(&fw, yoyo_bin, dev); 510bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin); 511bfcc09ddSBjoern A. Zeeb 512bfcc09ddSBjoern A. Zeeb if (res) 513bfcc09ddSBjoern A. Zeeb return; 514bfcc09ddSBjoern A. Zeeb 515*a4128aadSBjoern A. Zeeb trans->dbg.yoyo_bin_loaded = true; 516*a4128aadSBjoern A. Zeeb 517bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); 518bfcc09ddSBjoern A. Zeeb 519bfcc09ddSBjoern A. Zeeb release_firmware(fw); 520bfcc09ddSBjoern A. Zeeb } 521bfcc09ddSBjoern A. Zeeb 522bfcc09ddSBjoern A. Zeeb void iwl_dbg_tlv_init(struct iwl_trans *trans) 523bfcc09ddSBjoern A. Zeeb { 524bfcc09ddSBjoern A. Zeeb int i; 525bfcc09ddSBjoern A. Zeeb 526bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list); 527bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&trans->dbg.periodic_trig_list); 528bfcc09ddSBjoern A. Zeeb 529bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) { 530bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_time_point_data *tp = 531bfcc09ddSBjoern A. Zeeb &trans->dbg.time_point[i]; 532bfcc09ddSBjoern A. Zeeb 533bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&tp->trig_list); 534bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&tp->hcmd_list); 535bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&tp->active_trig_list); 536bfcc09ddSBjoern A. Zeeb INIT_LIST_HEAD(&tp->config_list); 537bfcc09ddSBjoern A. 
Zeeb } 538bfcc09ddSBjoern A. Zeeb } 539bfcc09ddSBjoern A. Zeeb 540bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt, 541bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *frag, u32 pages) 542bfcc09ddSBjoern A. Zeeb { 543bfcc09ddSBjoern A. Zeeb void *block = NULL; 544bfcc09ddSBjoern A. Zeeb dma_addr_t physical; 545bfcc09ddSBjoern A. Zeeb 546bfcc09ddSBjoern A. Zeeb if (!frag || frag->size || !pages) 547bfcc09ddSBjoern A. Zeeb return -EIO; 548bfcc09ddSBjoern A. Zeeb 549bfcc09ddSBjoern A. Zeeb /* 550bfcc09ddSBjoern A. Zeeb * We try to allocate as many pages as we can, starting with 551bfcc09ddSBjoern A. Zeeb * the requested amount and going down until we can allocate 552bfcc09ddSBjoern A. Zeeb * something. Because of DIV_ROUND_UP(), pages will never go 553bfcc09ddSBjoern A. Zeeb * down to 0 and stop the loop, so stop when pages reaches 1, 554bfcc09ddSBjoern A. Zeeb * which is too small anyway. 555bfcc09ddSBjoern A. Zeeb */ 556bfcc09ddSBjoern A. Zeeb while (pages > 1) { 557bfcc09ddSBjoern A. Zeeb block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE, 558bfcc09ddSBjoern A. Zeeb &physical, 559bfcc09ddSBjoern A. Zeeb GFP_KERNEL | __GFP_NOWARN); 560bfcc09ddSBjoern A. Zeeb if (block) 561bfcc09ddSBjoern A. Zeeb break; 562bfcc09ddSBjoern A. Zeeb 563bfcc09ddSBjoern A. Zeeb IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n", 564bfcc09ddSBjoern A. Zeeb pages * PAGE_SIZE); 565bfcc09ddSBjoern A. Zeeb 566bfcc09ddSBjoern A. Zeeb pages = DIV_ROUND_UP(pages, 2); 567bfcc09ddSBjoern A. Zeeb } 568bfcc09ddSBjoern A. Zeeb 569bfcc09ddSBjoern A. Zeeb if (!block) 570bfcc09ddSBjoern A. Zeeb return -ENOMEM; 571bfcc09ddSBjoern A. Zeeb 572bfcc09ddSBjoern A. Zeeb frag->physical = physical; 573bfcc09ddSBjoern A. Zeeb frag->block = block; 574bfcc09ddSBjoern A. Zeeb frag->size = pages * PAGE_SIZE; 575bfcc09ddSBjoern A. Zeeb 576bfcc09ddSBjoern A. Zeeb return pages; 577bfcc09ddSBjoern A. Zeeb } 578bfcc09ddSBjoern A. Zeeb 579bfcc09ddSBjoern A. 
Zeeb static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt, 580bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_allocation_id alloc_id) 581bfcc09ddSBjoern A. Zeeb { 582bfcc09ddSBjoern A. Zeeb struct iwl_fw_mon *fw_mon; 583bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_allocation_tlv *fw_mon_cfg; 584bfcc09ddSBjoern A. Zeeb u32 num_frags, remain_pages, frag_pages; 585bfcc09ddSBjoern A. Zeeb int i; 586bfcc09ddSBjoern A. Zeeb 587bfcc09ddSBjoern A. Zeeb if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID || 588bfcc09ddSBjoern A. Zeeb alloc_id >= IWL_FW_INI_ALLOCATION_NUM) 589bfcc09ddSBjoern A. Zeeb return -EIO; 590bfcc09ddSBjoern A. Zeeb 591bfcc09ddSBjoern A. Zeeb fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id]; 592bfcc09ddSBjoern A. Zeeb fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; 593bfcc09ddSBjoern A. Zeeb 5949af1bba4SBjoern A. Zeeb if (fw_mon->num_frags) { 5959af1bba4SBjoern A. Zeeb for (i = 0; i < fw_mon->num_frags; i++) 5969af1bba4SBjoern A. Zeeb memset(fw_mon->frags[i].block, 0, 5979af1bba4SBjoern A. Zeeb fw_mon->frags[i].size); 5989af1bba4SBjoern A. Zeeb return 0; 5999af1bba4SBjoern A. Zeeb } 6009af1bba4SBjoern A. Zeeb 6019af1bba4SBjoern A. Zeeb if (fw_mon_cfg->buf_location != 602bfcc09ddSBjoern A. Zeeb cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH)) 603bfcc09ddSBjoern A. Zeeb return 0; 604bfcc09ddSBjoern A. Zeeb 605bfcc09ddSBjoern A. Zeeb num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num); 606d9836fb4SBjoern A. Zeeb if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { 607bfcc09ddSBjoern A. Zeeb if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) 608bfcc09ddSBjoern A. Zeeb return -EIO; 609bfcc09ddSBjoern A. Zeeb num_frags = 1; 6109af1bba4SBjoern A. Zeeb } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && 6119af1bba4SBjoern A. Zeeb alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) { 6129af1bba4SBjoern A. Zeeb return -EIO; 613bfcc09ddSBjoern A. Zeeb } 614bfcc09ddSBjoern A. Zeeb 615bfcc09ddSBjoern A. 
Zeeb remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size), 616bfcc09ddSBjoern A. Zeeb PAGE_SIZE); 617bfcc09ddSBjoern A. Zeeb num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS); 618bfcc09ddSBjoern A. Zeeb num_frags = min_t(u32, num_frags, remain_pages); 619bfcc09ddSBjoern A. Zeeb frag_pages = DIV_ROUND_UP(remain_pages, num_frags); 620bfcc09ddSBjoern A. Zeeb 621bfcc09ddSBjoern A. Zeeb fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL); 622bfcc09ddSBjoern A. Zeeb if (!fw_mon->frags) 623bfcc09ddSBjoern A. Zeeb return -ENOMEM; 624bfcc09ddSBjoern A. Zeeb 625bfcc09ddSBjoern A. Zeeb for (i = 0; i < num_frags; i++) { 626bfcc09ddSBjoern A. Zeeb int pages = min_t(u32, frag_pages, remain_pages); 627bfcc09ddSBjoern A. Zeeb 628bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 629bfcc09ddSBjoern A. Zeeb "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n", 630bfcc09ddSBjoern A. Zeeb alloc_id, i, pages * PAGE_SIZE); 631bfcc09ddSBjoern A. Zeeb 632bfcc09ddSBjoern A. Zeeb pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i], 633bfcc09ddSBjoern A. Zeeb pages); 634bfcc09ddSBjoern A. Zeeb if (pages < 0) { 635bfcc09ddSBjoern A. Zeeb u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) - 636bfcc09ddSBjoern A. Zeeb (remain_pages * PAGE_SIZE); 637bfcc09ddSBjoern A. Zeeb 638bfcc09ddSBjoern A. Zeeb if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) { 639bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_fragments_free(fwrt->trans, 640bfcc09ddSBjoern A. Zeeb alloc_id); 641bfcc09ddSBjoern A. Zeeb return pages; 642bfcc09ddSBjoern A. Zeeb } 643bfcc09ddSBjoern A. Zeeb break; 644bfcc09ddSBjoern A. Zeeb } 645bfcc09ddSBjoern A. Zeeb 646bfcc09ddSBjoern A. Zeeb remain_pages -= pages; 647bfcc09ddSBjoern A. Zeeb fw_mon->num_frags++; 648bfcc09ddSBjoern A. Zeeb } 649bfcc09ddSBjoern A. Zeeb 650bfcc09ddSBjoern A. Zeeb return 0; 651bfcc09ddSBjoern A. Zeeb } 652bfcc09ddSBjoern A. Zeeb 653bfcc09ddSBjoern A. 
Zeeb static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt, 654bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_allocation_id alloc_id) 655bfcc09ddSBjoern A. Zeeb { 656bfcc09ddSBjoern A. Zeeb struct iwl_fw_mon *fw_mon; 657bfcc09ddSBjoern A. Zeeb u32 remain_frags, num_commands; 658bfcc09ddSBjoern A. Zeeb int i, fw_mon_idx = 0; 659bfcc09ddSBjoern A. Zeeb 660bfcc09ddSBjoern A. Zeeb if (!fw_has_capa(&fwrt->fw->ucode_capa, 661bfcc09ddSBjoern A. Zeeb IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) 662bfcc09ddSBjoern A. Zeeb return 0; 663bfcc09ddSBjoern A. Zeeb 664bfcc09ddSBjoern A. Zeeb if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID || 665bfcc09ddSBjoern A. Zeeb alloc_id >= IWL_FW_INI_ALLOCATION_NUM) 666bfcc09ddSBjoern A. Zeeb return -EIO; 667bfcc09ddSBjoern A. Zeeb 668bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != 669bfcc09ddSBjoern A. Zeeb IWL_FW_INI_LOCATION_DRAM_PATH) 670bfcc09ddSBjoern A. Zeeb return 0; 671bfcc09ddSBjoern A. Zeeb 672bfcc09ddSBjoern A. Zeeb fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; 673bfcc09ddSBjoern A. Zeeb 674bfcc09ddSBjoern A. Zeeb /* the first fragment of DBGC1 is given to the FW via register 675bfcc09ddSBjoern A. Zeeb * or context info 676bfcc09ddSBjoern A. Zeeb */ 677bfcc09ddSBjoern A. Zeeb if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1) 678bfcc09ddSBjoern A. Zeeb fw_mon_idx++; 679bfcc09ddSBjoern A. Zeeb 680bfcc09ddSBjoern A. Zeeb remain_frags = fw_mon->num_frags - fw_mon_idx; 681bfcc09ddSBjoern A. Zeeb if (!remain_frags) 682bfcc09ddSBjoern A. Zeeb return 0; 683bfcc09ddSBjoern A. Zeeb 684bfcc09ddSBjoern A. Zeeb num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); 685bfcc09ddSBjoern A. Zeeb 686bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n", 687bfcc09ddSBjoern A. Zeeb alloc_id); 688bfcc09ddSBjoern A. Zeeb 689bfcc09ddSBjoern A. Zeeb for (i = 0; i < num_commands; i++) { 690bfcc09ddSBjoern A. 
Zeeb u32 num_frags = min_t(u32, remain_frags, 691bfcc09ddSBjoern A. Zeeb BUF_ALLOC_MAX_NUM_FRAGS); 692bfcc09ddSBjoern A. Zeeb struct iwl_buf_alloc_cmd data = { 693bfcc09ddSBjoern A. Zeeb .alloc_id = cpu_to_le32(alloc_id), 694bfcc09ddSBjoern A. Zeeb .num_frags = cpu_to_le32(num_frags), 695bfcc09ddSBjoern A. Zeeb .buf_location = 696bfcc09ddSBjoern A. Zeeb cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH), 697bfcc09ddSBjoern A. Zeeb }; 698bfcc09ddSBjoern A. Zeeb struct iwl_host_cmd hcmd = { 699bfcc09ddSBjoern A. Zeeb .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION), 700bfcc09ddSBjoern A. Zeeb .data[0] = &data, 701bfcc09ddSBjoern A. Zeeb .len[0] = sizeof(data), 702bfcc09ddSBjoern A. Zeeb .flags = CMD_SEND_IN_RFKILL, 703bfcc09ddSBjoern A. Zeeb }; 704bfcc09ddSBjoern A. Zeeb int ret, j; 705bfcc09ddSBjoern A. Zeeb 706bfcc09ddSBjoern A. Zeeb for (j = 0; j < num_frags; j++) { 707bfcc09ddSBjoern A. Zeeb struct iwl_buf_alloc_frag *frag = &data.frags[j]; 708bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *fw_mon_frag = 709bfcc09ddSBjoern A. Zeeb &fw_mon->frags[fw_mon_idx++]; 710bfcc09ddSBjoern A. Zeeb 711bfcc09ddSBjoern A. Zeeb frag->addr = cpu_to_le64(fw_mon_frag->physical); 712bfcc09ddSBjoern A. Zeeb frag->size = cpu_to_le32(fw_mon_frag->size); 713bfcc09ddSBjoern A. Zeeb } 714bfcc09ddSBjoern A. Zeeb ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); 715bfcc09ddSBjoern A. Zeeb if (ret) 716bfcc09ddSBjoern A. Zeeb return ret; 717bfcc09ddSBjoern A. Zeeb 718bfcc09ddSBjoern A. Zeeb remain_frags -= num_frags; 719bfcc09ddSBjoern A. Zeeb } 720bfcc09ddSBjoern A. Zeeb 721bfcc09ddSBjoern A. Zeeb return 0; 722bfcc09ddSBjoern A. Zeeb } 723bfcc09ddSBjoern A. Zeeb 724bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt) 725bfcc09ddSBjoern A. Zeeb { 726bfcc09ddSBjoern A. Zeeb int ret, i; 727bfcc09ddSBjoern A. Zeeb 728bfcc09ddSBjoern A. Zeeb if (fw_has_capa(&fwrt->fw->ucode_capa, 729bfcc09ddSBjoern A. Zeeb IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) 730bfcc09ddSBjoern A. 
Zeeb return; 731bfcc09ddSBjoern A. Zeeb 732bfcc09ddSBjoern A. Zeeb for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) { 733bfcc09ddSBjoern A. Zeeb ret = iwl_dbg_tlv_apply_buffer(fwrt, i); 734bfcc09ddSBjoern A. Zeeb if (ret) 735bfcc09ddSBjoern A. Zeeb IWL_WARN(fwrt, 736bfcc09ddSBjoern A. Zeeb "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n", 737bfcc09ddSBjoern A. Zeeb i, ret); 738bfcc09ddSBjoern A. Zeeb } 739bfcc09ddSBjoern A. Zeeb } 740bfcc09ddSBjoern A. Zeeb 741bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt, 742bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_allocation_id alloc_id, 743bfcc09ddSBjoern A. Zeeb struct iwl_dram_info *dram_info) 744bfcc09ddSBjoern A. Zeeb { 745bfcc09ddSBjoern A. Zeeb struct iwl_fw_mon *fw_mon; 746bfcc09ddSBjoern A. Zeeb u32 remain_frags, num_frags; 747bfcc09ddSBjoern A. Zeeb int j, fw_mon_idx = 0; 748bfcc09ddSBjoern A. Zeeb struct iwl_buf_alloc_cmd *data; 749bfcc09ddSBjoern A. Zeeb 750bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != 751bfcc09ddSBjoern A. Zeeb IWL_FW_INI_LOCATION_DRAM_PATH) { 7529af1bba4SBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n", 7539af1bba4SBjoern A. Zeeb alloc_id); 754bfcc09ddSBjoern A. Zeeb return -1; 755bfcc09ddSBjoern A. Zeeb } 756bfcc09ddSBjoern A. Zeeb 757bfcc09ddSBjoern A. Zeeb fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; 758bfcc09ddSBjoern A. Zeeb 759bfcc09ddSBjoern A. Zeeb /* the first fragment of DBGC1 is given to the FW via register 760bfcc09ddSBjoern A. Zeeb * or context info 761bfcc09ddSBjoern A. Zeeb */ 762bfcc09ddSBjoern A. Zeeb if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1) 763bfcc09ddSBjoern A. Zeeb fw_mon_idx++; 764bfcc09ddSBjoern A. Zeeb 765bfcc09ddSBjoern A. Zeeb remain_frags = fw_mon->num_frags - fw_mon_idx; 766bfcc09ddSBjoern A. Zeeb if (!remain_frags) 767bfcc09ddSBjoern A. Zeeb return -1; 768bfcc09ddSBjoern A. Zeeb 769bfcc09ddSBjoern A. 
Zeeb num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); 770bfcc09ddSBjoern A. Zeeb data = &dram_info->dram_frags[alloc_id - 1]; 771bfcc09ddSBjoern A. Zeeb data->alloc_id = cpu_to_le32(alloc_id); 772bfcc09ddSBjoern A. Zeeb data->num_frags = cpu_to_le32(num_frags); 773bfcc09ddSBjoern A. Zeeb data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH); 774bfcc09ddSBjoern A. Zeeb 775bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n", 776bfcc09ddSBjoern A. Zeeb cpu_to_le32(alloc_id), cpu_to_le32(num_frags)); 777bfcc09ddSBjoern A. Zeeb 778bfcc09ddSBjoern A. Zeeb for (j = 0; j < num_frags; j++) { 779bfcc09ddSBjoern A. Zeeb struct iwl_buf_alloc_frag *frag = &data->frags[j]; 780bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++]; 781bfcc09ddSBjoern A. Zeeb 782bfcc09ddSBjoern A. Zeeb frag->addr = cpu_to_le64(fw_mon_frag->physical); 783bfcc09ddSBjoern A. Zeeb frag->size = cpu_to_le32(fw_mon_frag->size); 784bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n"); 785bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n", 786bfcc09ddSBjoern A. Zeeb j, cpu_to_le64(fw_mon_frag->physical), 787bfcc09ddSBjoern A. Zeeb cpu_to_le32(fw_mon_frag->size)); 788bfcc09ddSBjoern A. Zeeb } 789bfcc09ddSBjoern A. Zeeb return 0; 790bfcc09ddSBjoern A. Zeeb } 791bfcc09ddSBjoern A. Zeeb 792bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt) 793bfcc09ddSBjoern A. Zeeb { 794d9836fb4SBjoern A. Zeeb int ret, i; 795d9836fb4SBjoern A. Zeeb bool dram_alloc = false; 796bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *frags = 797bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0]; 798d9836fb4SBjoern A. Zeeb struct iwl_dram_info *dram_info; 799d9836fb4SBjoern A. Zeeb 800d9836fb4SBjoern A. Zeeb if (!frags || !frags->block) 801d9836fb4SBjoern A. Zeeb return; 802d9836fb4SBjoern A. 
Zeeb 803d9836fb4SBjoern A. Zeeb dram_info = frags->block; 804bfcc09ddSBjoern A. Zeeb 805bfcc09ddSBjoern A. Zeeb if (!fw_has_capa(&fwrt->fw->ucode_capa, 806bfcc09ddSBjoern A. Zeeb IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) 807bfcc09ddSBjoern A. Zeeb return; 808bfcc09ddSBjoern A. Zeeb 8099af1bba4SBjoern A. Zeeb memset(dram_info, 0, sizeof(*dram_info)); 810bfcc09ddSBjoern A. Zeeb 811bfcc09ddSBjoern A. Zeeb for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1; 8129af1bba4SBjoern A. Zeeb i < IWL_FW_INI_ALLOCATION_NUM; i++) { 8139af1bba4SBjoern A. Zeeb if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location == 8149af1bba4SBjoern A. Zeeb IWL_FW_INI_LOCATION_INVALID) 8159af1bba4SBjoern A. Zeeb continue; 8169af1bba4SBjoern A. Zeeb 817d9836fb4SBjoern A. Zeeb ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info); 818bfcc09ddSBjoern A. Zeeb if (!ret) 819d9836fb4SBjoern A. Zeeb dram_alloc = true; 820bfcc09ddSBjoern A. Zeeb else 8219af1bba4SBjoern A. Zeeb IWL_INFO(fwrt, 822bfcc09ddSBjoern A. Zeeb "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n", 823bfcc09ddSBjoern A. Zeeb i, ret); 824bfcc09ddSBjoern A. Zeeb } 825d9836fb4SBjoern A. Zeeb 8269af1bba4SBjoern A. Zeeb if (dram_alloc) { 8279af1bba4SBjoern A. Zeeb dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); 8289af1bba4SBjoern A. Zeeb dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); 8299af1bba4SBjoern A. Zeeb } 830bfcc09ddSBjoern A. Zeeb } 831bfcc09ddSBjoern A. Zeeb 832bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt, 833bfcc09ddSBjoern A. Zeeb struct list_head *hcmd_list) 834bfcc09ddSBjoern A. Zeeb { 835bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 836bfcc09ddSBjoern A. Zeeb 837bfcc09ddSBjoern A. Zeeb list_for_each_entry(node, hcmd_list, list) { 838bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data; 839bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd; 840bfcc09ddSBjoern A. 
Zeeb u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd); 841bfcc09ddSBjoern A. Zeeb struct iwl_host_cmd cmd = { 842bfcc09ddSBjoern A. Zeeb .id = WIDE_ID(hcmd_data->group, hcmd_data->id), 843bfcc09ddSBjoern A. Zeeb .len = { hcmd_len, }, 844bfcc09ddSBjoern A. Zeeb .data = { hcmd_data->data, }, 845bfcc09ddSBjoern A. Zeeb }; 846bfcc09ddSBjoern A. Zeeb 847bfcc09ddSBjoern A. Zeeb iwl_trans_send_cmd(fwrt->trans, &cmd); 848bfcc09ddSBjoern A. Zeeb } 849bfcc09ddSBjoern A. Zeeb } 850bfcc09ddSBjoern A. Zeeb 851bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt, 852d9836fb4SBjoern A. Zeeb struct list_head *conf_list) 853bfcc09ddSBjoern A. Zeeb { 854bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 855bfcc09ddSBjoern A. Zeeb 856d9836fb4SBjoern A. Zeeb list_for_each_entry(node, conf_list, list) { 857bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data; 858bfcc09ddSBjoern A. Zeeb u32 count, address, value; 859bfcc09ddSBjoern A. Zeeb u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8; 860bfcc09ddSBjoern A. Zeeb u32 type = le32_to_cpu(config_list->set_type); 861bfcc09ddSBjoern A. Zeeb u32 offset = le32_to_cpu(config_list->addr_offset); 862bfcc09ddSBjoern A. Zeeb 863bfcc09ddSBjoern A. Zeeb switch (type) { 864bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: { 865bfcc09ddSBjoern A. Zeeb if (!iwl_trans_grab_nic_access(fwrt->trans)) { 866bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n"); 867bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n"); 868bfcc09ddSBjoern A. Zeeb continue; 869bfcc09ddSBjoern A. Zeeb } 870bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len); 871bfcc09ddSBjoern A. Zeeb for (count = 0; count < len; count++) { 872bfcc09ddSBjoern A. Zeeb address = le32_to_cpu(config_list->addr_val[count].address); 873bfcc09ddSBjoern A. 
Zeeb value = le32_to_cpu(config_list->addr_val[count].value); 874bfcc09ddSBjoern A. Zeeb iwl_trans_write_prph(fwrt->trans, address + offset, value); 875bfcc09ddSBjoern A. Zeeb } 876bfcc09ddSBjoern A. Zeeb iwl_trans_release_nic_access(fwrt->trans); 877bfcc09ddSBjoern A. Zeeb break; 878bfcc09ddSBjoern A. Zeeb } 879bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: { 880bfcc09ddSBjoern A. Zeeb for (count = 0; count < len; count++) { 881bfcc09ddSBjoern A. Zeeb address = le32_to_cpu(config_list->addr_val[count].address); 882bfcc09ddSBjoern A. Zeeb value = le32_to_cpu(config_list->addr_val[count].value); 883bfcc09ddSBjoern A. Zeeb iwl_trans_write_mem32(fwrt->trans, address + offset, value); 884bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n", 885bfcc09ddSBjoern A. Zeeb count, address, value); 886bfcc09ddSBjoern A. Zeeb } 887bfcc09ddSBjoern A. Zeeb break; 888bfcc09ddSBjoern A. Zeeb } 889bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_CONFIG_SET_TYPE_CSR: { 890bfcc09ddSBjoern A. Zeeb for (count = 0; count < len; count++) { 891bfcc09ddSBjoern A. Zeeb address = le32_to_cpu(config_list->addr_val[count].address); 892bfcc09ddSBjoern A. Zeeb value = le32_to_cpu(config_list->addr_val[count].value); 893bfcc09ddSBjoern A. Zeeb iwl_write32(fwrt->trans, address + offset, value); 894bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n", 895bfcc09ddSBjoern A. Zeeb count, address, value); 896bfcc09ddSBjoern A. Zeeb } 897bfcc09ddSBjoern A. Zeeb break; 898bfcc09ddSBjoern A. Zeeb } 899bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: { 900bfcc09ddSBjoern A. Zeeb struct iwl_dbgc1_info dram_info = {}; 901bfcc09ddSBjoern A. Zeeb struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0]; 902d9836fb4SBjoern A. Zeeb __le64 dram_base_addr; 903d9836fb4SBjoern A. Zeeb __le32 dram_size; 904d9836fb4SBjoern A. Zeeb u64 dram_addr; 905bfcc09ddSBjoern A. Zeeb u32 ret; 906bfcc09ddSBjoern A. 
Zeeb 907d9836fb4SBjoern A. Zeeb if (!frags) 908d9836fb4SBjoern A. Zeeb break; 909d9836fb4SBjoern A. Zeeb 910d9836fb4SBjoern A. Zeeb dram_base_addr = cpu_to_le64(frags->physical); 911d9836fb4SBjoern A. Zeeb dram_size = cpu_to_le32(frags->size); 912d9836fb4SBjoern A. Zeeb dram_addr = le64_to_cpu(dram_base_addr); 913d9836fb4SBjoern A. Zeeb 914bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n", 915bfcc09ddSBjoern A. Zeeb dram_base_addr, dram_size); 916bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n", 917bfcc09ddSBjoern A. Zeeb le32_to_cpu(config_list->addr_offset)); 918bfcc09ddSBjoern A. Zeeb for (count = 0; count < len; count++) { 919bfcc09ddSBjoern A. Zeeb address = le32_to_cpu(config_list->addr_val[count].address); 920bfcc09ddSBjoern A. Zeeb dram_info.dbgc1_add_lsb = 921bfcc09ddSBjoern A. Zeeb cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400); 922bfcc09ddSBjoern A. Zeeb dram_info.dbgc1_add_msb = 923bfcc09ddSBjoern A. Zeeb cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32); 924bfcc09ddSBjoern A. Zeeb dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400); 925bfcc09ddSBjoern A. Zeeb ret = iwl_trans_write_mem(fwrt->trans, 926bfcc09ddSBjoern A. Zeeb address + offset, &dram_info, 4); 927bfcc09ddSBjoern A. Zeeb if (ret) { 928bfcc09ddSBjoern A. Zeeb IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n"); 929bfcc09ddSBjoern A. Zeeb break; 930bfcc09ddSBjoern A. Zeeb } 931bfcc09ddSBjoern A. Zeeb } 932bfcc09ddSBjoern A. Zeeb break; 933bfcc09ddSBjoern A. Zeeb } 934bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: { 935bfcc09ddSBjoern A. Zeeb u32 debug_token_config = 936bfcc09ddSBjoern A. Zeeb le32_to_cpu(config_list->addr_val[0].value); 937bfcc09ddSBjoern A. Zeeb 938bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n", 939bfcc09ddSBjoern A. Zeeb debug_token_config); 940bfcc09ddSBjoern A. 
Zeeb fwrt->trans->dbg.ucode_preset = debug_token_config; 941bfcc09ddSBjoern A. Zeeb break; 942bfcc09ddSBjoern A. Zeeb } 943bfcc09ddSBjoern A. Zeeb default: 944bfcc09ddSBjoern A. Zeeb break; 945bfcc09ddSBjoern A. Zeeb } 946bfcc09ddSBjoern A. Zeeb } 947bfcc09ddSBjoern A. Zeeb } 948bfcc09ddSBjoern A. Zeeb 949bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) 950bfcc09ddSBjoern A. Zeeb { 951bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_timer_node *timer_node = 952bfcc09ddSBjoern A. Zeeb from_timer(timer_node, t, timer); 953bfcc09ddSBjoern A. Zeeb struct iwl_fwrt_dump_data dump_data = { 954bfcc09ddSBjoern A. Zeeb .trig = (void *)timer_node->tlv->data, 955bfcc09ddSBjoern A. Zeeb }; 956bfcc09ddSBjoern A. Zeeb int ret; 957bfcc09ddSBjoern A. Zeeb 958bfcc09ddSBjoern A. Zeeb ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false); 959bfcc09ddSBjoern A. Zeeb if (!ret || ret == -EBUSY) { 960bfcc09ddSBjoern A. Zeeb u32 occur = le32_to_cpu(dump_data.trig->occurrences); 961bfcc09ddSBjoern A. Zeeb u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]); 962bfcc09ddSBjoern A. Zeeb 963bfcc09ddSBjoern A. Zeeb if (!occur) 964bfcc09ddSBjoern A. Zeeb return; 965bfcc09ddSBjoern A. Zeeb 966bfcc09ddSBjoern A. Zeeb mod_timer(t, jiffies + msecs_to_jiffies(collect_interval)); 967bfcc09ddSBjoern A. Zeeb } 968bfcc09ddSBjoern A. Zeeb } 969bfcc09ddSBjoern A. Zeeb 970bfcc09ddSBjoern A. Zeeb static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt) 971bfcc09ddSBjoern A. Zeeb { 972bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 973bfcc09ddSBjoern A. Zeeb struct list_head *trig_list = 974bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list; 975bfcc09ddSBjoern A. Zeeb 976bfcc09ddSBjoern A. Zeeb list_for_each_entry(node, trig_list, list) { 977bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data; 978bfcc09ddSBjoern A. 
Zeeb struct iwl_dbg_tlv_timer_node *timer_node; 979bfcc09ddSBjoern A. Zeeb u32 occur = le32_to_cpu(trig->occurrences), collect_interval; 980bfcc09ddSBjoern A. Zeeb u32 min_interval = 100; 981bfcc09ddSBjoern A. Zeeb 982bfcc09ddSBjoern A. Zeeb if (!occur) 983bfcc09ddSBjoern A. Zeeb continue; 984bfcc09ddSBjoern A. Zeeb 985bfcc09ddSBjoern A. Zeeb /* make sure there is at least one dword of data for the 986bfcc09ddSBjoern A. Zeeb * interval value 987bfcc09ddSBjoern A. Zeeb */ 988bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(node->tlv.length) < 989bfcc09ddSBjoern A. Zeeb sizeof(*trig) + sizeof(__le32)) { 990bfcc09ddSBjoern A. Zeeb IWL_ERR(fwrt, 991bfcc09ddSBjoern A. Zeeb "WRT: Invalid periodic trigger data was not given\n"); 992bfcc09ddSBjoern A. Zeeb continue; 993bfcc09ddSBjoern A. Zeeb } 994bfcc09ddSBjoern A. Zeeb 995bfcc09ddSBjoern A. Zeeb if (le32_to_cpu(trig->data[0]) < min_interval) { 996bfcc09ddSBjoern A. Zeeb IWL_WARN(fwrt, 997bfcc09ddSBjoern A. Zeeb "WRT: Override min interval from %u to %u msec\n", 998bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->data[0]), min_interval); 999bfcc09ddSBjoern A. Zeeb trig->data[0] = cpu_to_le32(min_interval); 1000bfcc09ddSBjoern A. Zeeb } 1001bfcc09ddSBjoern A. Zeeb 1002bfcc09ddSBjoern A. Zeeb collect_interval = le32_to_cpu(trig->data[0]); 1003bfcc09ddSBjoern A. Zeeb 1004bfcc09ddSBjoern A. Zeeb timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL); 1005bfcc09ddSBjoern A. Zeeb if (!timer_node) { 1006bfcc09ddSBjoern A. Zeeb IWL_ERR(fwrt, 1007bfcc09ddSBjoern A. Zeeb "WRT: Failed to allocate periodic trigger\n"); 1008bfcc09ddSBjoern A. Zeeb continue; 1009bfcc09ddSBjoern A. Zeeb } 1010bfcc09ddSBjoern A. Zeeb 1011bfcc09ddSBjoern A. Zeeb timer_node->fwrt = fwrt; 1012bfcc09ddSBjoern A. Zeeb timer_node->tlv = &node->tlv; 1013bfcc09ddSBjoern A. Zeeb timer_setup(&timer_node->timer, 1014bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_periodic_trig_handler, 0); 1015bfcc09ddSBjoern A. Zeeb 1016bfcc09ddSBjoern A. 
Zeeb list_add_tail(&timer_node->list, 1017bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.periodic_trig_list); 1018bfcc09ddSBjoern A. Zeeb 1019bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n"); 1020bfcc09ddSBjoern A. Zeeb 1021bfcc09ddSBjoern A. Zeeb mod_timer(&timer_node->timer, 1022bfcc09ddSBjoern A. Zeeb jiffies + msecs_to_jiffies(collect_interval)); 1023bfcc09ddSBjoern A. Zeeb } 1024bfcc09ddSBjoern A. Zeeb } 1025bfcc09ddSBjoern A. Zeeb 1026bfcc09ddSBjoern A. Zeeb static bool is_trig_data_contained(const struct iwl_ucode_tlv *new, 1027bfcc09ddSBjoern A. Zeeb const struct iwl_ucode_tlv *old) 1028bfcc09ddSBjoern A. Zeeb { 1029bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data; 1030bfcc09ddSBjoern A. Zeeb const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data; 1031bfcc09ddSBjoern A. Zeeb const __le32 *new_data = new_trig->data, *old_data = old_trig->data; 1032bfcc09ddSBjoern A. Zeeb u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data); 1033bfcc09ddSBjoern A. Zeeb u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data); 1034bfcc09ddSBjoern A. Zeeb int i, j; 1035bfcc09ddSBjoern A. Zeeb 1036bfcc09ddSBjoern A. Zeeb for (i = 0; i < new_dwords_num; i++) { 1037bfcc09ddSBjoern A. Zeeb bool match = false; 1038bfcc09ddSBjoern A. Zeeb 1039bfcc09ddSBjoern A. Zeeb for (j = 0; j < old_dwords_num; j++) { 1040bfcc09ddSBjoern A. Zeeb if (new_data[i] == old_data[j]) { 1041bfcc09ddSBjoern A. Zeeb match = true; 1042bfcc09ddSBjoern A. Zeeb break; 1043bfcc09ddSBjoern A. Zeeb } 1044bfcc09ddSBjoern A. Zeeb } 1045bfcc09ddSBjoern A. Zeeb if (!match) 1046bfcc09ddSBjoern A. Zeeb return false; 1047bfcc09ddSBjoern A. Zeeb } 1048bfcc09ddSBjoern A. Zeeb 1049bfcc09ddSBjoern A. Zeeb return true; 1050bfcc09ddSBjoern A. Zeeb } 1051bfcc09ddSBjoern A. Zeeb 1052bfcc09ddSBjoern A. Zeeb static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt, 1053bfcc09ddSBjoern A. 
Zeeb struct iwl_ucode_tlv *trig_tlv, 1054bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node) 1055bfcc09ddSBjoern A. Zeeb { 1056bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv *node_tlv = &node->tlv; 1057bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data; 1058bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data; 1059bfcc09ddSBjoern A. Zeeb u32 policy = le32_to_cpu(trig->apply_policy); 1060bfcc09ddSBjoern A. Zeeb u32 size = le32_to_cpu(trig_tlv->length); 1061bfcc09ddSBjoern A. Zeeb u32 trig_data_len = size - sizeof(*trig); 1062bfcc09ddSBjoern A. Zeeb u32 offset = 0; 1063bfcc09ddSBjoern A. Zeeb 1064bfcc09ddSBjoern A. Zeeb if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) { 1065bfcc09ddSBjoern A. Zeeb u32 data_len = le32_to_cpu(node_tlv->length) - 1066bfcc09ddSBjoern A. Zeeb sizeof(*node_trig); 1067bfcc09ddSBjoern A. Zeeb 1068bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1069bfcc09ddSBjoern A. Zeeb "WRT: Appending trigger data (time point %u)\n", 1070bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1071bfcc09ddSBjoern A. Zeeb 1072bfcc09ddSBjoern A. Zeeb offset += data_len; 1073bfcc09ddSBjoern A. Zeeb size += data_len; 1074bfcc09ddSBjoern A. Zeeb } else { 1075bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1076bfcc09ddSBjoern A. Zeeb "WRT: Overriding trigger data (time point %u)\n", 1077bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1078bfcc09ddSBjoern A. Zeeb } 1079bfcc09ddSBjoern A. Zeeb 1080bfcc09ddSBjoern A. Zeeb if (size != le32_to_cpu(node_tlv->length)) { 1081bfcc09ddSBjoern A. Zeeb struct list_head *prev = node->list.prev; 1082bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *tmp; 1083bfcc09ddSBjoern A. Zeeb 1084bfcc09ddSBjoern A. Zeeb list_del(&node->list); 1085bfcc09ddSBjoern A. Zeeb 1086bfcc09ddSBjoern A. Zeeb tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL); 1087bfcc09ddSBjoern A. Zeeb if (!tmp) { 1088bfcc09ddSBjoern A. Zeeb IWL_WARN(fwrt, 1089bfcc09ddSBjoern A. 
Zeeb "WRT: No memory to override trigger (time point %u)\n", 1090bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1091bfcc09ddSBjoern A. Zeeb 1092bfcc09ddSBjoern A. Zeeb list_add(&node->list, prev); 1093bfcc09ddSBjoern A. Zeeb 1094bfcc09ddSBjoern A. Zeeb return -ENOMEM; 1095bfcc09ddSBjoern A. Zeeb } 1096bfcc09ddSBjoern A. Zeeb 1097bfcc09ddSBjoern A. Zeeb list_add(&tmp->list, prev); 1098bfcc09ddSBjoern A. Zeeb node_tlv = &tmp->tlv; 1099bfcc09ddSBjoern A. Zeeb node_trig = (void *)node_tlv->data; 1100bfcc09ddSBjoern A. Zeeb } 1101bfcc09ddSBjoern A. Zeeb 1102*a4128aadSBjoern A. Zeeb memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len); 1103bfcc09ddSBjoern A. Zeeb node_tlv->length = cpu_to_le32(size); 1104bfcc09ddSBjoern A. Zeeb 1105bfcc09ddSBjoern A. Zeeb if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) { 1106bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1107bfcc09ddSBjoern A. Zeeb "WRT: Overriding trigger configuration (time point %u)\n", 1108bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1109bfcc09ddSBjoern A. Zeeb 1110bfcc09ddSBjoern A. Zeeb /* the first 11 dwords are configuration related */ 1111bfcc09ddSBjoern A. Zeeb memcpy(node_trig, trig, sizeof(__le32) * 11); 1112bfcc09ddSBjoern A. Zeeb } 1113bfcc09ddSBjoern A. Zeeb 1114bfcc09ddSBjoern A. Zeeb if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) { 1115bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1116bfcc09ddSBjoern A. Zeeb "WRT: Overriding trigger regions (time point %u)\n", 1117bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1118bfcc09ddSBjoern A. Zeeb 1119bfcc09ddSBjoern A. Zeeb node_trig->regions_mask = trig->regions_mask; 1120bfcc09ddSBjoern A. Zeeb } else { 1121bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1122bfcc09ddSBjoern A. Zeeb "WRT: Appending trigger regions (time point %u)\n", 1123bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1124bfcc09ddSBjoern A. Zeeb 1125bfcc09ddSBjoern A. Zeeb node_trig->regions_mask |= trig->regions_mask; 1126bfcc09ddSBjoern A. 
Zeeb } 1127bfcc09ddSBjoern A. Zeeb 1128bfcc09ddSBjoern A. Zeeb return 0; 1129bfcc09ddSBjoern A. Zeeb } 1130bfcc09ddSBjoern A. Zeeb 1131bfcc09ddSBjoern A. Zeeb static int 1132bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt, 1133bfcc09ddSBjoern A. Zeeb struct list_head *trig_list, 1134bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv *trig_tlv) 1135bfcc09ddSBjoern A. Zeeb { 1136bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data; 1137bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node, *match = NULL; 1138bfcc09ddSBjoern A. Zeeb u32 policy = le32_to_cpu(trig->apply_policy); 1139bfcc09ddSBjoern A. Zeeb 1140bfcc09ddSBjoern A. Zeeb list_for_each_entry(node, trig_list, list) { 1141bfcc09ddSBjoern A. Zeeb if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT)) 1142bfcc09ddSBjoern A. Zeeb break; 1143bfcc09ddSBjoern A. Zeeb 1144bfcc09ddSBjoern A. Zeeb if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) || 1145bfcc09ddSBjoern A. Zeeb is_trig_data_contained(trig_tlv, &node->tlv)) { 1146bfcc09ddSBjoern A. Zeeb match = node; 1147bfcc09ddSBjoern A. Zeeb break; 1148bfcc09ddSBjoern A. Zeeb } 1149bfcc09ddSBjoern A. Zeeb } 1150bfcc09ddSBjoern A. Zeeb 1151bfcc09ddSBjoern A. Zeeb if (!match) { 1152bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n", 1153bfcc09ddSBjoern A. Zeeb le32_to_cpu(trig->time_point)); 1154*a4128aadSBjoern A. Zeeb if (!iwl_dbg_tlv_add(trig_tlv, trig_list)) 1155*a4128aadSBjoern A. Zeeb return -ENOMEM; 1156*a4128aadSBjoern A. Zeeb return 0; 1157bfcc09ddSBjoern A. Zeeb } 1158bfcc09ddSBjoern A. Zeeb 1159bfcc09ddSBjoern A. Zeeb return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match); 1160bfcc09ddSBjoern A. Zeeb } 1161bfcc09ddSBjoern A. Zeeb 1162bfcc09ddSBjoern A. Zeeb static void 1163bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt, 1164bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_time_point_data *tp) 1165bfcc09ddSBjoern A. 
Zeeb { 1166bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 1167bfcc09ddSBjoern A. Zeeb struct list_head *trig_list = &tp->trig_list; 1168bfcc09ddSBjoern A. Zeeb struct list_head *active_trig_list = &tp->active_trig_list; 1169bfcc09ddSBjoern A. Zeeb 1170bfcc09ddSBjoern A. Zeeb list_for_each_entry(node, trig_list, list) { 1171bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv *tlv = &node->tlv; 1172bfcc09ddSBjoern A. Zeeb 1173bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv); 1174bfcc09ddSBjoern A. Zeeb } 1175bfcc09ddSBjoern A. Zeeb } 1176bfcc09ddSBjoern A. Zeeb 1177bfcc09ddSBjoern A. Zeeb static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, 1178bfcc09ddSBjoern A. Zeeb struct iwl_fwrt_dump_data *dump_data, 1179bfcc09ddSBjoern A. Zeeb union iwl_dbg_tlv_tp_data *tp_data, 1180bfcc09ddSBjoern A. Zeeb u32 trig_data) 1181bfcc09ddSBjoern A. Zeeb { 1182bfcc09ddSBjoern A. Zeeb struct iwl_rx_packet *pkt = tp_data->fw_pkt; 1183bfcc09ddSBjoern A. Zeeb struct iwl_cmd_header *wanted_hdr = (void *)&trig_data; 1184bfcc09ddSBjoern A. Zeeb 1185bfcc09ddSBjoern A. Zeeb if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd && 1186bfcc09ddSBjoern A. Zeeb pkt->hdr.group_id == wanted_hdr->group_id)) { 1187bfcc09ddSBjoern A. Zeeb struct iwl_rx_packet *fw_pkt = 1188bfcc09ddSBjoern A. Zeeb kmemdup(pkt, 1189bfcc09ddSBjoern A. Zeeb sizeof(*pkt) + iwl_rx_packet_payload_len(pkt), 1190bfcc09ddSBjoern A. Zeeb GFP_ATOMIC); 1191bfcc09ddSBjoern A. Zeeb 1192bfcc09ddSBjoern A. Zeeb if (!fw_pkt) 1193bfcc09ddSBjoern A. Zeeb return false; 1194bfcc09ddSBjoern A. Zeeb 1195bfcc09ddSBjoern A. Zeeb dump_data->fw_pkt = fw_pkt; 1196bfcc09ddSBjoern A. Zeeb 1197bfcc09ddSBjoern A. Zeeb return true; 1198bfcc09ddSBjoern A. Zeeb } 1199bfcc09ddSBjoern A. Zeeb 1200bfcc09ddSBjoern A. Zeeb return false; 1201bfcc09ddSBjoern A. Zeeb } 1202bfcc09ddSBjoern A. Zeeb 1203bfcc09ddSBjoern A. Zeeb static int 1204bfcc09ddSBjoern A. 
Zeeb iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, 1205bfcc09ddSBjoern A. Zeeb struct list_head *active_trig_list, 1206bfcc09ddSBjoern A. Zeeb union iwl_dbg_tlv_tp_data *tp_data, 1207bfcc09ddSBjoern A. Zeeb bool (*data_check)(struct iwl_fw_runtime *fwrt, 1208bfcc09ddSBjoern A. Zeeb struct iwl_fwrt_dump_data *dump_data, 1209bfcc09ddSBjoern A. Zeeb union iwl_dbg_tlv_tp_data *tp_data, 1210bfcc09ddSBjoern A. Zeeb u32 trig_data)) 1211bfcc09ddSBjoern A. Zeeb { 1212bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_node *node; 1213bfcc09ddSBjoern A. Zeeb 1214bfcc09ddSBjoern A. Zeeb list_for_each_entry(node, active_trig_list, list) { 1215bfcc09ddSBjoern A. Zeeb struct iwl_fwrt_dump_data dump_data = { 1216bfcc09ddSBjoern A. Zeeb .trig = (void *)node->tlv.data, 1217bfcc09ddSBjoern A. Zeeb }; 1218bfcc09ddSBjoern A. Zeeb u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig, 1219bfcc09ddSBjoern A. Zeeb data); 1220bfcc09ddSBjoern A. Zeeb int ret, i; 1221d9836fb4SBjoern A. Zeeb u32 tp = le32_to_cpu(dump_data.trig->time_point); 1222d9836fb4SBjoern A. Zeeb 1223bfcc09ddSBjoern A. Zeeb 1224bfcc09ddSBjoern A. Zeeb if (!num_data) { 1225bfcc09ddSBjoern A. Zeeb ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); 1226bfcc09ddSBjoern A. Zeeb if (ret) 1227bfcc09ddSBjoern A. Zeeb return ret; 1228bfcc09ddSBjoern A. Zeeb } 1229bfcc09ddSBjoern A. Zeeb 1230bfcc09ddSBjoern A. Zeeb for (i = 0; i < num_data; i++) { 1231bfcc09ddSBjoern A. Zeeb if (!data_check || 1232bfcc09ddSBjoern A. Zeeb data_check(fwrt, &dump_data, tp_data, 1233bfcc09ddSBjoern A. Zeeb le32_to_cpu(dump_data.trig->data[i]))) { 1234bfcc09ddSBjoern A. Zeeb ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); 1235bfcc09ddSBjoern A. Zeeb if (ret) 1236bfcc09ddSBjoern A. Zeeb return ret; 1237bfcc09ddSBjoern A. Zeeb 1238bfcc09ddSBjoern A. Zeeb break; 1239bfcc09ddSBjoern A. Zeeb } 1240bfcc09ddSBjoern A. Zeeb } 1241bfcc09ddSBjoern A. Zeeb 1242*a4128aadSBjoern A. 
/* NOTE(review): tail of a trigger/reset-fw handling function that begins before this chunk; left untouched. */
Zeeb fwrt->trans->dbg.restart_required = false; 1243d9836fb4SBjoern A. Zeeb 1244d9836fb4SBjoern A. Zeeb if (fwrt->trans->trans_cfg->device_family == 1245d9836fb4SBjoern A. Zeeb IWL_DEVICE_FAMILY_9000) { 1246*a4128aadSBjoern A. Zeeb fwrt->trans->dbg.restart_required = true; 1247d9836fb4SBjoern A. Zeeb } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && 1248d9836fb4SBjoern A. Zeeb fwrt->trans->dbg.last_tp_resetfw == 1249d9836fb4SBjoern A. Zeeb IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { 1250*a4128aadSBjoern A. Zeeb fwrt->trans->dbg.restart_required = false; 1251d9836fb4SBjoern A. Zeeb fwrt->trans->dbg.last_tp_resetfw = 0xFF; 1252d9836fb4SBjoern A. Zeeb } else if (le32_to_cpu(dump_data.trig->reset_fw) == 1253d9836fb4SBjoern A. Zeeb IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { 1254*a4128aadSBjoern A. Zeeb fwrt->trans->dbg.restart_required = true; 1255d9836fb4SBjoern A. Zeeb } else if (le32_to_cpu(dump_data.trig->reset_fw) == 1256d9836fb4SBjoern A. Zeeb IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { 1257*a4128aadSBjoern A. Zeeb fwrt->trans->dbg.restart_required = false; 1258d9836fb4SBjoern A. Zeeb fwrt->trans->dbg.last_tp_resetfw = 1259d9836fb4SBjoern A. Zeeb le32_to_cpu(dump_data.trig->reset_fw); 1260d9836fb4SBjoern A. Zeeb } else if (le32_to_cpu(dump_data.trig->reset_fw) == 1261d9836fb4SBjoern A. Zeeb IWL_FW_INI_RESET_FW_MODE_NOTHING) { 1262*a4128aadSBjoern A. Zeeb /* nothing */ 1263d9836fb4SBjoern A. Zeeb } else { 1264d9836fb4SBjoern A. Zeeb IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", 1265d9836fb4SBjoern A. Zeeb le32_to_cpu(dump_data.trig->reset_fw)); 1266d9836fb4SBjoern A. Zeeb } 1267d9836fb4SBjoern A. Zeeb } 1268bfcc09ddSBjoern A. Zeeb return 0; 1269bfcc09ddSBjoern A. Zeeb } 1270bfcc09ddSBjoern A. Zeeb
/*
 * iwl_dbg_tlv_init_cfg() - choose the ini debug destination and set up buffers.
 *
 * When no destination has been picked yet (*ini_dest == INVALID), first build
 * the active trigger list for every time point.  For a non-DRAM destination the
 * function returns early; for DRAM it falls through so the loop below clears
 * the buffers on restart (see the in-code comment at 1289-1292).
 *
 * The loop walks all allocation ids: the first valid buf_location found becomes
 * the overall destination (*ini_dest), and fragments are allocated for every
 * allocation id matching it.  Ids with no valid location, or whose fragment
 * allocation fails, are collected in the failed_alloc bitmask.  Finally, any
 * active DRAM-buffer region referring to a failed allocation id is freed,
 * NULLed, and flagged in unsupported_region_msk so later dumps skip it.
 */
1271*a4128aadSBjoern A. Zeeb void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) 1272bfcc09ddSBjoern A. Zeeb { 1273bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest; 1274bfcc09ddSBjoern A. Zeeb int ret, i; 1275bfcc09ddSBjoern A.
Zeeb u32 failed_alloc = 0; 1276bfcc09ddSBjoern A. Zeeb 12779af1bba4SBjoern A. Zeeb if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) { 1278bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1279bfcc09ddSBjoern A. Zeeb "WRT: Generating active triggers list, domain 0x%x\n", 1280bfcc09ddSBjoern A. Zeeb fwrt->trans->dbg.domains_bitmap); 1281bfcc09ddSBjoern A. Zeeb 1282bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) { 1283bfcc09ddSBjoern A. Zeeb struct iwl_dbg_tlv_time_point_data *tp = 1284bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.time_point[i]; 1285bfcc09ddSBjoern A. Zeeb 1286bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_gen_active_trig_list(fwrt, tp); 1287bfcc09ddSBjoern A. Zeeb } 12889af1bba4SBjoern A. Zeeb } else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) { 12899af1bba4SBjoern A. Zeeb /* For DRAM, go through the loop below to clear all the buffers 12909af1bba4SBjoern A. Zeeb * properly on restart, otherwise garbage may be left there and 12919af1bba4SBjoern A. Zeeb * leak into new debug dumps. 12929af1bba4SBjoern A. Zeeb */ 12939af1bba4SBjoern A. Zeeb return; 12949af1bba4SBjoern A. Zeeb } 1295bfcc09ddSBjoern A. Zeeb 1296bfcc09ddSBjoern A. Zeeb *ini_dest = IWL_FW_INI_LOCATION_INVALID; 1297bfcc09ddSBjoern A. Zeeb for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) { 1298bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = 1299bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.fw_mon_cfg[i]; 1300bfcc09ddSBjoern A. Zeeb u32 dest = le32_to_cpu(fw_mon_cfg->buf_location); 1301bfcc09ddSBjoern A. Zeeb 1302bfcc09ddSBjoern A. Zeeb if (dest == IWL_FW_INI_LOCATION_INVALID) { 1303bfcc09ddSBjoern A. Zeeb failed_alloc |= BIT(i); 1304bfcc09ddSBjoern A. Zeeb continue; 1305bfcc09ddSBjoern A. Zeeb } 1306bfcc09ddSBjoern A. Zeeb 1307bfcc09ddSBjoern A. Zeeb if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) 1308bfcc09ddSBjoern A. Zeeb *ini_dest = dest; 1309bfcc09ddSBjoern A. Zeeb 1310bfcc09ddSBjoern A. Zeeb if (dest != *ini_dest) 1311bfcc09ddSBjoern A.
Zeeb continue; 1312bfcc09ddSBjoern A. Zeeb 1313bfcc09ddSBjoern A. Zeeb ret = iwl_dbg_tlv_alloc_fragments(fwrt, i); 1314bfcc09ddSBjoern A. Zeeb 1315bfcc09ddSBjoern A. Zeeb if (ret) { 1316bfcc09ddSBjoern A. Zeeb IWL_WARN(fwrt, 1317bfcc09ddSBjoern A. Zeeb "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n", 1318bfcc09ddSBjoern A. Zeeb i, ret); 1319bfcc09ddSBjoern A. Zeeb failed_alloc |= BIT(i); 1320bfcc09ddSBjoern A. Zeeb } 1321bfcc09ddSBjoern A. Zeeb } 1322bfcc09ddSBjoern A. Zeeb 1323bfcc09ddSBjoern A. Zeeb if (!failed_alloc) 1324bfcc09ddSBjoern A. Zeeb return; 1325bfcc09ddSBjoern A. Zeeb 1326bfcc09ddSBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) { 1327bfcc09ddSBjoern A. Zeeb struct iwl_fw_ini_region_tlv *reg; 1328bfcc09ddSBjoern A. Zeeb struct iwl_ucode_tlv **active_reg = 1329bfcc09ddSBjoern A. Zeeb &fwrt->trans->dbg.active_regions[i]; 1330bfcc09ddSBjoern A. Zeeb u32 reg_type; 1331bfcc09ddSBjoern A. Zeeb 1332bfcc09ddSBjoern A. Zeeb if (!*active_reg) { 1333bfcc09ddSBjoern A. Zeeb fwrt->trans->dbg.unsupported_region_msk |= BIT(i); 1334bfcc09ddSBjoern A. Zeeb continue; 1335bfcc09ddSBjoern A. Zeeb } 1336bfcc09ddSBjoern A. Zeeb 1337bfcc09ddSBjoern A. Zeeb reg = (void *)(*active_reg)->data; 1338d9836fb4SBjoern A. Zeeb reg_type = reg->type; 1339bfcc09ddSBjoern A. Zeeb 1340bfcc09ddSBjoern A. Zeeb if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER || 1341bfcc09ddSBjoern A. Zeeb !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc)) 1342bfcc09ddSBjoern A. Zeeb continue; 1343bfcc09ddSBjoern A. Zeeb 1344bfcc09ddSBjoern A. Zeeb IWL_DEBUG_FW(fwrt, 1345bfcc09ddSBjoern A. Zeeb "WRT: removing allocation id %d from region id %d\n", 1346bfcc09ddSBjoern A. Zeeb le32_to_cpu(reg->dram_alloc_id), i); 1347bfcc09ddSBjoern A. Zeeb 13489af1bba4SBjoern A. Zeeb failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id)); 1349bfcc09ddSBjoern A. Zeeb fwrt->trans->dbg.unsupported_region_msk |= BIT(i); 1350bfcc09ddSBjoern A.
Zeeb 1351bfcc09ddSBjoern A. Zeeb kfree(*active_reg); 1352bfcc09ddSBjoern A. Zeeb *active_reg = NULL; 1353bfcc09ddSBjoern A. Zeeb } 1354bfcc09ddSBjoern A. Zeeb } 1355bfcc09ddSBjoern A. Zeeb
/*
 * _iwl_dbg_tlv_time_point() - run the debug actions attached to @tp_id.
 *
 * Bails out unless ini debug is valid on the transport and tp_id is within
 * (INVALID, NUM).  Each time point then gets its own mix of actions:
 *  - EARLY: init the debug config (iwl_dbg_tlv_init_cfg), apply the config
 *    list, update DRAM allocations, then run the active triggers;
 *  - AFTER_ALIVE: apply the buffers first, then hcmds, config and triggers;
 *  - PERIODIC: only (re)arm periodic triggers and send the hcmd list;
 *  - FW_RSP_OR_NOTIF / MISSED_BEACONS / FW_DHC_NOTIFICATION: like the default
 *    path but pass iwl_dbg_tlv_check_fw_pkt so the trigger code can match
 *    the received firmware packet in @tp_data;
 *  - default: send hcmds, apply config, run triggers with no packet filter.
 * @sync is forwarded unchanged to iwl_dbg_tlv_tp_trigger().
 */
1356bfcc09ddSBjoern A. Zeeb void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, 1357bfcc09ddSBjoern A. Zeeb enum iwl_fw_ini_time_point tp_id, 1358bfcc09ddSBjoern A. Zeeb union iwl_dbg_tlv_tp_data *tp_data, 1359bfcc09ddSBjoern A. Zeeb bool sync) 1360bfcc09ddSBjoern A. Zeeb { 1361bfcc09ddSBjoern A. Zeeb struct list_head *hcmd_list, *trig_list, *conf_list; 1362bfcc09ddSBjoern A. Zeeb 1363bfcc09ddSBjoern A. Zeeb if (!iwl_trans_dbg_ini_valid(fwrt->trans) || 1364bfcc09ddSBjoern A. Zeeb tp_id == IWL_FW_INI_TIME_POINT_INVALID || 1365bfcc09ddSBjoern A. Zeeb tp_id >= IWL_FW_INI_TIME_POINT_NUM) 1366bfcc09ddSBjoern A. Zeeb return; 1367bfcc09ddSBjoern A. Zeeb 1368bfcc09ddSBjoern A. Zeeb hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list; 1369bfcc09ddSBjoern A. Zeeb trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list; 1370bfcc09ddSBjoern A. Zeeb conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list; 1371bfcc09ddSBjoern A. Zeeb 1372bfcc09ddSBjoern A. Zeeb switch (tp_id) { 1373bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_TIME_POINT_EARLY: 1374bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_init_cfg(fwrt); 1375bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_apply_config(fwrt, conf_list); 1376bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_update_drams(fwrt); 1377bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); 1378bfcc09ddSBjoern A. Zeeb break; 1379bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: 1380bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_apply_buffers(fwrt); 1381bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); 1382bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_apply_config(fwrt, conf_list); 1383bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); 1384bfcc09ddSBjoern A. Zeeb break; 1385bfcc09ddSBjoern A.
Zeeb case IWL_FW_INI_TIME_POINT_PERIODIC: 1386bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_set_periodic_trigs(fwrt); 1387bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); 1388bfcc09ddSBjoern A. Zeeb break; 1389bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: 1390bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: 1391bfcc09ddSBjoern A. Zeeb case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: 1392bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); 1393bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_apply_config(fwrt, conf_list); 1394bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, 1395bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_check_fw_pkt); 1396bfcc09ddSBjoern A. Zeeb break; 1397bfcc09ddSBjoern A. Zeeb default: 1398bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); 1399bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_apply_config(fwrt, conf_list); 1400bfcc09ddSBjoern A. Zeeb iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); 1401bfcc09ddSBjoern A. Zeeb break; 1402bfcc09ddSBjoern A. Zeeb } 1403bfcc09ddSBjoern A. Zeeb } 1404bfcc09ddSBjoern A. Zeeb IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point); 1405