xref: /linux/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c (revision 7bb377107c72a40ab7505341f8626c8eb79a0cb7)
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright (C) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
};

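/*
 * Copy a TLV (header plus payload) into a freshly allocated list node and
 * append it to @list.  The node embeds the TLV, so a single kfree() of the
 * node releases everything.
 */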
static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}

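/*
 * Check the TLV version (taken from the INI header inside the payload)
 * against the supported range in dbg_ver_table.  The caller is expected to
 * have validated the TLV type already, since tlv_idx indexes the table
 * unchecked here.
 */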
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

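/*
 * Validate a buffer allocation TLV and cache it in fw_mon_cfg[].  Only the
 * SRAM, DRAM and NPK destinations are accepted, and the internal (SMEM/NPK)
 * paths are restricted to the DBGC1 allocation id.
 */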
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
	u32 buf_location = le32_to_cpu(alloc->buf_location);
	u32 alloc_id = le32_to_cpu(alloc->alloc_id);

	if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
	    (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
	     buf_location != IWL_FW_INI_LOCATION_DRAM_PATH &&
	     buf_location != IWL_FW_INI_LOCATION_NPK_PATH)) {
		IWL_ERR(trans,
			"WRT: Invalid allocation TLV\n");
		return -EINVAL;
	}

	if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
	     buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
	     alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) {
		IWL_ERR(trans,
			"WRT: Allocation TLV for SMEM/NPK path must have id %u (current: %u)\n",
			IWL_FW_INI_ALLOCATION_ID_DBGC1, alloc_id);
		return -EINVAL;
	}

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid allocation id %u for allocation TLV\n",
			alloc_id);
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent at the early time point, since
	 * the FW is not yet ready to handle them.
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

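/*
 * Validate a region TLV and store it in the active_regions array, indexed
 * by region id.  A region arriving with an id that is already in use
 * replaces the previous definition (with a warning), which allows a later
 * configuration, e.g. an external one, to override an earlier one.
 */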
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u32 type = le32_to_cpu(reg->type);
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

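/*
 * Validate a trigger TLV and queue it on the trig_list of its time point.
 * An occurrences count of 0 means "no limit" and is normalized to -1 here,
 * so downstream code that counts occurrences down effectively never runs
 * out.
 */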
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
	u32 tp = le32_to_cpu(trig->time_point);

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	if (!le32_to_cpu(trig->occurrences))
		trig->occurrences = cpu_to_le32(-1);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
}

static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
};

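/*
 * Top-level entry point for a single debug TLV, reached while parsing
 * either the firmware file (ext == false) or the external
 * iwl-debug-yoyo.bin (ext == true).  TLVs whose domain is not enabled in
 * domains_bitmap are silently skipped; any parsing failure marks the
 * corresponding ini config state as corrupted.
 */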
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%x (0x%x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

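/*
 * Walk a flat buffer of TLVs and hand each one to iwl_dbg_tlv_alloc() as an
 * external (ext == true) TLV.  Each entry is a struct iwl_ucode_tlv header
 * followed by its payload, padded to a 4-byte boundary:
 *
 *   | type | length | data[length] | pad to 4 | type | length | ...
 *
 * so the walk advances by sizeof(*tlv) + ALIGN(tlv_len, 4) per entry.
 */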
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	int res;

	if (!iwlwifi_mod_params.enable_ini)
		return;

	res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

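/*
 * Allocate one DMA-coherent fragment of up to @pages pages.  On allocation
 * failure the requested size is halved (rounding up) and retried, so the
 * caller may receive fewer pages than asked for.  Returns the number of
 * pages actually allocated, or a negative errno.
 */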
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something.  Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

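/*
 * Allocate the DRAM fragments backing one firmware monitor allocation id.
 * The requested buffer size is split evenly across the fragments:
 *
 *   remain_pages = DIV_ROUND_UP(req_size, PAGE_SIZE);
 *   num_frags    = min(max_frags_num, BUF_ALLOC_MAX_NUM_FRAGS, remain_pages);
 *   frag_pages   = DIV_ROUND_UP(remain_pages, num_frags);
 *
 * For example, assuming 4 KiB pages, a 1 MiB request with max_frags_num == 3
 * yields 256 pages split into fragments of 86, 86 and 84 pages.  Firmware
 * without the BUFFER_ALLOCATION command only supports a single DBGC1
 * fragment, which is passed via register/context info instead.
 */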
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

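/*
 * Tell the firmware where the DRAM fragments of an allocation id live by
 * sending one or more BUFFER_ALLOCATION host commands, each carrying at
 * most BUF_ALLOC_MAX_NUM_FRAGS fragment descriptors (address + size).  The
 * first DBGC1 fragment is skipped here because the firmware already learned
 * about it through a register or the context info.
 */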
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}
		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

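/*
 * Timer callback for a periodic trigger: collect a dump and, as long as the
 * trigger still has occurrences left, re-arm the timer with the collection
 * interval stored in the first data dword of the trigger TLV.
 */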
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger: no interval data given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

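/*
 * Return true if every data dword of the new trigger already appears
 * somewhere in the old trigger's data, i.e. the new trigger's data set is
 * contained in the old one.  Order is not significant.
 */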
static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
				   struct iwl_ucode_tlv *old)
{
	struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
	struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
	__le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

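/*
 * Merge an incoming trigger TLV into an already-active trigger node for the
 * same time point.  Depending on the incoming trigger's apply_policy, its
 * data dwords either replace or are appended to the node's data (the node
 * is krealloc'ed when the size changes), the configuration dwords may be
 * overridden, and the regions mask is either replaced or OR'ed in.
 */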
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

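/*
 * Activate a trigger: if the apply_policy asks to match an existing trigger
 * at this time point (optionally also matching on data), merge into the
 * matching node via iwl_dbg_tlv_override_trig_node(); otherwise add the
 * trigger to the active list as a new node.
 */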
static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

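/*
 * data_check callback for FW response/notification time points.  Each
 * trigger data dword is interpreted as a struct iwl_cmd_header (cmd +
 * group id); a 0/0 header acts as a wildcard.  On a match, the packet is
 * duplicated with GFP_ATOMIC so the dump code can keep using it after the
 * RX handler returns.
 */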
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
		    (pkt->hdr.cmd == wanted_hdr->cmd &&
		     pkt->hdr.group_id == wanted_hdr->group_id))) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

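/*
 * Fire all active triggers of a time point.  A trigger without data dwords
 * always collects; a trigger with data collects once if any of its dwords
 * passes the optional @data_check filter.
 */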
static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

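/*
 * Finalize the debug configuration once all TLVs have been parsed: build
 * the active trigger list for every time point, then pick the first
 * configured buffer destination as the overall ini_dest and allocate DRAM
 * fragments for every allocation id that uses that same destination.
 */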
999 
1000 static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
1001 {
1002 	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
1003 	int ret, i;
1004 
1005 	IWL_DEBUG_FW(fwrt,
1006 		     "WRT: Generating active triggers list, domain 0x%x\n",
1007 		     fwrt->trans->dbg.domains_bitmap);
1008 
1009 	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
1010 		struct iwl_dbg_tlv_time_point_data *tp =
1011 			&fwrt->trans->dbg.time_point[i];
1012 
1013 		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
1014 	}
1015 
1016 	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
1017 	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
1018 		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
1019 			&fwrt->trans->dbg.fw_mon_cfg[i];
1020 		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
1021 
1022 		if (dest == IWL_FW_INI_LOCATION_INVALID)
1023 			continue;
1024 
1025 		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
1026 			*ini_dest = dest;
1027 
1028 		if (dest != *ini_dest)
1029 			continue;
1030 
1031 		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
1032 		if (ret)
1033 			IWL_WARN(fwrt,
1034 				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
1035 				 i, ret);
1036 	}
1037 }
1038 
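/*
 * Main time point dispatcher, called by the driver when a debug time point
 * is reached.  EARLY finalizes the configuration; AFTER_ALIVE additionally
 * applies the DRAM buffers and starts sending host commands (presumably
 * safe only once the FW is alive); FW_RSP_OR_NOTIF and MISSED_BEACONS
 * filter triggers against the received packet via
 * iwl_dbg_tlv_check_fw_pkt().
 */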
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			    enum iwl_fw_ini_time_point tp_id,
			    union iwl_dbg_tlv_tp_data *tp_data)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);