xref: /linux/drivers/net/wireless/intel/iwlwifi/fw/dbg.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018        Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <linux/devcoredump.h>
65 #include "iwl-drv.h"
66 #include "runtime.h"
67 #include "dbg.h"
68 #include "debugfs.h"
69 #include "iwl-io.h"
70 #include "iwl-prph.h"
71 #include "iwl-csr.h"
72 
73 /**
74  * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
75  *
76  * @fwrt_ptr: pointer to the buffer coming from fwrt
77  * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
78  *	transport's data.
80  * @fwrt_len: length of the valid data in fwrt_ptr
81  */
82 struct iwl_fw_dump_ptrs {
83 	struct iwl_trans_dump_data *trans_ptr;
84 	void *fwrt_ptr;
85 	u32 fwrt_len;
86 };
87 
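/*
 * Read RADIO_REG_MAX_READ radio register bytes, one at a time, through the
 * indirect RSP_RADIO_CMD/RSP_RADIO_RDDAT pair and append them to the dump
 * as a single IWL_FW_ERROR_DUMP_RADIO_REG TLV.
 */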
88 #define RADIO_REG_MAX_READ 0x2ad
89 static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
90 				struct iwl_fw_error_dump_data **dump_data)
91 {
92 	u8 *pos = (void *)(*dump_data)->data;
93 	unsigned long flags;
94 	int i;
95 
96 	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
97 
98 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
99 		return;
100 
101 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
102 	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
103 
104 	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
105 		u32 rd_cmd = RADIO_RSP_RD_CMD;
106 
107 		rd_cmd |= i << RADIO_RSP_ADDR_POS;
108 		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
109 		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
110 
111 		pos++;
112 	}
113 
114 	*dump_data = iwl_fw_error_next_data(*dump_data);
115 
116 	iwl_trans_release_nic_access(fwrt->trans, &flags);
117 }
118 
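/*
 * Dump one RX FIFO: record its pointers and fence state, lock the fence to
 * the write pointer, then read the FIFO contents out through the
 * fence-increment register into an IWL_FW_ERROR_DUMP_RXF TLV.
 */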
119 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
120 			      struct iwl_fw_error_dump_data **dump_data,
121 			      int size, u32 offset, int fifo_num)
122 {
123 	struct iwl_fw_error_dump_fifo *fifo_hdr;
124 	u32 *fifo_data;
125 	u32 fifo_len;
126 	int i;
127 
128 	fifo_hdr = (void *)(*dump_data)->data;
129 	fifo_data = (void *)fifo_hdr->data;
130 	fifo_len = size;
131 
132 	/* No need to try to read the data if the length is 0 */
133 	if (fifo_len == 0)
134 		return;
135 
136 	/* Add a TLV for the RXF */
137 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
138 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
139 
140 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
141 	fifo_hdr->available_bytes =
142 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
143 						RXF_RD_D_SPACE + offset));
144 	fifo_hdr->wr_ptr =
145 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
146 						RXF_RD_WR_PTR + offset));
147 	fifo_hdr->rd_ptr =
148 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
149 						RXF_RD_RD_PTR + offset));
150 	fifo_hdr->fence_ptr =
151 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
152 						RXF_RD_FENCE_PTR + offset));
153 	fifo_hdr->fence_mode =
154 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
155 						RXF_SET_FENCE_MODE + offset));
156 
157 	/* Lock fence */
158 	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
159 	/* Set fence pointer to the same place as the WR pointer */
160 	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
161 	/* Set fence offset */
162 	iwl_trans_write_prph(fwrt->trans,
163 			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
164 
165 	/* Read FIFO */
166 	fifo_len /= sizeof(u32); /* Size in DWORDS */
167 	for (i = 0; i < fifo_len; i++)
168 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
169 						 RXF_FIFO_RD_FENCE_INC +
170 						 offset);
171 	*dump_data = iwl_fw_error_next_data(*dump_data);
172 }
173 
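/*
 * Dump one TX FIFO: record its pointers and fence state, point the
 * read-modify address at the write pointer, then drain the FIFO through
 * TXF_READ_MODIFY_DATA into an IWL_FW_ERROR_DUMP_TXF TLV.
 */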
174 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
175 			      struct iwl_fw_error_dump_data **dump_data,
176 			      int size, u32 offset, int fifo_num)
177 {
178 	struct iwl_fw_error_dump_fifo *fifo_hdr;
179 	u32 *fifo_data;
180 	u32 fifo_len;
181 	int i;
182 
183 	fifo_hdr = (void *)(*dump_data)->data;
184 	fifo_data = (void *)fifo_hdr->data;
185 	fifo_len = size;
186 
187 	/* No need to try to read the data if the length is 0 */
188 	if (fifo_len == 0)
189 		return;
190 
191 	/* Add a TLV for the FIFO */
192 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
193 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
194 
195 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
196 	fifo_hdr->available_bytes =
197 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
198 						TXF_FIFO_ITEM_CNT + offset));
199 	fifo_hdr->wr_ptr =
200 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
201 						TXF_WR_PTR + offset));
202 	fifo_hdr->rd_ptr =
203 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
204 						TXF_RD_PTR + offset));
205 	fifo_hdr->fence_ptr =
206 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
207 						TXF_FENCE_PTR + offset));
208 	fifo_hdr->fence_mode =
209 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
210 						TXF_LOCK_FENCE + offset));
211 
212 	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
213 	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
214 			     TXF_WR_PTR + offset);
215 
216 	/* Dummy-read to advance the read pointer to the head */
217 	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
218 
219 	/* Read FIFO */
220 	fifo_len /= sizeof(u32); /* Size in DWORDS */
221 	for (i = 0; i < fifo_len; i++)
222 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
223 						  TXF_READ_MODIFY_DATA +
224 						  offset);
225 	*dump_data = iwl_fw_error_next_data(*dump_data);
226 }
227 
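/* Dump RXF1 of each LMAC plus RXF2, when RXF dumping is enabled. */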
228 static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
229 			    struct iwl_fw_error_dump_data **dump_data)
230 {
231 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
232 	unsigned long flags;
233 
234 	IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
235 
236 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
237 		return;
238 
239 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
240 		/* Pull RXF1 */
241 		iwl_fwrt_dump_rxf(fwrt, dump_data,
242 				  cfg->lmac[0].rxfifo1_size, 0, 0);
243 		/* Pull RXF2 */
244 		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
245 				  RXF_DIFF_FROM_PREV, 1);
246 		/* Pull LMAC2 RXF1 */
247 		if (fwrt->smem_cfg.num_lmacs > 1)
248 			iwl_fwrt_dump_rxf(fwrt, dump_data,
249 					  cfg->lmac[1].rxfifo1_size,
250 					  LMAC2_PRPH_OFFSET, 2);
251 	}
252 
253 	iwl_trans_release_nic_access(fwrt->trans, &flags);
254 }
255 
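/*
 * Dump the TX FIFOs of all LMACs and, when the FW advertises the extended
 * shared memory config, the UMAC internal TX FIFOs as well.
 */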
256 static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
257 			    struct iwl_fw_error_dump_data **dump_data)
258 {
259 	struct iwl_fw_error_dump_fifo *fifo_hdr;
260 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
261 	u32 *fifo_data;
262 	u32 fifo_len;
263 	unsigned long flags;
264 	int i, j;
265 
266 	IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
267 
268 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
269 		return;
270 
271 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
272 		/* Pull TXF data from LMAC1 */
273 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
274 			/* Mark the number of TXF we're pulling now */
275 			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
276 			iwl_fwrt_dump_txf(fwrt, dump_data,
277 					  cfg->lmac[0].txfifo_size[i], 0, i);
278 		}
279 
280 		/* Pull TXF data from LMAC2 */
281 		if (fwrt->smem_cfg.num_lmacs > 1) {
282 			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
283 			     i++) {
284 				/* Mark the number of TXF we're pulling now */
285 				iwl_trans_write_prph(fwrt->trans,
286 						     TXF_LARC_NUM +
287 						     LMAC2_PRPH_OFFSET, i);
288 				iwl_fwrt_dump_txf(fwrt, dump_data,
289 						  cfg->lmac[1].txfifo_size[i],
290 						  LMAC2_PRPH_OFFSET,
291 						  i + cfg->num_txfifo_entries);
292 			}
293 		}
294 	}
295 
296 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
297 	    fw_has_capa(&fwrt->fw->ucode_capa,
298 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
299 		/* Pull UMAC internal TXF data from all TXFs */
300 		for (i = 0;
301 		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
302 		     i++) {
303 			fifo_hdr = (void *)(*dump_data)->data;
304 			fifo_data = (void *)fifo_hdr->data;
305 			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
306 
307 			/* No need to try to read the data if the length is 0 */
308 			if (fifo_len == 0)
309 				continue;
310 
311 			/* Add a TLV for the internal FIFOs */
312 			(*dump_data)->type =
313 				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
314 			(*dump_data)->len =
315 				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
316 
317 			fifo_hdr->fifo_num = cpu_to_le32(i);
318 
319 			/* Mark the number of TXF we're pulling now */
320 			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
321 				fwrt->smem_cfg.num_txfifo_entries);
322 
323 			fifo_hdr->available_bytes =
324 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
325 								TXF_CPU2_FIFO_ITEM_CNT));
326 			fifo_hdr->wr_ptr =
327 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
328 								TXF_CPU2_WR_PTR));
329 			fifo_hdr->rd_ptr =
330 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
331 								TXF_CPU2_RD_PTR));
332 			fifo_hdr->fence_ptr =
333 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
334 								TXF_CPU2_FENCE_PTR));
335 			fifo_hdr->fence_mode =
336 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
337 								TXF_CPU2_LOCK_FENCE));
338 
339 			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
340 			iwl_trans_write_prph(fwrt->trans,
341 					     TXF_CPU2_READ_MODIFY_ADDR,
342 					     TXF_CPU2_WR_PTR);
343 
344 			/* Dummy-read to advance the read pointer to head */
345 			iwl_trans_read_prph(fwrt->trans,
346 					    TXF_CPU2_READ_MODIFY_DATA);
347 
348 			/* Read FIFO */
349 			fifo_len /= sizeof(u32); /* Size in DWORDS */
350 			for (j = 0; j < fifo_len; j++)
351 				fifo_data[j] =
352 					iwl_trans_read_prph(fwrt->trans,
353 							    TXF_CPU2_READ_MODIFY_DATA);
354 			*dump_data = iwl_fw_error_next_data(*dump_data);
355 		}
356 	}
357 
358 	iwl_trans_release_nic_access(fwrt->trans, &flags);
359 }
360 
361 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
362 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
363 
364 struct iwl_prph_range {
365 	u32 start, end;
366 };
367 
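/*
 * Periphery register ranges to dump; each range includes both its start and
 * end addresses (hence the "+ 4" when chunk sizes are computed below).
 */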
368 static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
369 	{ .start = 0x00a00000, .end = 0x00a00000 },
370 	{ .start = 0x00a0000c, .end = 0x00a00024 },
371 	{ .start = 0x00a0002c, .end = 0x00a0003c },
372 	{ .start = 0x00a00410, .end = 0x00a00418 },
373 	{ .start = 0x00a00420, .end = 0x00a00420 },
374 	{ .start = 0x00a00428, .end = 0x00a00428 },
375 	{ .start = 0x00a00430, .end = 0x00a0043c },
376 	{ .start = 0x00a00444, .end = 0x00a00444 },
377 	{ .start = 0x00a004c0, .end = 0x00a004cc },
378 	{ .start = 0x00a004d8, .end = 0x00a004d8 },
379 	{ .start = 0x00a004e0, .end = 0x00a004f0 },
380 	{ .start = 0x00a00840, .end = 0x00a00840 },
381 	{ .start = 0x00a00850, .end = 0x00a00858 },
382 	{ .start = 0x00a01004, .end = 0x00a01008 },
383 	{ .start = 0x00a01010, .end = 0x00a01010 },
384 	{ .start = 0x00a01018, .end = 0x00a01018 },
385 	{ .start = 0x00a01024, .end = 0x00a01024 },
386 	{ .start = 0x00a0102c, .end = 0x00a01034 },
387 	{ .start = 0x00a0103c, .end = 0x00a01040 },
388 	{ .start = 0x00a01048, .end = 0x00a01094 },
389 	{ .start = 0x00a01c00, .end = 0x00a01c20 },
390 	{ .start = 0x00a01c58, .end = 0x00a01c58 },
391 	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
392 	{ .start = 0x00a01c28, .end = 0x00a01c54 },
393 	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
394 	{ .start = 0x00a01c60, .end = 0x00a01cdc },
395 	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
396 	{ .start = 0x00a01d18, .end = 0x00a01d20 },
397 	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
398 	{ .start = 0x00a01d40, .end = 0x00a01d5c },
399 	{ .start = 0x00a01d80, .end = 0x00a01d80 },
400 	{ .start = 0x00a01d98, .end = 0x00a01d9c },
401 	{ .start = 0x00a01da8, .end = 0x00a01da8 },
402 	{ .start = 0x00a01db8, .end = 0x00a01df4 },
403 	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
404 	{ .start = 0x00a01e00, .end = 0x00a01e2c },
405 	{ .start = 0x00a01e40, .end = 0x00a01e60 },
406 	{ .start = 0x00a01e68, .end = 0x00a01e6c },
407 	{ .start = 0x00a01e74, .end = 0x00a01e74 },
408 	{ .start = 0x00a01e84, .end = 0x00a01e90 },
409 	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
410 	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
411 	{ .start = 0x00a01f00, .end = 0x00a01f1c },
412 	{ .start = 0x00a01f44, .end = 0x00a01ffc },
413 	{ .start = 0x00a02000, .end = 0x00a02048 },
414 	{ .start = 0x00a02068, .end = 0x00a020f0 },
415 	{ .start = 0x00a02100, .end = 0x00a02118 },
416 	{ .start = 0x00a02140, .end = 0x00a0214c },
417 	{ .start = 0x00a02168, .end = 0x00a0218c },
418 	{ .start = 0x00a021c0, .end = 0x00a021c0 },
419 	{ .start = 0x00a02400, .end = 0x00a02410 },
420 	{ .start = 0x00a02418, .end = 0x00a02420 },
421 	{ .start = 0x00a02428, .end = 0x00a0242c },
422 	{ .start = 0x00a02434, .end = 0x00a02434 },
423 	{ .start = 0x00a02440, .end = 0x00a02460 },
424 	{ .start = 0x00a02468, .end = 0x00a024b0 },
425 	{ .start = 0x00a024c8, .end = 0x00a024cc },
426 	{ .start = 0x00a02500, .end = 0x00a02504 },
427 	{ .start = 0x00a0250c, .end = 0x00a02510 },
428 	{ .start = 0x00a02540, .end = 0x00a02554 },
429 	{ .start = 0x00a02580, .end = 0x00a025f4 },
430 	{ .start = 0x00a02600, .end = 0x00a0260c },
431 	{ .start = 0x00a02648, .end = 0x00a02650 },
432 	{ .start = 0x00a02680, .end = 0x00a02680 },
433 	{ .start = 0x00a026c0, .end = 0x00a026d0 },
434 	{ .start = 0x00a02700, .end = 0x00a0270c },
435 	{ .start = 0x00a02804, .end = 0x00a02804 },
436 	{ .start = 0x00a02818, .end = 0x00a0281c },
437 	{ .start = 0x00a02c00, .end = 0x00a02db4 },
438 	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
439 	{ .start = 0x00a03000, .end = 0x00a03014 },
440 	{ .start = 0x00a0301c, .end = 0x00a0302c },
441 	{ .start = 0x00a03034, .end = 0x00a03038 },
442 	{ .start = 0x00a03040, .end = 0x00a03048 },
443 	{ .start = 0x00a03060, .end = 0x00a03068 },
444 	{ .start = 0x00a03070, .end = 0x00a03074 },
445 	{ .start = 0x00a0307c, .end = 0x00a0307c },
446 	{ .start = 0x00a03080, .end = 0x00a03084 },
447 	{ .start = 0x00a0308c, .end = 0x00a03090 },
448 	{ .start = 0x00a03098, .end = 0x00a03098 },
449 	{ .start = 0x00a030a0, .end = 0x00a030a0 },
450 	{ .start = 0x00a030a8, .end = 0x00a030b4 },
451 	{ .start = 0x00a030bc, .end = 0x00a030bc },
452 	{ .start = 0x00a030c0, .end = 0x00a0312c },
453 	{ .start = 0x00a03c00, .end = 0x00a03c5c },
454 	{ .start = 0x00a04400, .end = 0x00a04454 },
455 	{ .start = 0x00a04460, .end = 0x00a04474 },
456 	{ .start = 0x00a044c0, .end = 0x00a044ec },
457 	{ .start = 0x00a04500, .end = 0x00a04504 },
458 	{ .start = 0x00a04510, .end = 0x00a04538 },
459 	{ .start = 0x00a04540, .end = 0x00a04548 },
460 	{ .start = 0x00a04560, .end = 0x00a0457c },
461 	{ .start = 0x00a04590, .end = 0x00a04598 },
462 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
463 };
464 
465 static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
466 	{ .start = 0x00a05c00, .end = 0x00a05c18 },
467 	{ .start = 0x00a05400, .end = 0x00a056e8 },
468 	{ .start = 0x00a08000, .end = 0x00a098bc },
469 	{ .start = 0x00a02400, .end = 0x00a02758 },
470 };
471 
472 static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
473 	{ .start = 0x00a00000, .end = 0x00a00000 },
474 	{ .start = 0x00a0000c, .end = 0x00a00024 },
475 	{ .start = 0x00a0002c, .end = 0x00a00034 },
476 	{ .start = 0x00a0003c, .end = 0x00a0003c },
477 	{ .start = 0x00a00410, .end = 0x00a00418 },
478 	{ .start = 0x00a00420, .end = 0x00a00420 },
479 	{ .start = 0x00a00428, .end = 0x00a00428 },
480 	{ .start = 0x00a00430, .end = 0x00a0043c },
481 	{ .start = 0x00a00444, .end = 0x00a00444 },
482 	{ .start = 0x00a00840, .end = 0x00a00840 },
483 	{ .start = 0x00a00850, .end = 0x00a00858 },
484 	{ .start = 0x00a01004, .end = 0x00a01008 },
485 	{ .start = 0x00a01010, .end = 0x00a01010 },
486 	{ .start = 0x00a01018, .end = 0x00a01018 },
487 	{ .start = 0x00a01024, .end = 0x00a01024 },
488 	{ .start = 0x00a0102c, .end = 0x00a01034 },
489 	{ .start = 0x00a0103c, .end = 0x00a01040 },
490 	{ .start = 0x00a01048, .end = 0x00a01050 },
491 	{ .start = 0x00a01058, .end = 0x00a01058 },
492 	{ .start = 0x00a01060, .end = 0x00a01070 },
493 	{ .start = 0x00a0108c, .end = 0x00a0108c },
494 	{ .start = 0x00a01c20, .end = 0x00a01c28 },
495 	{ .start = 0x00a01d10, .end = 0x00a01d10 },
496 	{ .start = 0x00a01e28, .end = 0x00a01e2c },
497 	{ .start = 0x00a01e60, .end = 0x00a01e60 },
498 	{ .start = 0x00a01e80, .end = 0x00a01e80 },
499 	{ .start = 0x00a01ea0, .end = 0x00a01ea0 },
500 	{ .start = 0x00a02000, .end = 0x00a0201c },
501 	{ .start = 0x00a02024, .end = 0x00a02024 },
502 	{ .start = 0x00a02040, .end = 0x00a02048 },
503 	{ .start = 0x00a020c0, .end = 0x00a020e0 },
504 	{ .start = 0x00a02400, .end = 0x00a02404 },
505 	{ .start = 0x00a0240c, .end = 0x00a02414 },
506 	{ .start = 0x00a0241c, .end = 0x00a0243c },
507 	{ .start = 0x00a02448, .end = 0x00a024bc },
508 	{ .start = 0x00a024c4, .end = 0x00a024cc },
509 	{ .start = 0x00a02508, .end = 0x00a02508 },
510 	{ .start = 0x00a02510, .end = 0x00a02514 },
511 	{ .start = 0x00a0251c, .end = 0x00a0251c },
512 	{ .start = 0x00a0252c, .end = 0x00a0255c },
513 	{ .start = 0x00a02564, .end = 0x00a025a0 },
514 	{ .start = 0x00a025a8, .end = 0x00a025b4 },
515 	{ .start = 0x00a025c0, .end = 0x00a025c0 },
516 	{ .start = 0x00a025e8, .end = 0x00a025f4 },
517 	{ .start = 0x00a02c08, .end = 0x00a02c18 },
518 	{ .start = 0x00a02c2c, .end = 0x00a02c38 },
519 	{ .start = 0x00a02c68, .end = 0x00a02c78 },
520 	{ .start = 0x00a03000, .end = 0x00a03000 },
521 	{ .start = 0x00a03010, .end = 0x00a03014 },
522 	{ .start = 0x00a0301c, .end = 0x00a0302c },
523 	{ .start = 0x00a03034, .end = 0x00a03038 },
524 	{ .start = 0x00a03040, .end = 0x00a03044 },
525 	{ .start = 0x00a03060, .end = 0x00a03068 },
526 	{ .start = 0x00a03070, .end = 0x00a03070 },
527 	{ .start = 0x00a0307c, .end = 0x00a03084 },
528 	{ .start = 0x00a0308c, .end = 0x00a03090 },
529 	{ .start = 0x00a03098, .end = 0x00a03098 },
530 	{ .start = 0x00a030a0, .end = 0x00a030a0 },
531 	{ .start = 0x00a030a8, .end = 0x00a030b4 },
532 	{ .start = 0x00a030bc, .end = 0x00a030c0 },
533 	{ .start = 0x00a030c8, .end = 0x00a030f4 },
534 	{ .start = 0x00a03100, .end = 0x00a0312c },
535 	{ .start = 0x00a03c00, .end = 0x00a03c5c },
536 	{ .start = 0x00a04400, .end = 0x00a04454 },
537 	{ .start = 0x00a04460, .end = 0x00a04474 },
538 	{ .start = 0x00a044c0, .end = 0x00a044ec },
539 	{ .start = 0x00a04500, .end = 0x00a04504 },
540 	{ .start = 0x00a04510, .end = 0x00a04538 },
541 	{ .start = 0x00a04540, .end = 0x00a04548 },
542 	{ .start = 0x00a04560, .end = 0x00a04560 },
543 	{ .start = 0x00a04570, .end = 0x00a0457c },
544 	{ .start = 0x00a04590, .end = 0x00a04590 },
545 	{ .start = 0x00a04598, .end = 0x00a04598 },
546 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
547 	{ .start = 0x00a0c000, .end = 0x00a0c018 },
548 	{ .start = 0x00a0c020, .end = 0x00a0c028 },
549 	{ .start = 0x00a0c038, .end = 0x00a0c094 },
550 	{ .start = 0x00a0c0c0, .end = 0x00a0c104 },
551 	{ .start = 0x00a0c10c, .end = 0x00a0c118 },
552 	{ .start = 0x00a0c150, .end = 0x00a0c174 },
553 	{ .start = 0x00a0c17c, .end = 0x00a0c188 },
554 	{ .start = 0x00a0c190, .end = 0x00a0c198 },
555 	{ .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
556 	{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
557 };
558 
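/* Read a block of periphery registers; NIC access must already be held. */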
559 static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
560 				u32 len_bytes, __le32 *data)
561 {
562 	u32 i;
563 
564 	for (i = 0; i < len_bytes; i += 4)
565 		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
566 }
567 
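/*
 * Emit one IWL_FW_ERROR_DUMP_PRPH TLV per range in @iwl_prph_dump_addr.
 * @ptr is really a struct iwl_fw_error_dump_data **, so this can be passed
 * to iwl_fw_prph_handler() as the dump-pass handler.
 */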
568 static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
569 			  const struct iwl_prph_range *iwl_prph_dump_addr,
570 			  u32 range_len, void *ptr)
571 {
572 	struct iwl_fw_error_dump_prph *prph;
573 	struct iwl_trans *trans = fwrt->trans;
574 	struct iwl_fw_error_dump_data **data =
575 		(struct iwl_fw_error_dump_data **)ptr;
576 	unsigned long flags;
577 	u32 i;
578 
579 	if (!data)
580 		return;
581 
582 	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
583 
584 	if (!iwl_trans_grab_nic_access(trans, &flags))
585 		return;
586 
587 	for (i = 0; i < range_len; i++) {
588 		/* The range includes both boundaries */
589 		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
590 			 iwl_prph_dump_addr[i].start + 4;
591 
592 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
593 		(*data)->len = cpu_to_le32(sizeof(*prph) +
594 					num_bytes_in_chunk);
595 		prph = (void *)(*data)->data;
596 		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
597 
598 		iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
599 				    /* our range is inclusive, hence + 4 */
600 				    iwl_prph_dump_addr[i].end -
601 				    iwl_prph_dump_addr[i].start + 4,
602 				    (void *)prph->data);
603 
604 		*data = iwl_fw_error_next_data(*data);
605 	}
606 
607 	iwl_trans_release_nic_access(trans, &flags);
608 }
609 
610 /*
611  * alloc_sgtable - allocates a scatterlist table of the given size,
612  * fills it with pages and returns it
613  * @size: the size (in bytes) of the table
614  */
615 static struct scatterlist *alloc_sgtable(int size)
616 {
617 	int alloc_size, nents, i;
618 	struct page *new_page;
619 	struct scatterlist *iter;
620 	struct scatterlist *table;
621 
622 	nents = DIV_ROUND_UP(size, PAGE_SIZE);
623 	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
624 	if (!table)
625 		return NULL;
626 	sg_init_table(table, nents);
627 	iter = table;
628 	for_each_sg(table, iter, sg_nents(table), i) {
629 		new_page = alloc_page(GFP_KERNEL);
630 		if (!new_page) {
631 			/* release all previous allocated pages in the table */
632 			iter = table;
633 			for_each_sg(table, iter, sg_nents(table), i) {
634 				new_page = sg_page(iter);
635 				if (new_page)
636 					__free_page(new_page);
637 			}
			kfree(table);
638 			return NULL;
639 		}
640 		alloc_size = min_t(int, size, PAGE_SIZE);
641 		size -= PAGE_SIZE;
642 		sg_set_page(iter, new_page, alloc_size, 0);
643 	}
644 	return table;
645 }
646 
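/*
 * Sizing counterpart of iwl_dump_prph(): here @ptr is a u32 * that
 * accumulates the space the PRPH TLVs will need, so both passes can share
 * iwl_fw_prph_handler().
 */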
647 static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
648 				const struct iwl_prph_range *iwl_prph_dump_addr,
649 				u32 range_len, void *ptr)
650 {
651 	u32 *prph_len = (u32 *)ptr;
652 	int i, num_bytes_in_chunk;
653 
654 	if (!prph_len)
655 		return;
656 
657 	for (i = 0; i < range_len; i++) {
658 		/* The range includes both boundaries */
659 		num_bytes_in_chunk =
660 			iwl_prph_dump_addr[i].end -
661 			iwl_prph_dump_addr[i].start + 4;
662 
663 		*prph_len += sizeof(struct iwl_fw_error_dump_data) +
664 			sizeof(struct iwl_fw_error_dump_prph) +
665 			num_bytes_in_chunk;
666 	}
667 }
668 
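/*
 * Select the periphery address tables matching the device family and run
 * @handler (either the length pass or the dump pass) over each of them.
 */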
669 static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
670 				void (*handler)(struct iwl_fw_runtime *,
671 						const struct iwl_prph_range *,
672 						u32, void *))
673 {
674 	u32 range_len;
675 
676 	if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
677 		range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
678 		handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
679 	} else {
680 		range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
681 		handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
682 
683 		if (fwrt->trans->cfg->mq_rx_supported) {
684 			range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
685 			handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
686 		}
687 	}
688 }
689 
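/* Dump @len bytes of device memory at @ofs as an IWL_FW_ERROR_DUMP_MEM TLV. */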
690 static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
691 			    struct iwl_fw_error_dump_data **dump_data,
692 			    u32 len, u32 ofs, u32 type)
693 {
694 	struct iwl_fw_error_dump_mem *dump_mem;
695 
696 	if (!len)
697 		return;
698 
699 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
700 	(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
701 	dump_mem = (void *)(*dump_data)->data;
702 	dump_mem->type = cpu_to_le32(type);
703 	dump_mem->offset = cpu_to_le32(ofs);
704 	iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
705 	*dump_data = iwl_fw_error_next_data(*dump_data);
706 
707 	IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
708 }
709 
710 static void iwl_fw_dump_named_mem(struct iwl_fw_runtime *fwrt,
711 				  struct iwl_fw_error_dump_data **dump_data,
712 				  u32 len, u32 ofs, u8 *name, u8 name_len)
713 {
714 	struct iwl_fw_error_dump_named_mem *dump_mem;
715 
716 	if (!len)
717 		return;
718 
719 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
720 	(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
721 	dump_mem = (void *)(*dump_data)->data;
722 	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM);
723 	dump_mem->offset = cpu_to_le32(ofs);
724 	dump_mem->name_len = name_len;
725 	memcpy(dump_mem->name, name, name_len);
726 	iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
727 	*dump_data = iwl_fw_error_next_data(*dump_data);
728 
729 	IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
730 }
731 
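/*
 * Add @item_len bytes plus @const_len of header to @len, counting the header
 * only when the item is actually present (i.e. has a non-zero length).
 */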
732 #define ADD_LEN(len, item_len, const_len) \
733 	do { size_t item = item_len; len += (!!item) * const_len + item; } \
734 	while (0)
735 
736 static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
737 			  struct iwl_fwrt_shared_mem_cfg *mem_cfg)
738 {
739 	size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
740 			 sizeof(struct iwl_fw_error_dump_fifo);
741 	u32 fifo_len = 0;
742 	int i;
743 
744 	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
745 		return 0;
746 
747 	/* Count RXF2 size */
748 	ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
749 
750 	/* Count RXF1 sizes */
751 	if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
752 		mem_cfg->num_lmacs = MAX_NUM_LMAC;
753 
754 	for (i = 0; i < mem_cfg->num_lmacs; i++)
755 		ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
756 
757 	return fifo_len;
758 }
759 
760 static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
761 			  struct iwl_fwrt_shared_mem_cfg *mem_cfg)
762 {
763 	size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
764 			 sizeof(struct iwl_fw_error_dump_fifo);
765 	u32 fifo_len = 0;
766 	int i;
767 
768 	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
769 		goto dump_internal_txf;
770 
771 	/* Count TXF sizes */
772 	if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
773 		mem_cfg->num_lmacs = MAX_NUM_LMAC;
774 
775 	for (i = 0; i < mem_cfg->num_lmacs; i++) {
776 		int j;
777 
778 		for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
779 			ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
780 				hdr_len);
781 	}
782 
783 dump_internal_txf:
784 	if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
785 	      fw_has_capa(&fwrt->fw->ucode_capa,
786 			  IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
787 		goto out;
788 
789 	for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
790 		ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
791 
792 out:
793 	return fifo_len;
794 }
795 
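/* Copy each of the firmware's paging blocks into the dump (block 0 is skipped). */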
796 static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
797 			    struct iwl_fw_error_dump_data **data)
798 {
799 	int i;
800 
801 	IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
802 	for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
803 		struct iwl_fw_error_dump_paging *paging;
804 		struct page *pages =
805 			fwrt->fw_paging_db[i].fw_paging_block;
806 		dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
807 
808 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
809 		(*data)->len = cpu_to_le32(sizeof(*paging) +
810 					     PAGING_BLOCK_SIZE);
811 	paging = (void *)(*data)->data;
812 		paging->index = cpu_to_le32(i);
813 		dma_sync_single_for_cpu(fwrt->trans->dev, addr,
814 					PAGING_BLOCK_SIZE,
815 					DMA_BIDIRECTIONAL);
816 		memcpy(paging->data, page_address(pages),
817 		       PAGING_BLOCK_SIZE);
818 		(*data) = iwl_fw_error_next_data(*data);
819 	}
820 }
821 
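/*
 * Build the legacy (non-INI) dump file: a barker followed by a sequence of
 * TLVs (device info, shared memory config, FIFOs, error info, memory
 * regions, D3 debug data, paging blocks, PRPH registers), sized up front
 * and then filled in.
 */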
822 static struct iwl_fw_error_dump_file *
823 _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
824 		   struct iwl_fw_dump_ptrs *fw_error_dump)
825 {
826 	struct iwl_fw_error_dump_file *dump_file;
827 	struct iwl_fw_error_dump_data *dump_data;
828 	struct iwl_fw_error_dump_info *dump_info;
829 	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
830 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
831 	u32 sram_len, sram_ofs;
832 	const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
833 	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
834 	u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
835 	u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
836 	u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
837 				0 : fwrt->trans->cfg->dccm2_len;
838 	int i;
839 
840 	/* SRAM - include stack CCM if driver knows the values for it */
841 	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
842 		const struct fw_img *img;
843 
844 		if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
845 			return NULL;
846 		img = &fwrt->fw->img[fwrt->cur_fw_img];
847 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
848 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
849 	} else {
850 		sram_ofs = fwrt->trans->cfg->dccm_offset;
851 		sram_len = fwrt->trans->cfg->dccm_len;
852 	}
853 
854 	/* reading RXF/TXF sizes */
855 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
856 		fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
857 		fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
858 
859 		/* Make room for PRPH registers */
860 		if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
861 			iwl_fw_prph_handler(fwrt, &prph_len,
862 					    iwl_fw_get_prph_len);
863 
864 		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
865 		    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
866 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
867 	}
868 
869 	file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
870 
871 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
872 		file_len += sizeof(*dump_data) + sizeof(*dump_info);
873 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
874 		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
875 
876 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
877 		size_t hdr_len = sizeof(*dump_data) +
878 				 sizeof(struct iwl_fw_error_dump_mem);
879 
880 		/* Dump SRAM only if no mem_tlvs */
881 		if (!fwrt->fw->dbg.n_mem_tlv)
882 			ADD_LEN(file_len, sram_len, hdr_len);
883 
884 		/* Make room for all mem types that exist */
885 		ADD_LEN(file_len, smem_len, hdr_len);
886 		ADD_LEN(file_len, sram2_len, hdr_len);
887 
888 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
889 			ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
890 	}
891 
892 	/* Make room for fw's virtual image pages, if it exists */
893 	if (iwl_fw_dbg_is_paging_enabled(fwrt))
894 		file_len += fwrt->num_of_paging_blk *
895 			(sizeof(*dump_data) +
896 			 sizeof(struct iwl_fw_error_dump_paging) +
897 			 PAGING_BLOCK_SIZE);
898 
899 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
900 		file_len += sizeof(*dump_data) +
901 			fwrt->trans->cfg->d3_debug_data_length * 2;
902 	}
903 
904 	/* If we only want a monitor dump, reset the file length */
905 	if (fwrt->dump.monitor_only) {
906 		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
907 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
908 	}
909 
910 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
911 	    fwrt->dump.desc)
912 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
913 			    fwrt->dump.desc->len;
914 
915 	dump_file = vzalloc(file_len);
916 	if (!dump_file)
917 		return NULL;
918 
919 	fw_error_dump->fwrt_ptr = dump_file;
920 
921 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
922 	dump_data = (void *)dump_file->data;
923 
924 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
925 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
926 		dump_data->len = cpu_to_le32(sizeof(*dump_info));
927 		dump_info = (void *)dump_data->data;
928 		dump_info->device_family =
929 			fwrt->trans->cfg->device_family ==
930 			IWL_DEVICE_FAMILY_7000 ?
931 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
932 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
933 		dump_info->hw_step =
934 			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
935 		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
936 		       sizeof(dump_info->fw_human_readable));
937 		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
938 			sizeof(dump_info->dev_human_readable) - 1);
939 		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
940 			sizeof(dump_info->bus_human_readable) - 1);
941 		dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
942 		dump_info->lmac_err_id[0] =
943 			cpu_to_le32(fwrt->dump.lmac_err_id[0]);
944 		if (fwrt->smem_cfg.num_lmacs > 1)
945 			dump_info->lmac_err_id[1] =
946 				cpu_to_le32(fwrt->dump.lmac_err_id[1]);
947 		dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
948 
949 		dump_data = iwl_fw_error_next_data(dump_data);
950 	}
951 
952 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
953 		/* Dump shared memory configuration */
954 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
955 		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
956 		dump_smem_cfg = (void *)dump_data->data;
957 		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
958 		dump_smem_cfg->num_txfifo_entries =
959 			cpu_to_le32(mem_cfg->num_txfifo_entries);
960 		for (i = 0; i < MAX_NUM_LMAC; i++) {
961 			int j;
962 			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
963 
964 			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
965 				dump_smem_cfg->lmac[i].txfifo_size[j] =
966 					cpu_to_le32(txf_size[j]);
967 			dump_smem_cfg->lmac[i].rxfifo1_size =
968 				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
969 		}
970 		dump_smem_cfg->rxfifo2_size =
971 			cpu_to_le32(mem_cfg->rxfifo2_size);
972 		dump_smem_cfg->internal_txfifo_addr =
973 			cpu_to_le32(mem_cfg->internal_txfifo_addr);
974 		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
975 			dump_smem_cfg->internal_txfifo_size[i] =
976 				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
977 		}
978 
979 		dump_data = iwl_fw_error_next_data(dump_data);
980 	}
981 
982 	/* We only dump the FIFOs if the FW is in error state */
983 	if (fifo_len) {
984 		iwl_fw_dump_rxf(fwrt, &dump_data);
985 		iwl_fw_dump_txf(fwrt, &dump_data);
986 		if (radio_len)
987 			iwl_read_radio_regs(fwrt, &dump_data);
988 	}
989 
990 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
991 	    fwrt->dump.desc) {
992 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
993 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
994 					     fwrt->dump.desc->len);
995 		dump_trig = (void *)dump_data->data;
996 		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
997 		       sizeof(*dump_trig) + fwrt->dump.desc->len);
998 
999 		dump_data = iwl_fw_error_next_data(dump_data);
1000 	}
1001 
1002 	/* In case we only want a monitor dump, skip to dumping transport data */
1003 	if (fwrt->dump.monitor_only)
1004 		goto out;
1005 
1006 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
1007 		const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
1008 			fwrt->fw->dbg.mem_tlv;
1009 
1010 		if (!fwrt->fw->dbg.n_mem_tlv)
1011 			iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
1012 					IWL_FW_ERROR_DUMP_MEM_SRAM);
1013 
1014 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
1015 			u32 len = le32_to_cpu(fw_dbg_mem[i].len);
1016 			u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
1017 
1018 			iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
1019 					le32_to_cpu(fw_dbg_mem[i].data_type));
1020 		}
1021 
1022 		iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
1023 				fwrt->trans->cfg->smem_offset,
1024 				IWL_FW_ERROR_DUMP_MEM_SMEM);
1025 
1026 		iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
1027 				fwrt->trans->cfg->dccm2_offset,
1028 				IWL_FW_ERROR_DUMP_MEM_SRAM);
1029 	}
1030 
1031 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
1032 		u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
1033 		size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
1034 
1035 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
1036 		dump_data->len = cpu_to_le32(data_size * 2);
1037 
1038 		memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
1039 
1040 		kfree(fwrt->dump.d3_debug_data);
1041 		fwrt->dump.d3_debug_data = NULL;
1042 
1043 		iwl_trans_read_mem_bytes(fwrt->trans, addr,
1044 					 dump_data->data + data_size,
1045 					 data_size);
1046 
1047 		dump_data = iwl_fw_error_next_data(dump_data);
1048 	}
1049 
1050 	/* Dump fw's virtual image */
1051 	if (iwl_fw_dbg_is_paging_enabled(fwrt))
1052 		iwl_dump_paging(fwrt, &dump_data);
1053 
1054 	if (prph_len)
1055 		iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
1056 
1057 out:
1058 	dump_file->file_len = cpu_to_le32(file_len);
1059 	return dump_file;
1060 }
1061 
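/*
 * INI (TLV-configured) periphery dump: one IWL_FW_ERROR_DUMP_PRPH TLV per
 * configured start address, each filled with a single register read.
 */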
1062 static void iwl_dump_prph_ini(struct iwl_trans *trans,
1063 			      struct iwl_fw_error_dump_data **data,
1064 			      struct iwl_fw_ini_region_cfg *reg)
1065 {
1066 	struct iwl_fw_error_dump_prph *prph;
1067 	unsigned long flags;
1068 	u32 i, size = le32_to_cpu(reg->num_regions);
1069 
1070 	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
1071 
1072 	if (!iwl_trans_grab_nic_access(trans, &flags))
1073 		return;
1074 
1075 	for (i = 0; i < size; i++) {
1076 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
1077 		(*data)->len = cpu_to_le32(le32_to_cpu(reg->size) +
1078 					   sizeof(*prph));
1079 		prph = (void *)(*data)->data;
1080 		prph->prph_start = reg->start_addr[i];
1081 		prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans,
1082 								  le32_to_cpu(prph->prph_start)));
1083 		*data = iwl_fw_error_next_data(*data);
1084 	}
1085 	iwl_trans_release_nic_access(trans, &flags);
1086 }
1087 
1088 static void iwl_dump_csr_ini(struct iwl_trans *trans,
1089 			     struct iwl_fw_error_dump_data **data,
1090 			     struct iwl_fw_ini_region_cfg *reg)
1091 {
1092 	int i, num = le32_to_cpu(reg->num_regions);
1093 	u32 size = le32_to_cpu(reg->size);
1094 
1095 	IWL_DEBUG_INFO(trans, "WRT CSR dump\n");
1096 
1097 	for (i = 0; i < num; i++) {
1098 		u32 add = le32_to_cpu(reg->start_addr[i]);
1099 		__le32 *val;
1100 		int j;
1101 
1102 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
1103 		(*data)->len = cpu_to_le32(size);
1104 		val = (void *)(*data)->data;
1105 
1106 		for (j = 0; j < size; j += 4)
1107 			*val++ = cpu_to_le32(iwl_trans_read32(trans, j + add));
1108 
1109 		*data = iwl_fw_error_next_data(*data);
1110 	}
1111 }
1112 
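/*
 * Compute how much dump space the regions attached to an INI trigger need;
 * the cases here mirror the ones handled by iwl_fw_ini_dump_trigger() below.
 */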
1113 static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
1114 				      struct iwl_fw_ini_trigger *trigger)
1115 {
1116 	int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
1117 
1118 	if (!trigger || !trigger->num_regions)
1119 		return 0;
1120 
1121 	num = le32_to_cpu(trigger->num_regions);
1122 	for (i = 0; i < num; i++) {
1123 		u32 reg_id = le32_to_cpu(trigger->data[i]);
1124 		struct iwl_fw_ini_region_cfg *reg;
1125 		enum iwl_fw_ini_region_type type;
1126 		u32 num_entries;
1127 
1128 		if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
1129 			continue;
1130 
1131 		reg = fwrt->dump.active_regs[reg_id].reg;
1132 		if (WARN(!reg, "Unassigned region %d\n", reg_id))
1133 			continue;
1134 
1135 		type = le32_to_cpu(reg->region_type);
1136 		num_entries = le32_to_cpu(reg->num_regions);
1137 
1138 		switch (type) {
1139 		case IWL_FW_INI_REGION_DEVICE_MEMORY:
1140 			size += hdr_len +
1141 				sizeof(struct iwl_fw_error_dump_named_mem) +
1142 				le32_to_cpu(reg->size);
1143 			break;
1144 		case IWL_FW_INI_REGION_PERIPHERY_MAC:
1145 		case IWL_FW_INI_REGION_PERIPHERY_PHY:
1146 		case IWL_FW_INI_REGION_PERIPHERY_AUX:
1147 			size += num_entries *
1148 				(hdr_len +
1149 				 sizeof(struct iwl_fw_error_dump_prph) +
1150 				 sizeof(u32));
1151 			break;
1152 		case IWL_FW_INI_REGION_TXF:
1153 			size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg);
1154 			break;
1155 		case IWL_FW_INI_REGION_RXF:
1156 			size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg);
1157 			break;
1158 		case IWL_FW_INI_REGION_PAGING:
1159 			if (!iwl_fw_dbg_is_paging_enabled(fwrt))
1160 				break;
1161 			size += fwrt->num_of_paging_blk *
1162 				(hdr_len +
1163 				 sizeof(struct iwl_fw_error_dump_paging) +
1164 				 PAGING_BLOCK_SIZE);
1165 			break;
1166 		case IWL_FW_INI_REGION_CSR:
1167 			size += num_entries *
1168 				(hdr_len + le32_to_cpu(reg->size));
1169 			break;
1170 		case IWL_FW_INI_REGION_DRAM_BUFFER:
1171 			/* Transport takes care of DRAM dumping */
1172 		case IWL_FW_INI_REGION_INTERNAL_BUFFER:
1173 		case IWL_FW_INI_REGION_DRAM_IMR:
1174 			/* Undefined yet */
1175 		default:
1176 			break;
1177 		}
1178 	}
1179 	return size;
1180 }
1181 
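/*
 * Walk the regions attached to an INI trigger and emit the matching TLVs.
 * Regions that the transport dumps by itself only set bits in @dump_mask.
 */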
1182 static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
1183 				    struct iwl_fw_ini_trigger *trigger,
1184 				    struct iwl_fw_error_dump_data **data,
1185 				    u32 *dump_mask)
1186 {
1187 	int i, num = le32_to_cpu(trigger->num_regions);
1188 
1189 	for (i = 0; i < num; i++) {
1190 		u32 reg_id = le32_to_cpu(trigger->data[i]);
1191 		enum iwl_fw_ini_region_type type;
1192 		struct iwl_fw_ini_region_cfg *reg;
1193 
1194 		if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))
1195 			continue;
1196 
1197 		reg = fwrt->dump.active_regs[reg_id].reg;
1198 		/* Don't warn, get_trigger_len already warned */
1199 		if (!reg)
1200 			continue;
1201 
1202 		type = le32_to_cpu(reg->region_type);
1203 		switch (type) {
1204 		case IWL_FW_INI_REGION_DEVICE_MEMORY:
1205 			if (WARN_ON(le32_to_cpu(reg->num_regions) > 1))
1206 				continue;
1207 			iwl_fw_dump_named_mem(fwrt, data,
1208 					      le32_to_cpu(reg->size),
1209 					      le32_to_cpu(reg->start_addr[0]),
1210 					      reg->name,
1211 					      le32_to_cpu(reg->name_len));
1212 			break;
1213 		case IWL_FW_INI_REGION_PERIPHERY_MAC:
1214 		case IWL_FW_INI_REGION_PERIPHERY_PHY:
1215 		case IWL_FW_INI_REGION_PERIPHERY_AUX:
1216 			iwl_dump_prph_ini(fwrt->trans, data, reg);
1217 			break;
1218 		case IWL_FW_INI_REGION_DRAM_BUFFER:
1219 			*dump_mask |= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
1220 			break;
1221 		case IWL_FW_INI_REGION_PAGING:
1222 			if (iwl_fw_dbg_is_paging_enabled(fwrt))
1223 				iwl_dump_paging(fwrt, data);
1224 			else
1225 				*dump_mask |= BIT(IWL_FW_ERROR_DUMP_PAGING);
1226 			break;
1227 		case IWL_FW_INI_REGION_TXF:
1228 			iwl_fw_dump_txf(fwrt, data);
1229 			break;
1230 		case IWL_FW_INI_REGION_RXF:
1231 			iwl_fw_dump_rxf(fwrt, data);
1232 			break;
1233 		case IWL_FW_INI_REGION_CSR:
1234 			iwl_dump_csr_ini(fwrt->trans, data, reg);
1235 			break;
1236 		case IWL_FW_INI_REGION_DRAM_IMR:
1237 		case IWL_FW_INI_REGION_INTERNAL_BUFFER:
1238 			/* This is undefined yet */
1239 		default:
1240 			break;
1241 		}
1242 	}
1243 }
1244 
1245 static struct iwl_fw_error_dump_file *
1246 _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
1247 		       struct iwl_fw_dump_ptrs *fw_error_dump,
1248 		       u32 *dump_mask)
1249 {
1250 	int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
1251 	struct iwl_fw_error_dump_data *dump_data;
1252 	struct iwl_fw_error_dump_file *dump_file;
1253 	struct iwl_fw_ini_trigger *trigger, *ext;
1254 
1255 	if (id == FW_DBG_TRIGGER_FW_ASSERT)
1256 		id = IWL_FW_TRIGGER_ID_FW_ASSERT;
1257 
1258 	if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
1259 		return NULL;
1260 
1261 	trigger = fwrt->dump.active_trigs[id].conf;
1262 	ext = fwrt->dump.active_trigs[id].conf_ext;
1263 
1264 	size = sizeof(*dump_file);
1265 	size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
1266 	size += iwl_fw_ini_get_trigger_len(fwrt, ext);
1267 
1268 	if (!size)
1269 		return NULL;
1270 
1271 	dump_file = vzalloc(size);
1272 	if (!dump_file)
1273 		return NULL;
1274 
1275 	fw_error_dump->fwrt_ptr = dump_file;
1276 
1277 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1278 	dump_data = (void *)dump_file->data;
1279 	dump_file->file_len = cpu_to_le32(size);
1280 
1281 	*dump_mask = 0;
1282 	if (trigger)
1283 		iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask);
1284 	if (ext)
1285 		iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask);
1286 
1287 	return dump_file;
1288 }
1289 
1290 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
1291 {
1292 	struct iwl_fw_dump_ptrs *fw_error_dump;
1293 	struct iwl_fw_error_dump_file *dump_file;
1294 	struct scatterlist *sg_dump_data;
1295 	u32 file_len;
1296 	u32 dump_mask = fwrt->fw->dbg.dump_mask;
1297 
1298 	IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
1299 
1300 	/* there's no point in fw dump if the bus is dead */
1301 	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
1302 		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
1303 		goto out;
1304 	}
1305 
1306 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1307 	if (!fw_error_dump)
1308 		goto out;
1309 
1310 	if (fwrt->trans->ini_valid)
1311 		dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump,
1312 						   &dump_mask);
1313 	else
1314 		dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
1315 
1316 	if (!dump_file) {
1317 		kfree(fw_error_dump);
1318 		goto out;
1319 	}
1320 
1321 	if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
1322 		dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
1323 
1324 	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
1325 	file_len = le32_to_cpu(dump_file->file_len);
1326 	fw_error_dump->fwrt_len = file_len;
1327 	if (fw_error_dump->trans_ptr) {
1328 		file_len += fw_error_dump->trans_ptr->len;
1329 		dump_file->file_len = cpu_to_le32(file_len);
1330 	}
1331 
1332 	sg_dump_data = alloc_sgtable(file_len);
1333 	if (sg_dump_data) {
1334 		sg_pcopy_from_buffer(sg_dump_data,
1335 				     sg_nents(sg_dump_data),
1336 				     fw_error_dump->fwrt_ptr,
1337 				     fw_error_dump->fwrt_len, 0);
1338 		if (fw_error_dump->trans_ptr)
1339 			sg_pcopy_from_buffer(sg_dump_data,
1340 					     sg_nents(sg_dump_data),
1341 					     fw_error_dump->trans_ptr->data,
1342 					     fw_error_dump->trans_ptr->len,
1343 					     fw_error_dump->fwrt_len);
1344 		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
1345 			       GFP_KERNEL);
1346 	}
1347 	vfree(fw_error_dump->fwrt_ptr);
1348 	vfree(fw_error_dump->trans_ptr);
1349 	kfree(fw_error_dump);
1350 
1351 out:
1352 	iwl_fw_free_dump_desc(fwrt);
1353 	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1354 	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
1355 }
1356 IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
1357 
1358 const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
1359 	.trig_desc = {
1360 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1361 	},
1362 };
1363 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
1364 
1365 void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt)
1366 {
1367 	IWL_INFO(fwrt, "error dump due to fw assert\n");
1368 	fwrt->dump.desc = &iwl_dump_desc_assert;
1369 	iwl_fw_error_dump(fwrt);
1370 }
1371 IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
1372 
1373 void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
1374 {
1375 	struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
1376 		kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
1377 
1378 	if (!iwl_dump_desc_no_alive)
1379 		return;
1380 
1381 	iwl_dump_desc_no_alive->trig_desc.type =
1382 		cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
1383 	iwl_dump_desc_no_alive->len = 0;
1384 
1385 	if (WARN_ON(fwrt->dump.desc))
1386 		iwl_fw_free_dump_desc(fwrt);
1387 
1388 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1389 		 FW_DBG_TRIGGER_NO_ALIVE);
1390 
1391 	fwrt->dump.desc = iwl_dump_desc_no_alive;
1392 	iwl_fw_error_dump(fwrt);
1393 	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
1394 }
1395 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
1396 
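/*
 * Illustrative call (not taken from this file), assuming a valid @fwrt:
 *
 *	iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
 *
 * This stores the descriptor and schedules dump.wk; the work handler
 * (iwl_fw_error_dump_wk() below, wired up by the runtime code) then
 * performs the actual collection.
 */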
1397 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
1398 			    const struct iwl_fw_dump_desc *desc,
1399 			    bool monitor_only,
1400 			    unsigned int delay)
1401 {
1402 	/*
1403 	 * If the loading of the FW completed successfully, the next step is to
1404 	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
1405 	 * non-zero, the FW was already loaded successfully. If the state is
1406 	 * "NO_FW" in such a case - exit, since the FW may be dead. Otherwise, we
1407 	 * can try to collect the data, since FW might just not be fully
1408 	 * loaded (no "ALIVE" yet), and the debug data is accessible.
1409 	 *
1410 	 * Corner case: got the FW alive but crashed before getting the SMEM
1411 	 *	config. In such a case, due to HW access problems, we might
1412 	 *	collect garbage.
1413 	 */
1414 	if (fwrt->trans->state == IWL_TRANS_NO_FW &&
1415 	    fwrt->smem_cfg.num_lmacs)
1416 		return -EIO;
1417 
1418 	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
1419 	    test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
1420 		return -EBUSY;
1421 
1422 	if (WARN_ON(fwrt->dump.desc))
1423 		iwl_fw_free_dump_desc(fwrt);
1424 
1425 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1426 		 le32_to_cpu(desc->trig_desc.type));
1427 
1428 	fwrt->dump.desc = desc;
1429 	fwrt->dump.monitor_only = monitor_only;
1430 
1431 	schedule_delayed_work(&fwrt->dump.wk, delay);
1432 
1433 	return 0;
1434 }
1435 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
1436 
1437 int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
1438 			enum iwl_fw_dbg_trigger trig,
1439 			const char *str, size_t len,
1440 			struct iwl_fw_dbg_trigger_tlv *trigger)
1441 {
1442 	struct iwl_fw_dump_desc *desc;
1443 	unsigned int delay = 0;
1444 	bool monitor_only = false;
1445 
1446 	if (trigger) {
1447 		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
1448 
1449 		if (!le16_to_cpu(trigger->occurrences))
1450 			return 0;
1451 
1452 		if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
1453 			IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
1454 				 trig);
1455 			iwl_force_nmi(fwrt->trans);
1456 			return 0;
1457 		}
1458 
1459 		trigger->occurrences = cpu_to_le16(occurrences);
1460 		delay = le16_to_cpu(trigger->trig_dis_ms);
1461 		monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
1462 	}
1463 
1464 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
1465 	if (!desc)
1466 		return -ENOMEM;
1467 
1469 	desc->len = len;
1470 	desc->trig_desc.type = cpu_to_le32(trig);
1471 	memcpy(desc->trig_desc.data, str, len);
1472 
1473 	return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
1474 }
1475 IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
1476 
1477 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
1478 		       u32 id, const char *str, size_t len)
1479 {
1480 	struct iwl_fw_dump_desc *desc;
1481 	u32 occur, delay;
1482 
1483 	if (!fwrt->trans->ini_valid)
1484 		return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
1485 
1486 	if (id == FW_DBG_TRIGGER_USER)
1487 		id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
1488 
1489 	if (WARN_ON(!fwrt->dump.active_trigs[id].active))
1490 		return -EINVAL;
1491 
1492 	delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->dump_delay);
1493 	occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
1494 	if (!occur)
1495 		return 0;
1496 
1497 	if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) {
1498 		IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
1499 		iwl_force_nmi(fwrt->trans);
1500 		return 0;
1501 	}
1502 
1503 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
1504 	if (!desc)
1505 		return -ENOMEM;
1506 
1507 	occur--;
1508 	fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur);
1509 
1510 	desc->len = len;
1511 	desc->trig_desc.type = cpu_to_le32(id);
1512 	memcpy(desc->trig_desc.data, str, len);
1513 
1514 	return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
1515 }
1516 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
1517 
1518 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
1519 			    struct iwl_fw_dbg_trigger_tlv *trigger,
1520 			    const char *fmt, ...)
1521 {
1522 	int ret, len = 0;
1523 	char buf[64];
1524 
1525 	if (fwrt->trans->ini_valid)
1526 		return 0;
1527 
1528 	if (fmt) {
1529 		va_list ap;
1530 
1531 		buf[sizeof(buf) - 1] = '\0';
1532 
1533 		va_start(ap, fmt);
1534 		vsnprintf(buf, sizeof(buf), fmt, ap);
1535 		va_end(ap);
1536 
1537 		/* check for truncation */
1538 		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
1539 			buf[sizeof(buf) - 1] = '\0';
1540 
1541 		len = strlen(buf) + 1;
1542 	}
1543 
1544 	ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
1545 				  trigger);
1546 
1547 	if (ret)
1548 		return ret;
1549 
1550 	return 0;
1551 }
1552 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
1553 
1554 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
1555 {
1556 	u8 *ptr;
1557 	int ret;
1558 	int i;
1559 
1560 	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
1561 		      "Invalid configuration %d\n", conf_id))
1562 		return -EINVAL;
1563 
1564 	/* EARLY START - firmware's configuration is hard coded */
1565 	if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
1566 	     !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
1567 	    conf_id == FW_DBG_START_FROM_ALIVE)
1568 		return 0;
1569 
1570 	if (!fwrt->fw->dbg.conf_tlv[conf_id])
1571 		return -EINVAL;
1572 
1573 	if (fwrt->dump.conf != FW_DBG_INVALID)
1574 		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
1575 			 fwrt->dump.conf);
1576 
1577 	/* Send all HCMDs for configuring the FW debug */
1578 	ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
1579 	for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
1580 		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
1581 		struct iwl_host_cmd hcmd = {
1582 			.id = cmd->id,
1583 			.len = { le16_to_cpu(cmd->len), },
1584 			.data = { cmd->data, },
1585 		};
1586 
1587 		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
1588 		if (ret)
1589 			return ret;
1590 
1591 		ptr += sizeof(*cmd);
1592 		ptr += le16_to_cpu(cmd->len);
1593 	}
1594 
1595 	fwrt->dump.conf = conf_id;
1596 
1597 	return 0;
1598 }
1599 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
1600 
1601 /* this function assumes dump_start was called beforehand and dump_end will be
1602  * called afterwards
1603  */
1604 void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
1605 {
1606 	struct iwl_fw_dbg_params params = {0};
1607 
1608 	if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
1609 		return;
1610 
1611 	if (fwrt->ops && fwrt->ops->fw_running &&
1612 	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1613 		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1614 		iwl_fw_free_dump_desc(fwrt);
1615 		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1616 		return;
1617 	}
1618 
1619 	iwl_fw_dbg_stop_recording(fwrt, &params);
1620 
1621 	iwl_fw_error_dump(fwrt);
1622 
1623 	/* restart recording if the firmware has not crashed */
1624 	if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
1625 	    fwrt->fw->dbg.dest_tlv) {
1626 		/* wait for the DBGC to actually stop before restarting recording */
1627 		udelay(500);
1628 		iwl_fw_dbg_restart_recording(fwrt, &params);
1629 	}
1630 }
1631 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
1632 
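/*
 * Deferred dump work item: brackets the synchronous debug data collection
 * with the dump_start/dump_end runtime ops of the op mode.
 */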
1633 void iwl_fw_error_dump_wk(struct work_struct *work)
1634 {
1635 	struct iwl_fw_runtime *fwrt =
1636 		container_of(work, struct iwl_fw_runtime, dump.wk.work);
1637 
1638 	if (fwrt->ops && fwrt->ops->dump_start &&
1639 	    fwrt->ops->dump_start(fwrt->ops_ctx))
1640 		return;
1641 
1642 	iwl_fw_dbg_collect_sync(fwrt);
1643 
1644 	if (fwrt->ops && fwrt->ops->dump_end)
1645 		fwrt->ops->dump_end(fwrt->ops_ctx);
1646 }
1647 
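/*
 * Copy the D3 debug data from the device into a driver buffer, allocating
 * that buffer on first use. Does nothing if D3 debug data collection is not
 * enabled.
 */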
1648 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
1649 {
1650 	const struct iwl_cfg *cfg = fwrt->trans->cfg;
1651 
1652 	if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
1653 		return;
1654 
1655 	if (!fwrt->dump.d3_debug_data) {
1656 		fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
1657 						   GFP_KERNEL);
1658 		if (!fwrt->dump.d3_debug_data) {
1659 			IWL_ERR(fwrt,
1660 				"failed to allocate memory for D3 debug data\n");
1661 			return;
1662 		}
1663 	}
1664 
1665 	/* if the buffer holds previous debug data it is overwritten */
1666 	iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
1667 				 fwrt->dump.d3_debug_data,
1668 				 cfg->d3_debug_data_length);
1669 }
1670 IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
1671 
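/*
 * Allocate a DMA-coherent monitor buffer as described by an ini buffer
 * allocation TLV and register it in trans->fw_mon. The first block is handed
 * to the firmware through registers / context info; any further block is
 * advertised with an asynchronous LDBG_CONFIG_CMD.
 */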
1672 static void
1673 iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
1674 			     struct iwl_fw_ini_allocation_tlv *alloc)
1675 {
1676 	struct iwl_trans *trans = fwrt->trans;
1677 	struct iwl_ldbg_config_cmd ldbg_cmd = {
1678 		.type = cpu_to_le32(BUFFER_ALLOCATION),
1679 	};
1680 	struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation;
1681 	struct iwl_host_cmd hcmd = {
1682 		.id = LDBG_CONFIG_CMD,
1683 		.flags = CMD_ASYNC,
1684 		.data[0] = &ldbg_cmd,
1685 		.len[0] = sizeof(ldbg_cmd),
1686 	};
1687 	void *virtual_addr = NULL;
1688 	u32 size = le32_to_cpu(alloc->size);
1689 	dma_addr_t phys_addr;
1690 
1691 	if (!trans->num_blocks &&
1692 	    le32_to_cpu(alloc->buffer_location) !=
1693 	    IWL_FW_INI_LOCATION_DRAM_PATH)
1694 		return;
1695 
1696 	virtual_addr =
1697 		dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr,
1698 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO |
1699 				   __GFP_COMP);
1700 
1701 	/* TODO: alloc fragments if needed */
1702 	if (!virtual_addr) {
1703 		IWL_ERR(fwrt, "Failed to allocate debug memory\n");
		return;
	}
1704 
1705 	if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
1706 		return;
1707 
1708 	trans->fw_mon[trans->num_blocks].block = virtual_addr;
1709 	trans->fw_mon[trans->num_blocks].physical = phys_addr;
1710 	trans->fw_mon[trans->num_blocks].size = size;
1711 	trans->num_blocks++;
1712 
1713 	IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
1714 
1715 	/* First block is assigned via registers / context info */
1716 	if (trans->num_blocks == 1)
1717 		return;
1718 
1719 	cmd->num_frags = cpu_to_le32(1);
1720 	cmd->fragments[0].address = cpu_to_le64(phys_addr);
1721 	cmd->fragments[0].size = alloc->size;
1722 	cmd->allocation_id = alloc->allocation_id;
1723 	cmd->buffer_location = alloc->buffer_location;
1724 
1725 	iwl_trans_send_cmd(trans, &hcmd);
1726 }
1727 
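/* Send the host command embedded in an ini HCMD TLV to the firmware. */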
1728 static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
1729 				 struct iwl_ucode_tlv *tlv)
1730 {
1731 	struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
1732 	struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
1733 	u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
1734 
1735 	struct iwl_host_cmd hcmd = {
1736 		.id = WIDE_ID(data->group, data->id),
1737 		.len = { len, },
1738 		.data = { data->data, },
1739 	};
1740 
1741 	iwl_trans_send_cmd(fwrt->trans, &hcmd);
1742 }
1743 
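/*
 * Walk the regions of an ini region TLV and mark each one as active for the
 * given apply point. External TLVs may override the firmware defaults.
 */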
1744 static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
1745 				      struct iwl_fw_ini_region_tlv *tlv,
1746 				      bool ext, enum iwl_fw_ini_apply_point pnt)
1747 {
1748 	void *iter = (void *)tlv->region_config;
1749 	int i, size = le32_to_cpu(tlv->num_regions);
1750 
1751 	for (i = 0; i < size; i++) {
1752 		struct iwl_fw_ini_region_cfg *reg = iter;
1753 		int id = le32_to_cpu(reg->region_id);
1754 		struct iwl_fw_ini_active_regs *active;
1755 
1756 		if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
1757 			 "Invalid region id %d for apply point %d\n", id, pnt))
1758 			break;
1759 
1760 		active = &fwrt->dump.active_regs[id];
1761 
1762 		if (ext && active->apply_point == pnt)
1763 			IWL_WARN(fwrt->trans,
1764 				 "External region TLV overrides FW default %x\n",
1765 				 id);
1766 
1767 		IWL_DEBUG_FW(fwrt,
1768 			     "%s: apply point %d, activating region ID %d\n",
1769 			     __func__, pnt, id);
1770 
1771 		active->reg = reg;
1772 		active->apply_point = pnt;
1773 
1774 		if (le32_to_cpu(reg->region_type) !=
1775 		    IWL_FW_INI_REGION_DRAM_BUFFER)
1776 			iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
1777 
1778 		iter += sizeof(*reg);
1779 	}
1780 }
1781 
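/*
 * Walk the triggers of an ini trigger TLV and record them as active for the
 * given apply point. External triggers either extend the firmware default
 * configuration or, if ignore_default is set, replace it.
 */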
1782 static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
1783 				       struct iwl_fw_ini_trigger_tlv *tlv,
1784 				       bool ext,
1785 				       enum iwl_fw_ini_apply_point apply_point)
1786 {
1787 	int i, size = le32_to_cpu(tlv->num_triggers);
1788 	void *iter = (void *)tlv->trigger_config;
1789 
1790 	for (i = 0; i < size; i++) {
1791 		struct iwl_fw_ini_trigger *trig = iter;
1792 		struct iwl_fw_ini_active_triggers *active;
1793 		int id = le32_to_cpu(trig->trigger_id);
1794 		u32 num;
1795 
1796 		if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
1797 			break;
1798 
1799 		active = &fwrt->dump.active_trigs[id];
1800 
1801 		if (active->apply_point != apply_point) {
1802 			active->conf = NULL;
1803 			active->conf_ext = NULL;
1804 		}
1805 
1806 		num = le32_to_cpu(trig->num_regions);
1807 
1808 		if (ext && active->apply_point == apply_point) {
1809 			num += le32_to_cpu(active->conf->num_regions);
1810 			if (trig->ignore_default) {
1811 				active->conf_ext = active->conf;
1812 				active->conf = trig;
1813 			} else {
1814 				active->conf_ext = trig;
1815 			}
1816 		} else {
1817 			active->conf = trig;
1818 		}
1819 
1820 		/* zero means infinity, so store it as -1 (all bits set) */
1821 		if (!le32_to_cpu(trig->occurrences))
1822 			trig->occurrences = cpu_to_le32(-1);
1823 		if (!le32_to_cpu(trig->ignore_consec))
1824 			trig->ignore_consec = cpu_to_le32(-1);
1825 
1826 		iter += sizeof(*trig) +
1827 			le32_to_cpu(trig->num_regions) * sizeof(__le32);
1828 
1829 		active->active = num;
1830 		active->apply_point = apply_point;
1831 	}
1832 }
1833 
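/*
 * Iterate over all debug TLVs stored for an apply point and dispatch each
 * one according to its type: buffer allocation, host command, regions or
 * triggers.
 */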
1834 static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
1835 				    struct iwl_apply_point_data *data,
1836 				    enum iwl_fw_ini_apply_point pnt,
1837 				    bool ext)
1838 {
1839 	void *iter = data->data;
1840 
1841 	while (iter && iter < data->data + data->size) {
1842 		struct iwl_ucode_tlv *tlv = iter;
1843 		void *ini_tlv = (void *)tlv->data;
1844 		u32 type = le32_to_cpu(tlv->type);
1845 
1846 		switch (type) {
1847 		case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1848 			iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
1849 			break;
1850 		case IWL_UCODE_TLV_TYPE_HCMD:
1851 			if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
1852 				IWL_ERR(fwrt,
1853 					"Invalid apply point %x for host command\n",
1854 					pnt);
1855 				goto next;
1856 			}
1857 			iwl_fw_dbg_send_hcmd(fwrt, tlv);
1858 			break;
1859 		case IWL_UCODE_TLV_TYPE_REGIONS:
1860 			iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
1861 			break;
1862 		case IWL_UCODE_TLV_TYPE_TRIGGERS:
1863 			iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
1864 			break;
1865 		case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
1866 			break;
1867 		default:
1868 			WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
1869 			break;
1870 		}
1871 next:
1872 		iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
1873 	}
1874 }
1875 
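/*
 * Apply both the default and the external (overriding) debug TLV data that
 * was stored for the given apply point.
 */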
1876 void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
1877 			    enum iwl_fw_ini_apply_point apply_point)
1878 {
1879 	void *data = &fwrt->trans->apply_points[apply_point];
1880 
1881 	_iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
1882 
1883 	data = &fwrt->trans->apply_points_ext[apply_point];
1884 	_iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
1885 }
1886 IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
1887