/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"
#if defined(__FreeBSD__)
#include <linux/skbuff.h>
#include "iwl-modparams.h"
#endif

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides PCIe access to the underlying hardware. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless,
 * but close to it.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe function calls the transport layer's allocation
 *	   function; this function is, of course, bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *
 *	4) At some point (e.g. mac80211's start callback), the op_mode will
 *	   call the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
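
/*
 * As a minimal sketch (error handling omitted, "fw" being a previously
 * parsed firmware image), the op_mode side of this life cycle might look
 * like:
 *
 *	if (!iwl_trans_start_hw(trans) &&
 *	    !iwl_trans_start_fw(trans, fw, false))
 *		... run ...
 *	iwl_trans_stop_device(trans);
 *	iwl_trans_free(trans);
 */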

/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
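
/*
 * Illustrative only: combining the helpers above with the FH_RSCSR_*
 * definitions to pick apart len_n_flags of a received packet:
 *
 *	u32 len_n_flags = le32_to_cpu(pkt->len_n_flags);
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 *	u32 rxq = (len_n_flags & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS;
 */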

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_BLOCK_TXQS		= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};
#define CMD_MODE_BITS 5

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 *
 * @hdr: command header
 * @payload: payload for the command
 * @hdr_wide: wide command header
 * @payload_wide: payload for the wide command
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
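
/*
 * A sketch of a two-chunk command (see &struct iwl_host_cmd below) that
 * maps a large trailing buffer with NOCOPY; the command id and buffer
 * names are hypothetical:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = SOME_CMD,				// hypothetical
 *		.data = { &fixed_part, big_buf, },
 *		.len = { sizeof(fixed_part), big_len, },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
 *	};
 */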

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 *  [ FreeBSD uses _page instead ]
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
#if defined(__linux__)
	unsigned long _rx_page_addr;
#elif defined(__FreeBSD__)
	struct page *_page;
#endif
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
#if defined(__linux__)
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
#elif defined(__FreeBSD__)
	__free_pages(cmd->_page, cmd->_rx_page_order);
#endif
}
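
/*
 * A minimal sketch of a synchronous command whose response is wanted
 * (the command id is hypothetical); with %CMD_WANT_SKB set, the caller
 * owns the response buffer and must release it with iwl_free_resp():
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = ECHO_CMD,		// hypothetical
 *		.flags = CMD_WANT_SKB,
 *	};
 *
 *	if (!iwl_trans_send_cmd(trans, &cmd)) {
 *		// inspect cmd.resp_pkt here
 *		iwl_free_resp(&cmd);
 *	}
 */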

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
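
/*
 * Illustrative use of the RX buffer helpers above: the op_mode usually
 * maps the buffer to an RX packet, and steals the page if it wants to
 * keep it past iwl_free_rxb() ("keep_page" is a hypothetical condition):
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *	if (keep_page)
 *		page = rxb_steal_page(rxb);
 */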

#define MAX_NO_RECLAIM_CMDS	6

/* build a mask with bits lo..hi (inclusive) set */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		/* 12K A-MSDUs are stored in 16K buffers */
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
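
/*
 * A sketch of how an op_mode might build its command-name tables with
 * the macros above (the command ids and group index are placeholders);
 * each array is expected to be sorted by cmd_id, which
 * iwl_cmd_groups_verify_sorted() below can check:
 *
 *	static const struct iwl_hcmd_names iwl_legacy_names[] = {
 *		HCMD_NAME(REPLY_ERROR),		// hypothetical ids
 *		HCMD_NAME(ECHO_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_cmd_groups[] = {
 *		[0x0] = HCMD_ARR(iwl_legacy_names),
 *	};
 */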

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be used as the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};
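
/*
 * A minimal sketch of filling the configuration (values illustrative,
 * not recommendations; iwl_cmd_groups is the hypothetical array from
 * the HCMD_ARR example above):
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = iwl_cmd_groups,
 *		.command_groups_size = ARRAY_SIZE(iwl_cmd_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */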

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of pnvm payloads
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for an NMI interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: DMA (physical) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contain the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES      32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8  pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV; uses &enum iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination, uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last handling of reset during debug timepoint
 * @imr_data: IMR debug data allocation
 * @dump_file_name_ext: dump file name extension
 * @dump_file_name_ext_valid: indicates whether the dump file name extension
 *	is valid
 * @num_pc: number of program counter entries
 * @pc_data: details of the program counter
 * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
	bool dump_file_name_ext_valid;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
	bool yoyo_bin_loaded;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags: CMD_MODE_BITS;
	/* sg_offset is valid if it is non-zero */
	u32 sg_offset: PAGE_SHIFT;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes located at offset 12, the size is 20 bytes.
 * If we make it bigger then allocations will be bigger and copying slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @reclaim_lock: reclaim lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index), host_w
 * @read_ptr: last used entry (index), host_r
 * @dma_addr: physical addr for BDs
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	/* lock to prevent concurrent reclaim */
	spinlock_t reclaim_lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_cnv_id: a u32 with the device CNV ID
 * @hw_crf_id: a u32 with the device CRF ID
 * @hw_wfpm_id: a u32 with the device wfpm ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @sku_id: the SKU identifier (for PNVM matching)
 * @pnvm_loaded: indicates PNVM was loaded
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: The mac step of the HW
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @reduce_power_loaded: indicates reduced power section was loaded
 * @failed_to_load_reduce_power_image: set to true if loading the reduce
 *	power image failed
 * @command_groups: pointer to command group name list array
 * @command_groups_size: array size of @command_groups
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_pool_name: name for the TX command allocation pool
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @name: the device name
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @invalid_tx_cmd: invalid TX command buffer
 * @reduced_cap_sku: reduced capability supported SKU
 * @no_160: device not supporting 160 MHz
 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
 * @trans_specific: data for the specific transport this is allocated for/with
 */
struct iwl_trans {
	bool csme_own;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_wfpm_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];
	bool reduced_cap_sku;
	u8 no_160:1, step_urm:1;

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;

	u8 pcie_link_speed;

	struct iwl_dma_ptr invalid_tx_cmd;

	/*
	 * Pointer to trans-specific data; ensure that this pointer is
	 * always aligned to the size of a pointer.
	 */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg);

int iwl_trans_start_hw(struct iwl_trans *trans);

void iwl_trans_op_mode_leave(struct iwl_trans *trans);

void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill);

void iwl_trans_stop_device(struct iwl_trans *trans);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx);

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue);
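
/*
 * A sketch of the TX path built from the helpers above (txq_id is the
 * caller's queue; real code checks the allocation and frees the command
 * if the transmit fails):
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (dev_cmd) {
 *		// fill dev_cmd->hdr and payload for the opmode's TX cmd
 *		if (iwl_trans_tx(trans, skb, dev_cmd, txq_id))
 *			iwl_trans_free_tx_cmd(trans, dev_cmd);
 *	}
 */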

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush);

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode);

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);

void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
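
/*
 * Example usage (the address is illustrative): bufsize must be a whole
 * number of dwords, which the macro checks at build time when it is a
 * compile-time constant:
 *
 *	u32 buf[4];
 *
 *	iwl_trans_read_mem_bytes(trans, 0x800000, buf, sizeof(buf));
 */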

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt);

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords);

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);

int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely(_iwl_trans_grab_nic_access(trans)))

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);
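
/*
 * The usual pattern, as a sketch ("reg" is the caller's PRPH offset):
 * grab NIC access, do the low-level I/O, then release:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, reg);
 *
 *		iwl_trans_release_nic_access(trans);
 *	}
 */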

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		trans->state = IWL_TRANS_NO_FW;
		iwl_op_mode_nic_error(trans->op_mode, sync);
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

void iwl_trans_sync_nmi(struct iwl_trans *trans);

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa);

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
			  struct device *dev,
			  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

static inline bool iwl_trans_is_hw_error_value(u32 val)
{
	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}
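
/*
 * A sketch of sanity-checking a read value with the helper above (note
 * that 0xa5a5a5a5 is also what iwl_trans_read_mem32() returns on
 * failure); the error handling is illustrative:
 *
 *	u32 val = iwl_trans_read_mem32(trans, addr);
 *
 *	if (iwl_trans_is_hw_error_value(val))
 *		return -EIO;
 */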

/*****************************************************
 * PCIe handling
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd);

#endif /* __iwl_trans_h__ */