/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * the PCIe access to the underlying hardware. The transport layer doesn't
 * provide any policy, algorithm or anything of this kind, but only mechanisms
 * to make the HW do something. It is not completely stateless but close to it.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course this function is bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
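/*
 * Illustrative sketch only, not an API defined by this header: an op_mode
 * would typically drive the sequence above roughly as follows (the
 * my_opmode_start() wrapper is hypothetical, error unwinding is elided):
 *
 *	static int my_opmode_start(struct iwl_trans *trans,
 *				   const struct iwl_fw *fw)
 *	{
 *		int ret = iwl_trans_start_hw(trans);
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, fw, IWL_UCODE_REGULAR, false);
 *		if (ret)
 *			iwl_trans_stop_device(trans);
 *		return ret;
 *	}
 */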

/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

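/*
 * The other FH_RSCSR_* fields can be extracted from len_n_flags the same
 * way as the length above. A minimal sketch, assuming the caller only needs
 * the RX queue index (this helper is illustrative and not provided here):
 *
 *	static inline u32 example_rx_packet_rxq(const struct iwl_rx_packet *pkt)
 *	{
 *		return (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
 *			FH_RSCSR_RXQ_POS;
 *	}
 */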
/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_BLOCK_TXQS		= BIT(3),
};
#define CMD_MODE_BITS 5

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 *
 * @hdr: command header
 * @payload: payload for the command
 * @hdr_wide: wide command header
 * @payload_wide: payload for the wide command
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
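/*
 * A hedged sketch of how these flags are combined in a host command: a small
 * fixed part is copied normally while a large buffer is mapped in place with
 * NOCOPY. EXAMPLE_CMD_ID, cmd_hdr and big_table are placeholders.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = EXAMPLE_CMD_ID,
 *		.data = { &cmd_hdr, big_table },
 *		.len = { sizeof(cmd_hdr), big_table_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 */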

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
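/*
 * Typical synchronous command flow, as a sketch only (error handling elided,
 * EXAMPLE_CMD_ID and struct example_notif are placeholders):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = EXAMPLE_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *	};
 *
 *	if (!iwl_trans_send_cmd(trans, &hcmd)) {
 *		struct example_notif *notif = (void *)hcmd.resp_pkt->data;
 *
 *		(use notif, bounded by iwl_rx_packet_payload_len(hcmd.resp_pkt))
 *
 *		iwl_free_resp(&hcmd);
 *	}
 */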

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
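/*
 * Sketch of how an RX handler typically gets at the packet inside an RX
 * command buffer (illustration only):
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 len = iwl_rx_packet_payload_len(pkt);
 *
 * The payload then lives in pkt->data[0..len - 1]. If the page must outlive
 * the handler, rxb_steal_page() takes a page reference and marks the buffer
 * as stolen so the RX path won't free it underneath the user.
 */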

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 * @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
 *	via iwl_trans_finish_sw_reset()
 * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
 *	the firmware state yet
 * @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
 *	attempt another reset yet
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
	STATUS_IN_SW_RESET,
	STATUS_RESET_PENDING,
	STATUS_TRANS_RESET_IN_PROGRESS,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
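/*
 * HCMD_NAME()/HCMD_ARR() build the debugging tables passed as
 * &struct iwl_trans_config->command_groups. A sketch with made-up command
 * names (purely illustrative):
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(EXAMPLE_ALIVE_NTFY),
 *		HCMD_NAME(EXAMPLE_TX_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr example_command_groups[] = {
 *		[0x0] = HCMD_ARR(example_legacy_names),
 *	};
 */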

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * These values should be set before iwl_trans_op_mode_enter().
 *
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 * @wide_cmd_header: true when ucode supports wide command header format
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dsbr_urm_fw_dependent: switch to URM based on fw settings
 * @dsbr_urm_permanent: switch to URM permanently
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
 */
struct iwl_trans_config {
	u8 cmd_queue;
	u8 cmd_fifo;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;

	bool wide_cmd_header;
	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	u8 dsbr_urm_fw_dependent:1,
	   dsbr_urm_permanent:1,
	   ext_32khz_clock_valid:1;

	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;
};
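/*
 * A minimal sketch of an op_mode filling this configuration before calling
 * iwl_trans_op_mode_enter(); the values and the example_command_groups
 * table are placeholders, not recommendations:
 *
 *	trans->conf.cmd_queue = 0;
 *	trans->conf.cmd_fifo = 7;
 *	trans->conf.rx_buf_size = IWL_AMSDU_4K;
 *	trans->conf.command_groups = example_command_groups;
 *	trans->conf.command_groups_size = ARRAY_SIZE(example_command_groups);
 *	trans->conf.wide_cmd_header = true;
 */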

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of the pnvm payloads.
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contains the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES      32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8  pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last handling of reset during debug timepoint
 * @imr_data: IMR debug data allocation
 * @num_pc: number of program counter for cpu
 * @pc_data: details of the program counter
 * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
	bool yoyo_bin_loaded;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags: CMD_MODE_BITS;
	/* sg_offset is valid if it is non-zero */
	u32 sg_offset: PAGE_SHIFT;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @reclaim_lock: reclaim lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr:  physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	/* lock to prevent concurrent reclaim */
	spinlock_t reclaim_lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_info - transport info for outside use
 * @name: the device name
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: The mac step of the HW
 * @hw_rf_id: the device RF ID
 * @hw_cnv_id: the device CNV ID
 * @hw_crf_id: the device CRF ID
 * @hw_id: the ID of the device / sub-device
 *	Bits 0:15 represent the sub-device ID
 *	Bits 16:31 represent the device ID.
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @num_rxqs: number of RX queues allocated by the transport
 */
struct iwl_trans_info {
	const char *name;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_id;
	u8 pcie_link_speed;
	u8 num_rxqs;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @op_mode: pointer to the op_mode
 * @mac_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @conf: configuration set by the opmode before enter
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @info: device information for use by other layers
 * @pnvm_loaded: indicates PNVM was loaded
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @reduce_power_loaded: indicates reduced power section was loaded
 * @failed_to_load_reduce_power_image: set to true if loading the reduce
 *	power image failed
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @reduced_cap_sku: reduced capability supported SKU
 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
 * @restart: restart worker data
 * @restart.wk: restart worker
 * @restart.mode: reset/restart error mode information
 * @restart.during_reset: error occurred during previous software reset
 * @trans_specific: data for the specific transport this is allocated for/with
 * @request_top_reset: TOP reset was requested, used by the reset
 *	worker that should be scheduled (with appropriate reason)
 * @do_top_reset: indication to the (PCIe) transport/context-info
 *	to do the TOP reset
 */
struct iwl_trans {
	bool csme_own;
	struct iwl_op_mode *op_mode;
	const struct iwl_mac_cfg *mac_cfg;
	const struct iwl_rf_cfg *cfg;
	struct iwl_drv *drv;
	struct iwl_trans_config conf;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;

	const struct iwl_trans_info info;
	bool reduced_cap_sku;
	bool step_urm;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	struct {
		struct delayed_work wk;
		struct iwl_fw_error_dump_mode mode;
		bool during_reset;
	} restart;

	u8 request_top_reset:1,
	   do_top_reset:1;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);

void iwl_trans_op_mode_enter(struct iwl_trans *trans,
			     struct iwl_op_mode *op_mode);

int iwl_trans_start_hw(struct iwl_trans *trans);

void iwl_trans_op_mode_leave(struct iwl_trans *trans);

void iwl_trans_fw_alive(struct iwl_trans *trans);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
		       enum iwl_ucode_type ucode_type, bool run_in_rfkill);

void iwl_trans_stop_device(struct iwl_trans *trans);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset);

int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx);

struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans);

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
			   struct iwl_device_tx_cmd *dev_cmd);

int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush);

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode);

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);

void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	\
	({							\
		if (__builtin_constant_p(bufsize))		\
			BUILD_BUG_ON((bufsize) % sizeof(u32));	\
		iwl_trans_read_mem(trans, addr, buf,		\
				   (bufsize) / sizeof(u32));	\
	})

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt);

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords);

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);

int iwl_trans_sw_reset(struct iwl_trans *trans);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely(_iwl_trans_grab_nic_access(trans)))

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);
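/*
 * The usual pattern for direct register access; every successful grab must
 * be balanced by a release (EXAMPLE_PRPH_REG is a placeholder offset):
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, EXAMPLE_PRPH_REG);
 *
 *		(use val)
 *
 *		iwl_trans_release_nic_access(trans);
 *	}
 */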

static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
					    enum iwl_fw_error_type type)
{
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;
	/* clear this on device init, not cleared on any unbind/reprobe */
	if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status))
		return;

	trans->restart.mode.type = type;
	trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;

	set_bit(STATUS_RESET_PENDING, &trans->status);

	/*
	 * keep track of whether or not this happened while resetting,
	 * by the time the worker runs it might have finished
	 */
	trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
					       &trans->status);
	queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans,
				      enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		trans->state = IWL_TRANS_NO_FW;
		iwl_op_mode_nic_error(trans->op_mode, type);
		iwl_trans_schedule_reset(trans, type);
	}
}

static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
					     enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	set_bit(STATUS_IN_SW_RESET, &trans->status);

	if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
		return;

	if (!trans->op_mode->ops->sw_reset ||
	    !trans->op_mode->ops->sw_reset(trans->op_mode, type))
		clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

void iwl_trans_sync_nmi(struct iwl_trans *trans);

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa);

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);

static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
{
	clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_mac_cfg *mac_cfg);
void iwl_trans_free(struct iwl_trans *trans);

static inline bool iwl_trans_is_hw_error_value(u32 val)
{
	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}

void iwl_trans_free_restart_list(void);

static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
{
	u16 result = trans->cfg->num_rbds;

	/*
	 * Since the AX210 family (So/Ty), the device cannot put multiple
	 * frames into the same buffer, so double the value for them.
	 */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 2 * result;
	return result;
}

static inline void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
{
	set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
}

static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
	return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
}

static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
{
	return test_bit(STATUS_TRANS_DEAD, &trans->status);
}

/*****************************************************
 * PCIe handling
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

/* Note: order matters */
enum iwl_reset_mode {
	/* upper level modes: */
	IWL_RESET_MODE_SW_RESET,
	IWL_RESET_MODE_REPROBE,
	/* TOP reset doesn't require PCIe remove */
	IWL_RESET_MODE_TOP_RESET,
	/* PCIE level modes: */
	IWL_RESET_MODE_REMOVE_ONLY,
	IWL_RESET_MODE_RESCAN,
	IWL_RESET_MODE_FUNC_RESET,
	IWL_RESET_MODE_PROD_RESET,

	/* keep last - special backoff value */
	IWL_RESET_MODE_BACKOFF,
};

void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd);

/* Internal helper */
static inline void iwl_trans_set_info(struct iwl_trans *trans,
				      struct iwl_trans_info *info)
{
	struct iwl_trans_info *write;

	write = (void *)(uintptr_t)&trans->info;
	*write = *info;
}

static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
{
	return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}

#endif /* __iwl_trans_h__ */