/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"
#if defined(__FreeBSD__)
#include <linux/skbuff.h>
#include "iwl-modparams.h"
#endif

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * the PCIe access to the underlying hardware. The transport layer doesn't
 * provide any policy, algorithm or anything of this kind, but only mechanisms
 * to make the HW do something. It is not completely stateless, but close to it.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course, this function is bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */

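/*
 * A minimal sketch of the life cycle above, as an op_mode might drive it.
 * This is illustrative only: the function name, firmware image argument
 * and error handling are placeholders, not part of this header's API.
 */
#if 0	/* usage sketch, not compiled */
static int example_opmode_start(struct iwl_trans *trans,
				const struct iwl_fw *fw)
{
	int ret;

	/* step 4: start the HW, then the firmware */
	ret = iwl_trans_start_hw(trans);
	if (ret)
		return ret;

	ret = iwl_trans_start_fw(trans, fw, IWL_UCODE_REGULAR, false);
	if (ret) {
		/* step 5: stop the device on failure (or reset) */
		iwl_trans_stop_device(trans);
		return ret;
	}

	return 0;
}
#endif
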
/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

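/*
 * Illustrative sketch of the length helpers above on a received packet;
 * the handler name and pr_info() reporting are assumptions made for the
 * example only.
 */
#if 0	/* usage sketch, not compiled */
static void example_notif_handler(struct iwl_rx_packet *pkt)
{
	u32 len = iwl_rx_packet_len(pkt);		  /* hdr + payload */
	u32 payload_len = iwl_rx_packet_payload_len(pkt); /* payload only */

	pr_info("cmd 0x%x: %u payload bytes (frame %u bytes)\n",
		pkt->hdr.cmd, payload_len, len);
}
#endif
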
/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_BLOCK_TXQS		= BIT(3),
};
#define CMD_MODE_BITS 5

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 *
 * @hdr: command header
 * @payload: payload for the command
 * @hdr_wide: wide command header
 * @payload_wide: payload for the wide command
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 *	[ FreeBSD uses @_page instead ]
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
#if defined(__linux__)
	unsigned long _rx_page_addr;
#elif defined(__FreeBSD__)
	struct page *_page;
#endif
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
#if defined(__linux__)
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
#elif defined(__FreeBSD__)
	__free_pages(cmd->_page, cmd->_rx_page_order);
#endif
}

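/*
 * Illustrative sketch of a synchronous host command whose response is
 * inspected and then freed; the command ID is a placeholder. The point
 * shown is the mandatory CMD_WANT_SKB/iwl_free_resp() pairing described
 * in enum CMD_MODE above.
 */
#if 0	/* usage sketch, not compiled */
static int example_send_sync_cmd(struct iwl_trans *trans, u32 cmd_id)
{
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ret;

	/* ... inspect hcmd.resp_pkt here ... */

	iwl_free_resp(&hcmd);	/* mandatory with CMD_WANT_SKB */
	return 0;
}
#endif
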
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 * @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
 *	via iwl_trans_finish_sw_reset()
 * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
 *	the firmware state yet
 * @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
 *	attempt another reset yet
 * @STATUS_SUSPENDED: device is suspended, don't send commands that
 *	aren't marked accordingly
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
	STATUS_IN_SW_RESET,
	STATUS_RESET_PENDING,
	STATUS_TRANS_RESET_IN_PROGRESS,
	STATUS_SUSPENDED,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }

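/*
 * Illustrative sketch of how an opmode builds its command-name tables for
 * debugging with HCMD_NAME()/HCMD_ARR(); the command IDs and group index
 * below are placeholders standing in for entries from the fw API headers.
 */
#if 0	/* usage sketch, not compiled */
static const struct iwl_hcmd_names example_legacy_names[] = {
	HCMD_NAME(UCODE_ALIVE_NTFY),
	HCMD_NAME(REPLY_ERROR),
};

static const struct iwl_hcmd_arr example_command_groups[] = {
	[0x0] = HCMD_ARR(example_legacy_names),
};
#endif
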
/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * These values should be set before iwl_trans_op_mode_enter().
 *
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be used as the RX buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 * @wide_cmd_header: true when ucode supports wide command header format
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dsbr_urm_fw_dependent: switch to URM based on fw settings
 * @dsbr_urm_permanent: switch to URM permanently
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
 */
struct iwl_trans_config {
	u8 cmd_queue;
	u8 cmd_fifo;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;

	bool wide_cmd_header;
	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	u8 dsbr_urm_fw_dependent:1,
	   dsbr_urm_permanent:1,
	   ext_32khz_clock_valid:1;

	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;
};

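/*
 * Illustrative sketch of an opmode filling a minimal configuration before
 * iwl_trans_op_mode_enter(); the queue/FIFO numbers are placeholders, not
 * values any real opmode necessarily uses.
 */
#if 0	/* usage sketch, not compiled */
static void example_configure_trans(struct iwl_trans *trans)
{
	trans->conf.cmd_queue = 0;
	trans->conf.cmd_fifo = 7;
	trans->conf.rx_buf_size = IWL_AMSDU_4K;
	trans->conf.wide_cmd_header = true;
}
#endif
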
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of pnvm payloads
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contain the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES      32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8  pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last handling of reset during debug timepoint
 * @imr_data: IMR debug data allocation
 * @dump_file_name_ext: dump file name extension
 * @dump_file_name_ext_valid: whether the dump file name extension is valid
 * @num_pc: number of program counters for cpu
 * @pc_data: details of the program counter
 * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
	bool dump_file_name_ext_valid;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
	bool yoyo_bin_loaded;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags: CMD_MODE_BITS;
	/* sg_offset is valid if it is non-zero */
	u32 sg_offset: PAGE_SHIFT;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @reclaim_lock: reclaim lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	/* lock to prevent concurrent reclaim */
	spinlock_t reclaim_lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

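/*
 * Sketch of the SW-window indexing described above: a HW descriptor index
 * (0..255) maps into the smaller entries[] array by masking with the
 * window size. This mirrors what the transport does internally, assuming
 * n_window is a power of two as in the layout shown in the comment.
 */
#if 0	/* usage sketch, not compiled */
static inline int example_get_cmd_index(const struct iwl_txq *txq, int idx)
{
	return idx & (txq->n_window - 1);
}
#endif
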
/**
 * struct iwl_trans_info - transport info for outside use
 * @name: the device name
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: the mac step of the HW
 * @hw_rf_id: the device RF ID
 * @hw_cnv_id: the device CNV ID
 * @hw_crf_id: the device CRF ID
 * @hw_wfpm_id: the device wfpm ID
 * @hw_id: the ID of the device / sub-device
 *	Bits 0:15 represent the sub-device ID
 *	Bits 16:31 represent the device ID.
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @num_rxqs: number of RX queues allocated by the transport
 */
struct iwl_trans_info {
	const char *name;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_wfpm_id;
	u32 hw_id;
	u8 pcie_link_speed;
	u8 num_rxqs;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @op_mode: pointer to the op_mode
 * @mac_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @conf: configuration set by the opmode before enter
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @info: device information for use by other layers
 * @pnvm_loaded: indicates PNVM was loaded
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @reduce_power_loaded: indicates reduced power section was loaded
 * @failed_to_load_reduce_power_image: set to true if loading the reduce
 *	power image failed
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_pool_name: name for the TX command allocation pool
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @reduced_cap_sku: reduced capability supported SKU
 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
 * @restart: restart worker data
 * @restart.wk: restart worker
 * @restart.mode: reset/restart error mode information
 * @restart.during_reset: error occurred during previous software reset
 * @trans_specific: data for the specific transport this is allocated for/with
 * @request_top_reset: TOP reset was requested, used by the reset
 *	worker that should be scheduled (with appropriate reason)
 * @do_top_reset: indication to the (PCIe) transport/context-info
 *	to do the TOP reset
 */
struct iwl_trans {
	bool csme_own;
	struct iwl_op_mode *op_mode;
	const struct iwl_mac_cfg *mac_cfg;
	const struct iwl_rf_cfg *cfg;
	struct iwl_drv *drv;
	struct iwl_trans_config conf;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;

	const struct iwl_trans_info info;
	bool reduced_cap_sku;
	bool step_urm;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	struct {
		struct delayed_work wk;
		struct iwl_fw_error_dump_mode mode;
		bool during_reset;
	} restart;

	u8 request_top_reset:1,
	   do_top_reset:1;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);

void iwl_trans_op_mode_enter(struct iwl_trans *trans,
			     struct iwl_op_mode *op_mode);

int iwl_trans_start_hw(struct iwl_trans *trans);

void iwl_trans_op_mode_leave(struct iwl_trans *trans);

void iwl_trans_fw_alive(struct iwl_trans *trans);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
		       enum iwl_ucode_type ucode_type, bool run_in_rfkill);

void iwl_trans_stop_device(struct iwl_trans *trans);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx);

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

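/*
 * Sketch of the alloc/tx/free pairing: on a successful iwl_trans_tx() the
 * transport owns the command and frees it on reclaim, so the explicit
 * free below is only for the failure path. The queue number is a
 * placeholder.
 */
#if 0	/* usage sketch, not compiled */
static int example_tx(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct iwl_device_tx_cmd *dev_cmd;
	int ret;

	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
	if (!dev_cmd)
		return -ENOMEM;

	ret = iwl_trans_tx(trans, skb, dev_cmd, 0);
	if (ret)
		iwl_trans_free_tx_cmd(trans, dev_cmd);

	return ret;
}
#endif
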
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush);

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode);

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);

void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	\
	({							\
		if (__builtin_constant_p(bufsize))		\
			BUILD_BUG_ON((bufsize) % sizeof(u32));	\
		iwl_trans_read_mem(trans, addr, buf,		\
				   (bufsize) / sizeof(u32));	\
	})

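/*
 * Illustrative use of iwl_trans_read_mem_bytes(): the buffer size must be
 * a multiple of sizeof(u32), which the macro checks at build time when
 * the size is a compile-time constant. The address is a placeholder.
 */
#if 0	/* usage sketch, not compiled */
static int example_read_table(struct iwl_trans *trans, u32 addr)
{
	u32 table[8];

	return iwl_trans_read_mem_bytes(trans, addr, table, sizeof(table));
}
#endif
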
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt);

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords);

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);

int iwl_trans_sw_reset(struct iwl_trans *trans);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely(_iwl_trans_grab_nic_access(trans)))

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);

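/*
 * Sketch of the grab/release pairing for direct register access: between
 * the two calls the NIC is guaranteed to be awake, so plain read32/write32
 * accesses are safe. The register offset is up to the caller.
 */
#if 0	/* usage sketch, not compiled */
static u32 example_read_reg_awake(struct iwl_trans *trans, u32 ofs)
{
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans)) {
		val = iwl_trans_read32(trans, ofs);
		iwl_trans_release_nic_access(trans);
	}

	return val;
}
#endif
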
static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
					    enum iwl_fw_error_type type)
{
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;
	/* clear this on device init, not cleared on any unbind/reprobe */
	if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status))
		return;

	trans->restart.mode.type = type;
	trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;

	set_bit(STATUS_RESET_PENDING, &trans->status);

	/*
	 * keep track of whether or not this happened while resetting,
	 * by the time the worker runs it might have finished
	 */
	trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
					       &trans->status);
	queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans,
				      enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		trans->state = IWL_TRANS_NO_FW;
		iwl_op_mode_nic_error(trans->op_mode, type);
		iwl_trans_schedule_reset(trans, type);
	}
}

static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
					     enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	set_bit(STATUS_IN_SW_RESET, &trans->status);

	if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
		return;

	if (!trans->op_mode->ops->sw_reset ||
	    !trans->op_mode->ops->sw_reset(trans->op_mode, type))
		clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

void iwl_trans_sync_nmi(struct iwl_trans *trans);

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa);

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);

static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
{
	clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_mac_cfg *mac_cfg,
				  unsigned int txcmd_size,
				  unsigned int txcmd_align);
void iwl_trans_free(struct iwl_trans *trans);

static inline bool iwl_trans_is_hw_error_value(u32 val)
{
	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}

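/*
 * Sketch showing the intended use of iwl_trans_is_hw_error_value(): reads
 * returning the 0xa5a5a5a. / 0x5a5a5a5. patterns indicate the value came
 * from a dead/absent device and must not be trusted.
 */
#if 0	/* usage sketch, not compiled */
static bool example_mem_read_ok(struct iwl_trans *trans, u32 addr, u32 *val)
{
	*val = iwl_trans_read_mem32(trans, addr);

	return !iwl_trans_is_hw_error_value(*val);
}
#endif
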
void iwl_trans_free_restart_list(void);

static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
{
	u16 result = trans->cfg->num_rbds;

	/*
	 * Since the AX210 family (So/Ty) the device cannot put multiple
	 * frames into the same buffer, so double the value for them.
	 */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 2 * result;
	return result;
}

static inline void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
{
	set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
}

static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
	return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
}

static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
{
	return test_bit(STATUS_TRANS_DEAD, &trans->status);
}

/*****************************************************
 * PCIe handling
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

/* Note: order matters */
enum iwl_reset_mode {
	/* upper level modes: */
	IWL_RESET_MODE_SW_RESET,
	IWL_RESET_MODE_REPROBE,
	/* TOP reset doesn't require PCIe remove */
	IWL_RESET_MODE_TOP_RESET,
	/* PCIE level modes: */
	IWL_RESET_MODE_REMOVE_ONLY,
	IWL_RESET_MODE_RESCAN,
	IWL_RESET_MODE_FUNC_RESET,
	IWL_RESET_MODE_PROD_RESET,

	/* keep last - special backoff value */
	IWL_RESET_MODE_BACKOFF,
};

void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd);

/* Internal helper */
static inline void iwl_trans_set_info(struct iwl_trans *trans,
				      struct iwl_trans_info *info)
{
	struct iwl_trans_info *write;

	write = (void *)(uintptr_t)&trans->info;
	*write = *info;
}

static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
{
	return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}

#endif /* __iwl_trans_h__ */