/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"
#if defined(__FreeBSD__)
#include <linux/skbuff.h>
#include "iwl-modparams.h"
#endif

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides PCIe access to the underlying hardware. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless,
 * but close to it.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation functions.
 *	   Of course these functions are bus specific.
 *	3) These allocation functions will spawn the upper layer which will
 *	   register with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
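
/*
 * A minimal sketch of steps 4 and 5 above, as an op_mode might drive them
 * through the iwl_trans_* entry points declared later in this file. This is
 * illustrative only; @fw is a placeholder for whatever &struct fw_img the
 * op_mode selected from the loaded firmware.
 *
 *	static int example_op_mode_start(struct iwl_trans *trans,
 *					 const struct fw_img *fw)
 *	{
 *		int ret;
 *
 *		ret = iwl_trans_start_hw(trans);
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *		if (ret)
 *			iwl_trans_stop_device(trans);
 *		return ret;
 *	}
 */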

/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
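
/*
 * A small illustrative helper (not part of this header) showing how the
 * other len_n_flags bit fields are decoded with the FH_RSCSR_* definitions
 * above, here for the RX queue number in bits 21-16:
 *
 *	static inline u32 example_rx_packet_rxq(const struct iwl_rx_packet *pkt)
 *	{
 *		return (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
 *			FH_RSCSR_RXQ_POS;
 *	}
 */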

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_BLOCK_TXQS		= BIT(3),
	CMD_SEND_IN_D3          = BIT(4),
};
#define CMD_MODE_BITS 5

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 *
 * @hdr: command header
 * @payload: payload for the command
 * @hdr_wide: wide command header
 * @payload_wide: payload for the wide command
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that compose the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 *	[ FreeBSD uses _page instead ]
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
#if defined(__linux__)
	unsigned long _rx_page_addr;
#elif defined(__FreeBSD__)
	struct page *_page;
#endif
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
#if defined(__linux__)
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
#elif defined(__FreeBSD__)
	__free_pages(cmd->_page, cmd->_rx_page_order);
#endif
}
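
/*
 * A usage sketch (not part of this header's API contract): sending a
 * synchronous command with %CMD_WANT_SKB and releasing the response. The
 * command id and payload here are hypothetical placeholders.
 *
 *	static int example_send_sync_cmd(struct iwl_trans *trans, u32 cmd_id,
 *					 const void *payload, u16 payload_len)
 *	{
 *		struct iwl_host_cmd cmd = {
 *			.id = cmd_id,
 *			.flags = CMD_WANT_SKB,
 *			.data = { payload, },
 *			.len = { payload_len, },
 *		};
 *		int ret;
 *
 *		ret = iwl_trans_send_cmd(trans, &cmd);
 *		if (ret)
 *			return ret;
 *
 *		// inspect cmd.resp_pkt here, then release it
 *		iwl_free_resp(&cmd);
 *		return 0;
 *	}
 */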

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
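
/*
 * Illustrative sketch only: an RX handler typically maps the buffer to the
 * packet header with rxb_addr() and sizes the payload with the helpers
 * defined earlier. Nothing below is declared by this header.
 *
 *	static void example_rx(struct iwl_rx_cmd_buffer *rxb)
 *	{
 *		struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *		u32 len = iwl_rx_packet_payload_len(pkt);
 *
 *		// ... consume pkt->data[0..len-1] ...
 *	}
 */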

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
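
/*
 * IWL_MASK() sets all bits from lo to hi inclusive; e.g. IWL_MASK(0, 13)
 * is (1 << 13) | ((1 << 13) - 1) == 0x3FFF, which matches
 * FH_RSCSR_FRAME_SIZE_MSK above.
 */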

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}
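
/*
 * Note that the IWL_AMSDU_12K case maps to a 16 KiB buffer order. A minimal
 * allocation sketch, assuming a sleepable context and a 4 KiB PAGE_SIZE for
 * the order-0 remark:
 *
 *	int order = iwl_trans_get_rb_size_order(IWL_AMSDU_4K);	// 0 on 4k pages
 *	struct page *p = alloc_pages(GFP_KERNEL, order);
 */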

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
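
/*
 * Sketch of how an op_mode typically builds its debug name tables with the
 * two macros above; EXAMPLE_CMD is a hypothetical command id, not one
 * defined by this header:
 *
 *	static const struct iwl_hcmd_names example_names[] = {
 *		HCMD_NAME(EXAMPLE_CMD),
 *	};
 *	static const struct iwl_hcmd_arr example_groups[] = {
 *		HCMD_ARR(example_names),
 *	};
 */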

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};
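
/*
 * A purely illustrative implementation: a caller of iwl_trans_dump_data()
 * (declared below) could zero out sensitive TX FIFO contents like this.
 *
 *	static void example_frob_txf(void *ctx, void *buf, size_t buflen)
 *	{
 *		memset(buf, 0, buflen);	// drop all TX FIFO payload bytes
 *	}
 *
 *	static const struct iwl_dump_sanitize_ops example_sanitize_ops = {
 *		.frob_txf = example_frob_txf,
 *	};
 */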

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of the pnvm payloads.
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};
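
/*
 * Illustrative only: walking the parsed PNVM payloads. @img is assumed to
 * have been filled by the PNVM parsing code elsewhere in the driver.
 *
 *	static u32 example_pnvm_total_len(const struct iwl_pnvm_image *img)
 *	{
 *		u32 i, total = 0;
 *
 *		for (i = 0; i < img->n_chunks; i++)
 *			total += img->chunks[i].len;
 *		return total;
 *	}
 */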

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contain the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES      32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8  pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last handling of reset during debug timepoint
 * @imr_data: IMR debug data allocation
 * @dump_file_name_ext: dump file name extension
 * @dump_file_name_ext_valid: whether the dump file name extension is valid
 * @num_pc: number of program counters for cpu
 * @pc_data: details of the program counter
 * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
	bool dump_file_name_ext_valid;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
	bool yoyo_bin_loaded;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags: CMD_MODE_BITS;
	/* sg_offset is valid if it is non-zero */
	u32 sg_offset: PAGE_SHIFT;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the size is 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @reclaim_lock: reclaim lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	/* lock to prevent concurrent reclaim */
	spinlock_t reclaim_lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
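
/*
 * A sketch of the windowing described above, assuming (as the 256/32 sizes
 * suggest) that n_window is a power of two: a HW descriptor index is folded
 * into the SW entries window by masking.
 *
 *	static inline int example_txq_sw_index(const struct iwl_txq *txq,
 *					       int hw_idx)
 *	{
 *		return hw_idx & (txq->n_window - 1);
 *	}
 */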

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_cnv_id: a u32 with the device CNV ID
 * @hw_crf_id: a u32 with the device CRF ID
 * @hw_wfpm_id: a u32 with the device wfpm ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @sku_id: the SKU identifier (for PNVM matching)
 * @pnvm_loaded: indicates PNVM was loaded
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: The mac step of the HW
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @reduce_power_loaded: indicates reduced power section was loaded
 * @failed_to_load_reduce_power_image: set to true if pnvm loading failed
 * @command_groups: pointer to command group name list array
 * @command_groups_size: array size of @command_groups
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_pool_name: name for the TX command allocation pool
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @name: the device name
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @invalid_tx_cmd: invalid TX command buffer
 * @reduced_cap_sku: reduced capability supported SKU
 * @no_160: device not supporting 160 MHz
 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
 * @trans_specific: data for the specific transport this is allocated for/with
 */
struct iwl_trans {
	bool csme_own;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_wfpm_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];
	bool reduced_cap_sku;
	u8 no_160:1, step_urm:1;

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;

	u8 pcie_link_speed;

	struct iwl_dma_ptr invalid_tx_cmd;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

967bfcc09ddSBjoern A. Zeeb const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
968bfcc09ddSBjoern A. Zeeb int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
969bfcc09ddSBjoern A. Zeeb 
970*a4128aadSBjoern A. Zeeb void iwl_trans_configure(struct iwl_trans *trans,
971*a4128aadSBjoern A. Zeeb 			 const struct iwl_trans_config *trans_cfg);
972bfcc09ddSBjoern A. Zeeb 
973*a4128aadSBjoern A. Zeeb int iwl_trans_start_hw(struct iwl_trans *trans);
974bfcc09ddSBjoern A. Zeeb 
975*a4128aadSBjoern A. Zeeb void iwl_trans_op_mode_leave(struct iwl_trans *trans);
976bfcc09ddSBjoern A. Zeeb 
977*a4128aadSBjoern A. Zeeb void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
978bfcc09ddSBjoern A. Zeeb 
979*a4128aadSBjoern A. Zeeb int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
980*a4128aadSBjoern A. Zeeb 		       bool run_in_rfkill);
981bfcc09ddSBjoern A. Zeeb 
982*a4128aadSBjoern A. Zeeb void iwl_trans_stop_device(struct iwl_trans *trans);
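
/*
 * Minimal bring-up/teardown sketch, assuming the op_mode already holds a
 * firmware image @fw (error handling trimmed):
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (ret)
 *		return ret;
 *	ret = iwl_trans_start_fw(trans, fw, false);
 *	if (ret)
 *		iwl_trans_stop_device(trans);
 */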
983bfcc09ddSBjoern A. Zeeb 
984*a4128aadSBjoern A. Zeeb int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
985bfcc09ddSBjoern A. Zeeb 
986*a4128aadSBjoern A. Zeeb int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
987*a4128aadSBjoern A. Zeeb 			bool test, bool reset);
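
/*
 * Suspend/resume sketch: the op_mode checks the D3 status on top of the
 * return value, since the firmware may have lost its state while the
 * platform slept (names mirror the parameters above):
 *
 *	ret = iwl_trans_d3_suspend(trans, false, reset);
 *	...
 *	ret = iwl_trans_d3_resume(trans, &d3_status, false, reset);
 *	if (!ret && d3_status != IWL_D3_STATUS_ALIVE)
 *		ret = -ENOENT;
 */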
988bfcc09ddSBjoern A. Zeeb 
989*a4128aadSBjoern A. Zeeb struct iwl_trans_dump_data *
990bfcc09ddSBjoern A. Zeeb iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
991bfcc09ddSBjoern A. Zeeb 		    const struct iwl_dump_sanitize_ops *sanitize_ops,
992*a4128aadSBjoern A. Zeeb 		    void *sanitize_ctx);
993bfcc09ddSBjoern A. Zeeb 
994bfcc09ddSBjoern A. Zeeb static inline struct iwl_device_tx_cmd *
995bfcc09ddSBjoern A. Zeeb iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
996bfcc09ddSBjoern A. Zeeb {
997bfcc09ddSBjoern A. Zeeb 	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
998bfcc09ddSBjoern A. Zeeb }
999bfcc09ddSBjoern A. Zeeb 
1000bfcc09ddSBjoern A. Zeeb int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
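
/*
 * Host command round-trip sketch; ECHO_CMD and iwl_free_resp() are
 * defined elsewhere in the driver and assumed here. CMD_WANT_SKB asks
 * the transport to hand back the response buffer, which the caller must
 * then free:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = ECHO_CMD,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 */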
1001bfcc09ddSBjoern A. Zeeb 
1002bfcc09ddSBjoern A. Zeeb static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
1003bfcc09ddSBjoern A. Zeeb 					 struct iwl_device_tx_cmd *dev_cmd)
1004bfcc09ddSBjoern A. Zeeb {
1005bfcc09ddSBjoern A. Zeeb 	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
1006bfcc09ddSBjoern A. Zeeb }
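
/*
 * Typical TX command life cycle (sketch): the op_mode allocates the
 * command and hands it to iwl_trans_tx() below; on success the command is
 * recycled when the frame is reclaimed, so it is freed here only on the
 * failure path:
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id))
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 */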
1007bfcc09ddSBjoern A. Zeeb 
1008*a4128aadSBjoern A. Zeeb int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1009*a4128aadSBjoern A. Zeeb 		 struct iwl_device_tx_cmd *dev_cmd, int queue);
1010bfcc09ddSBjoern A. Zeeb 
1011*a4128aadSBjoern A. Zeeb void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
1012*a4128aadSBjoern A. Zeeb 		       struct sk_buff_head *skbs, bool is_flush);
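
/*
 * Reclaim sketch: frames up to @ssn are moved onto a caller-provided list
 * and their TX status is then processed by the op_mode:
 *
 *	struct sk_buff_head skbs;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&skbs);
 *	iwl_trans_reclaim(trans, txq_id, ssn, &skbs, false);
 *	while ((skb = __skb_dequeue(&skbs)))
 *		...
 */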
1013bfcc09ddSBjoern A. Zeeb 
1014*a4128aadSBjoern A. Zeeb void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);
1015bfcc09ddSBjoern A. Zeeb 
1016*a4128aadSBjoern A. Zeeb void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1017*a4128aadSBjoern A. Zeeb 			   bool configure_scd);
1018bfcc09ddSBjoern A. Zeeb 
1019*a4128aadSBjoern A. Zeeb bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1020bfcc09ddSBjoern A. Zeeb 			      const struct iwl_trans_txq_scd_cfg *cfg,
1021*a4128aadSBjoern A. Zeeb 			      unsigned int queue_wdg_timeout);
1022bfcc09ddSBjoern A. Zeeb 
1023*a4128aadSBjoern A. Zeeb int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
1024*a4128aadSBjoern A. Zeeb 			       struct iwl_trans_rxq_dma_data *data);
1025bfcc09ddSBjoern A. Zeeb 
1026*a4128aadSBjoern A. Zeeb void iwl_trans_txq_free(struct iwl_trans *trans, int queue);
1027bfcc09ddSBjoern A. Zeeb 
1028*a4128aadSBjoern A. Zeeb int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1029*a4128aadSBjoern A. Zeeb 			u8 tid, int size, unsigned int wdg_timeout);
1030bfcc09ddSBjoern A. Zeeb 
1031*a4128aadSBjoern A. Zeeb void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1032*a4128aadSBjoern A. Zeeb 				   int txq_id, bool shared_mode);
1033bfcc09ddSBjoern A. Zeeb 
1034bfcc09ddSBjoern A. Zeeb static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1035bfcc09ddSBjoern A. Zeeb 					int fifo, int sta_id, int tid,
1036bfcc09ddSBjoern A. Zeeb 					int frame_limit, u16 ssn,
1037bfcc09ddSBjoern A. Zeeb 					unsigned int queue_wdg_timeout)
1038bfcc09ddSBjoern A. Zeeb {
1039bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_txq_scd_cfg cfg = {
1040bfcc09ddSBjoern A. Zeeb 		.fifo = fifo,
1041bfcc09ddSBjoern A. Zeeb 		.sta_id = sta_id,
1042bfcc09ddSBjoern A. Zeeb 		.tid = tid,
1043bfcc09ddSBjoern A. Zeeb 		.frame_limit = frame_limit,
1044bfcc09ddSBjoern A. Zeeb 		.aggregate = sta_id >= 0,
1045bfcc09ddSBjoern A. Zeeb 	};
1046bfcc09ddSBjoern A. Zeeb 
1047bfcc09ddSBjoern A. Zeeb 	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1048bfcc09ddSBjoern A. Zeeb }
1049bfcc09ddSBjoern A. Zeeb 
1050bfcc09ddSBjoern A. Zeeb static inline
1051bfcc09ddSBjoern A. Zeeb void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1052bfcc09ddSBjoern A. Zeeb 			     unsigned int queue_wdg_timeout)
1053bfcc09ddSBjoern A. Zeeb {
1054bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_txq_scd_cfg cfg = {
1055bfcc09ddSBjoern A. Zeeb 		.fifo = fifo,
1056bfcc09ddSBjoern A. Zeeb 		.sta_id = -1,
1057bfcc09ddSBjoern A. Zeeb 		.tid = IWL_MAX_TID_COUNT,
1058bfcc09ddSBjoern A. Zeeb 		.frame_limit = IWL_FRAME_LIMIT,
1059bfcc09ddSBjoern A. Zeeb 		.aggregate = false,
1060bfcc09ddSBjoern A. Zeeb 	};
1061bfcc09ddSBjoern A. Zeeb 
1062bfcc09ddSBjoern A. Zeeb 	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1063bfcc09ddSBjoern A. Zeeb }
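
/*
 * For a non-aggregating queue such as the command queue, the AC variant
 * above is sufficient (sketch; the queue id and FIFO come from the
 * op_mode's configuration):
 *
 *	iwl_trans_ac_txq_enable(trans, cmd_queue, IWL_MVM_TX_FIFO_CMD,
 *				wdg_timeout);
 */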
1064bfcc09ddSBjoern A. Zeeb 
1065*a4128aadSBjoern A. Zeeb void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1066*a4128aadSBjoern A. Zeeb 				unsigned long txqs, bool freeze);
1067bfcc09ddSBjoern A. Zeeb 
1068*a4128aadSBjoern A. Zeeb int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);
1069bfcc09ddSBjoern A. Zeeb 
1070*a4128aadSBjoern A. Zeeb int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);
1071bfcc09ddSBjoern A. Zeeb 
1072*a4128aadSBjoern A. Zeeb void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);
1073bfcc09ddSBjoern A. Zeeb 
1074*a4128aadSBjoern A. Zeeb void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);
1075bfcc09ddSBjoern A. Zeeb 
1076*a4128aadSBjoern A. Zeeb u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);
1077bfcc09ddSBjoern A. Zeeb 
1078*a4128aadSBjoern A. Zeeb u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);
1079bfcc09ddSBjoern A. Zeeb 
1080*a4128aadSBjoern A. Zeeb void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
1081bfcc09ddSBjoern A. Zeeb 
1082*a4128aadSBjoern A. Zeeb int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1083*a4128aadSBjoern A. Zeeb 		       void *buf, int dwords);
1084bfcc09ddSBjoern A. Zeeb 
1085*a4128aadSBjoern A. Zeeb int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
1086*a4128aadSBjoern A. Zeeb 			    u32 *val);
1087bfcc09ddSBjoern A. Zeeb 
1088*a4128aadSBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUGFS
1089*a4128aadSBjoern A. Zeeb void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
1090*a4128aadSBjoern A. Zeeb #endif
1091bfcc09ddSBjoern A. Zeeb 
1092bfcc09ddSBjoern A. Zeeb #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
1093bfcc09ddSBjoern A. Zeeb 	do {								      \
1094bfcc09ddSBjoern A. Zeeb 		if (__builtin_constant_p(bufsize))			      \
1095bfcc09ddSBjoern A. Zeeb 			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
1096bfcc09ddSBjoern A. Zeeb 		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1097bfcc09ddSBjoern A. Zeeb 	} while (0)
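
/*
 * The BUILD_BUG_ON() above rejects, at compile time, buffer sizes that
 * are not a whole number of dwords. A sketched use, assuming an error
 * table struct whose size is dword-aligned by construction:
 *
 *	struct iwl_umac_error_event_table table;
 *
 *	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
 */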
1098bfcc09ddSBjoern A. Zeeb 
1099*a4128aadSBjoern A. Zeeb int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
1100*a4128aadSBjoern A. Zeeb 			    u64 src_addr, u32 byte_cnt);
1101d9836fb4SBjoern A. Zeeb 
1102bfcc09ddSBjoern A. Zeeb static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1103bfcc09ddSBjoern A. Zeeb {
1104bfcc09ddSBjoern A. Zeeb 	u32 value;
1105bfcc09ddSBjoern A. Zeeb 
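	/* recognizable poison, caught by iwl_trans_is_hw_error_value() */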
1106*a4128aadSBjoern A. Zeeb 	if (iwl_trans_read_mem(trans, addr, &value, 1))
1107bfcc09ddSBjoern A. Zeeb 		return 0xa5a5a5a5;
1108bfcc09ddSBjoern A. Zeeb 
1109bfcc09ddSBjoern A. Zeeb 	return value;
1110bfcc09ddSBjoern A. Zeeb }
1111bfcc09ddSBjoern A. Zeeb 
1112*a4128aadSBjoern A. Zeeb int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1113*a4128aadSBjoern A. Zeeb 			const void *buf, int dwords);
1114bfcc09ddSBjoern A. Zeeb 
1115bfcc09ddSBjoern A. Zeeb static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1116bfcc09ddSBjoern A. Zeeb 					u32 val)
1117bfcc09ddSBjoern A. Zeeb {
1118bfcc09ddSBjoern A. Zeeb 	return iwl_trans_write_mem(trans, addr, &val, 1);
1119bfcc09ddSBjoern A. Zeeb }
1120bfcc09ddSBjoern A. Zeeb 
1121*a4128aadSBjoern A. Zeeb void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);
1122bfcc09ddSBjoern A. Zeeb 
1123*a4128aadSBjoern A. Zeeb int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);
1124bfcc09ddSBjoern A. Zeeb 
1125*a4128aadSBjoern A. Zeeb void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
1126*a4128aadSBjoern A. Zeeb 			     u32 mask, u32 value);
1127*a4128aadSBjoern A. Zeeb 
1128*a4128aadSBjoern A. Zeeb bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
1129bfcc09ddSBjoern A. Zeeb 
1130bfcc09ddSBjoern A. Zeeb #define iwl_trans_grab_nic_access(trans)		\
1131bfcc09ddSBjoern A. Zeeb 	__cond_lock(nic_access,				\
1132*a4128aadSBjoern A. Zeeb 		    likely(_iwl_trans_grab_nic_access(trans)))
1133bfcc09ddSBjoern A. Zeeb 
1134*a4128aadSBjoern A. Zeeb void __releases(nic_access)
1135*a4128aadSBjoern A. Zeeb iwl_trans_release_nic_access(struct iwl_trans *trans);
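
/*
 * Canonical access pattern (sketch): grab NIC access, perform the raw
 * prph/mem I/O, then release; the __cond_lock() wrapper above lets sparse
 * track the critical section:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, ofs);
 *		...
 *		iwl_trans_release_nic_access(trans);
 *	}
 */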
1136bfcc09ddSBjoern A. Zeeb 
1137bfcc09ddSBjoern A. Zeeb static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
1138bfcc09ddSBjoern A. Zeeb {
1139bfcc09ddSBjoern A. Zeeb 	if (WARN_ON_ONCE(!trans->op_mode))
1140bfcc09ddSBjoern A. Zeeb 		return;
1141bfcc09ddSBjoern A. Zeeb 
1142bfcc09ddSBjoern A. Zeeb 	/* prevent double restarts due to the same erroneous FW */
1143bfcc09ddSBjoern A. Zeeb 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
1144bfcc09ddSBjoern A. Zeeb 		trans->state = IWL_TRANS_NO_FW;
1145*a4128aadSBjoern A. Zeeb 		iwl_op_mode_nic_error(trans->op_mode, sync);
1146bfcc09ddSBjoern A. Zeeb 	}
1147bfcc09ddSBjoern A. Zeeb }
1148bfcc09ddSBjoern A. Zeeb 
1149bfcc09ddSBjoern A. Zeeb static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
1150bfcc09ddSBjoern A. Zeeb {
1151bfcc09ddSBjoern A. Zeeb 	return trans->state == IWL_TRANS_FW_ALIVE;
1152bfcc09ddSBjoern A. Zeeb }
1153bfcc09ddSBjoern A. Zeeb 
1154*a4128aadSBjoern A. Zeeb void iwl_trans_sync_nmi(struct iwl_trans *trans);
1155bfcc09ddSBjoern A. Zeeb 
1156bfcc09ddSBjoern A. Zeeb void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
1157bfcc09ddSBjoern A. Zeeb 				  u32 sw_err_bit);
1158bfcc09ddSBjoern A. Zeeb 
1159*a4128aadSBjoern A. Zeeb int iwl_trans_load_pnvm(struct iwl_trans *trans,
11609af1bba4SBjoern A. Zeeb 			const struct iwl_pnvm_image *pnvm_data,
1161*a4128aadSBjoern A. Zeeb 			const struct iwl_ucode_capabilities *capa);
1162bfcc09ddSBjoern A. Zeeb 
1163*a4128aadSBjoern A. Zeeb void iwl_trans_set_pnvm(struct iwl_trans *trans,
1164*a4128aadSBjoern A. Zeeb 			const struct iwl_ucode_capabilities *capa);
1165bfcc09ddSBjoern A. Zeeb 
1166*a4128aadSBjoern A. Zeeb int iwl_trans_load_reduce_power(struct iwl_trans *trans,
11679af1bba4SBjoern A. Zeeb 				const struct iwl_pnvm_image *payloads,
1168*a4128aadSBjoern A. Zeeb 				const struct iwl_ucode_capabilities *capa);
11699af1bba4SBjoern A. Zeeb 
1170*a4128aadSBjoern A. Zeeb void iwl_trans_set_reduce_power(struct iwl_trans *trans,
1171*a4128aadSBjoern A. Zeeb 				const struct iwl_ucode_capabilities *capa);
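
/*
 * PNVM / reduce-power flow sketch: the payloads are DMA-loaded first and
 * the firmware is then pointed at them; the pnvm_loaded and
 * reduce_power_loaded bits in struct iwl_trans above cache the outcome so
 * the work is not redone on every firmware restart:
 *
 *	ret = iwl_trans_load_pnvm(trans, pnvm_data, capa);
 *	if (!ret)
 *		iwl_trans_set_pnvm(trans, capa);
 */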
1172bfcc09ddSBjoern A. Zeeb 
1173bfcc09ddSBjoern A. Zeeb static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1174bfcc09ddSBjoern A. Zeeb {
1175bfcc09ddSBjoern A. Zeeb 	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1176bfcc09ddSBjoern A. Zeeb 		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1177bfcc09ddSBjoern A. Zeeb }
1178bfcc09ddSBjoern A. Zeeb 
1179*a4128aadSBjoern A. Zeeb void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
1180bfcc09ddSBjoern A. Zeeb 
1181bfcc09ddSBjoern A. Zeeb /*****************************************************
1182bfcc09ddSBjoern A. Zeeb  * transport helper functions
1183bfcc09ddSBjoern A. Zeeb  *****************************************************/
1184bfcc09ddSBjoern A. Zeeb struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1185bfcc09ddSBjoern A. Zeeb 			  struct device *dev,
1186bfcc09ddSBjoern A. Zeeb 			  const struct iwl_cfg_trans_params *cfg_trans);
1187bfcc09ddSBjoern A. Zeeb int iwl_trans_init(struct iwl_trans *trans);
1188bfcc09ddSBjoern A. Zeeb void iwl_trans_free(struct iwl_trans *trans);
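
/*
 * Allocation sketch: a bus backend reserves its private area through
 * @priv_size, which becomes trans_specific above (names illustrative):
 *
 *	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
 *				cfg_trans);
 *	if (!trans)
 *		return ERR_PTR(-ENOMEM);
 *	ret = iwl_trans_init(trans);
 *	...
 *	iwl_trans_free(trans);
 */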
1189bfcc09ddSBjoern A. Zeeb 
11909af1bba4SBjoern A. Zeeb static inline bool iwl_trans_is_hw_error_value(u32 val)
11919af1bba4SBjoern A. Zeeb {
11929af1bba4SBjoern A. Zeeb 	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
11939af1bba4SBjoern A. Zeeb }
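
/*
 * Usage sketch: values in the 0xa5a5a5ax / 0x5a5a5a5x families indicate
 * the bus returned garbage (e.g. the device fell off the bus) rather than
 * real register contents:
 *
 *	u32 val = iwl_trans_read_prph(trans, ofs);
 *
 *	if (iwl_trans_is_hw_error_value(val))
 *		return -EIO;
 */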
11949af1bba4SBjoern A. Zeeb 
1195bfcc09ddSBjoern A. Zeeb /*****************************************************
1196*a4128aadSBjoern A. Zeeb  * PCIe handling
1197*a4128aadSBjoern A. Zeeb  *****************************************************/
1198bfcc09ddSBjoern A. Zeeb int __must_check iwl_pci_register_driver(void);
1199bfcc09ddSBjoern A. Zeeb void iwl_pci_unregister_driver(void);
12009af1bba4SBjoern A. Zeeb void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
1201bfcc09ddSBjoern A. Zeeb 
1202*a4128aadSBjoern A. Zeeb int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
1203*a4128aadSBjoern A. Zeeb 			     struct iwl_host_cmd *cmd);
1204*a4128aadSBjoern A. Zeeb 
1205bfcc09ddSBjoern A. Zeeb #endif /* __iwl_trans_h__ */
1206