xref: /linux/drivers/net/wireless/intel/iwlwifi/iwl-trans.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24  * USA
25  *
26  * The full GNU General Public License is included in this distribution
27  * in the file called COPYING.
28  *
29  * Contact Information:
30  *  Intel Linux Wireless <linuxwifi@intel.com>
31  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32  *
33  * BSD LICENSE
34  *
35  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
36  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  *
44  *  * Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  *  * Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  *  * Neither the name Intel Corporation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  *****************************************************************************/
67 #ifndef __iwl_trans_h__
68 #define __iwl_trans_h__
69 
70 #include <linux/ieee80211.h>
71 #include <linux/mm.h> /* for page_address */
72 #include <linux/lockdep.h>
73 #include <linux/kernel.h>
74 
75 #include "iwl-debug.h"
76 #include "iwl-config.h"
77 #include "iwl-fw.h"
78 #include "iwl-op-mode.h"
79 
80 /**
81  * DOC: Transport layer - what is it?
82  *
83  * The transport layer is the layer that deals with the HW directly. It provides
84  * an abstraction of the underlying HW to the upper layer. The transport layer
85  * doesn't provide any policy, algorithm or anything of this kind, but only
86  * mechanisms to make the HW do something. It is not completely stateless but
87  * close to it.
88  * We will have an implementation for each different supported bus.
89  */
90 
91 /**
92  * DOC: Life cycle of the transport layer
93  *
94  * The transport layer has a very precise life cycle.
95  *
96  *	1) A helper function is called during the module initialization and
97  *	   registers the bus driver's ops with the transport's alloc function.
98  *	2) The bus driver's probe calls the transport layer's allocation
99  *	   function. Of course this function is bus specific.
100  *	3) This allocation function will spawn the upper layer which will
101  *	   register mac80211.
102  *
103  *	4) At some point (i.e. mac80211's start call), the op_mode will call
104  *	   the following sequence:
105  *	   start_hw
106  *	   start_fw
107  *
108  *	5) Then when finished (or reset):
109  *	   stop_device
110  *
111  *	6) Eventually, the free function will be called.
112  */
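
/*
 * Illustrative sketch of the sequence above, using the inline wrappers
 * defined later in this file (error handling and the bus-specific
 * allocation/probe step are omitted; "fw" stands for a struct fw_img the
 * op_mode obtained from the firmware file):
 *
 *	struct iwl_trans *trans = ...;	// allocated by the bus-specific probe
 *
 *	if (iwl_trans_start_hw(trans))			// step 4: start_hw
 *		goto err;
 *	if (iwl_trans_start_fw(trans, fw, false))	// step 4: start_fw
 *		goto err;
 *	...
 *	iwl_trans_stop_device(trans);			// step 5: stop_device
 *	iwl_trans_free(trans);				// step 6: free
 */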
113 
114 /**
115  * DOC: Host command section
116  *
117  * A host command is a command issued by the upper layer to the fw. There are
118  * several versions of fw that have several APIs. The transport layer is
119  * completely agnostic to these differences.
120  * The transport does provide helper functionality (e.g. SYNC / ASYNC mode).
121  */
122 #define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
123 #define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
124 #define SEQ_TO_INDEX(s)	((s) & 0xff)
125 #define INDEX_TO_SEQ(i)	((i) & 0xff)
126 #define SEQ_RX_FRAME	cpu_to_le16(0x8000)
127 
128 /*
129  * These functions retrieve specific information from the id field in
130  * the iwl_host_cmd struct, which contains the command id, the group id
131  * and the version of the command; iwl_cmd_id() goes the other way and
132  * builds an id from those parts.
133  */
134 static inline u8 iwl_cmd_opcode(u32 cmdid)
135 {
136 	return cmdid & 0xFF;
137 }
138 
139 static inline u8 iwl_cmd_groupid(u32 cmdid)
140 {
141 	return ((cmdid & 0xFF00) >> 8);
142 }
143 
144 static inline u8 iwl_cmd_version(u32 cmdid)
145 {
146 	return ((cmdid & 0xFF0000) >> 16);
147 }
148 
149 static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
150 {
151 	return opcode + (groupid << 8) + (version << 16);
152 }
153 
154 /* make u16 wide id out of u8 group and opcode */
155 #define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
156 #define DEF_ID(opcode) ((1 << 8) | (opcode))
157 
158 /* due to the conversion, this group is special; new groups
159  * should be defined in the appropriate fw-api header files
160  */
161 #define IWL_ALWAYS_LONG_GROUP	1
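
/*
 * Illustrative sketch (not part of the original header) of how the helpers
 * and macros above fit together; 0x1c is just an arbitrary opcode:
 *
 *	u32 id = iwl_cmd_id(0x1c, IWL_ALWAYS_LONG_GROUP, 0);
 *
 *	// iwl_cmd_opcode(id)  == 0x1c
 *	// iwl_cmd_groupid(id) == IWL_ALWAYS_LONG_GROUP
 *	// iwl_cmd_version(id) == 0
 *	// WIDE_ID(IWL_ALWAYS_LONG_GROUP, 0x1c) == (id & 0xffff)
 */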
162 
163 /**
164  * struct iwl_cmd_header
165  *
166  * This header format appears in the beginning of each command sent from the
167  * driver, and each response/notification received from uCode.
168  */
169 struct iwl_cmd_header {
170 	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
171 	u8 group_id;
172 	/*
173 	 * The driver sets up the sequence number to values of its choosing.
174 	 * uCode does not use this value, but passes it back to the driver
175 	 * when sending the response to each driver-originated command, so
176 	 * the driver can match the response to the command.  Since the values
177 	 * don't get used by uCode, the driver may set up an arbitrary format.
178 	 *
179 	 * There is one exception:  uCode sets bit 15 when it originates
180 	 * the response/notification, i.e. when the response/notification
181 	 * is not a direct response to a command sent by the driver.  For
182 	 * example, uCode issues REPLY_RX when it sends a received frame
183 	 * to the driver; it is not a direct response to any driver command.
184 	 *
185 	 * The Linux driver uses the following format:
186 	 *
187 	 *  0:7		tfd index - position within TX queue
188 	 *  8:12	TX queue id
189 	 *  13:14	reserved
190 	 *  15		unsolicited RX or uCode-originated notification
191 	 */
192 	__le16 sequence;
193 } __packed;
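
/*
 * Illustrative sketch: packing and unpacking the driver's sequence format
 * with the SEQ_*, QUEUE_* and INDEX_* macros defined above (queue 4 and
 * TFD index 7 are arbitrary values):
 *
 *	u16 seq = QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(7);
 *
 *	// SEQ_TO_QUEUE(seq) == 4, SEQ_TO_INDEX(seq) == 7
 *	// (pkt->hdr.sequence & SEQ_RX_FRAME) != 0 marks an unsolicited,
 *	// uCode-originated notification (bit 15)
 */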
194 
195 /**
196  * struct iwl_cmd_header_wide
197  *
198  * This header format appears in the beginning of each command sent from the
199  * driver, and each response/notification received from uCode.
200  * This is the wide version that contains more information about the command,
201  * such as its length, version and command type.
202  */
203 struct iwl_cmd_header_wide {
204 	u8 cmd;
205 	u8 group_id;
206 	__le16 sequence;
207 	__le16 length;
208 	u8 reserved;
209 	u8 version;
210 } __packed;
211 
212 #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
213 #define FH_RSCSR_FRAME_INVALID		0x55550000
214 #define FH_RSCSR_FRAME_ALIGN		0x40
215 #define FH_RSCSR_RPA_EN			BIT(25)
216 #define FH_RSCSR_RXQ_POS		16
217 #define FH_RSCSR_RXQ_MASK		0x3F0000
218 
219 struct iwl_rx_packet {
220 	/*
221 	 * The first 4 bytes of the RX frame header contain both the RX frame
222 	 * size and some flags.
223 	 * Bit fields:
224 	 * 31:    flag flush RB request
225 	 * 30:    flag ignore TC (terminal counter) request
226 	 * 29:    flag fast IRQ request
227 	 * 28-26: Reserved
228 	 * 25:    Offload enabled
229 	 * 24:    RPF enabled
230 	 * 23:    RSS enabled
231 	 * 22:    Checksum enabled
232 	 * 21-16: RX queue
233 	 * 15-14: Reserved
234 	 * 13-00: RX frame size
235 	 */
236 	__le32 len_n_flags;
237 	struct iwl_cmd_header hdr;
238 	u8 data[];
239 } __packed;
240 
241 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
242 {
243 	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
244 }
245 
246 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
247 {
248 	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
249 }
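
/*
 * Illustrative sketch: inspecting a received packet with the helpers and
 * FH_RSCSR_* definitions above (pkt points at a struct iwl_rx_packet):
 *
 *	u32 len_n_flags = le32_to_cpu(pkt->len_n_flags);
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 *	u32 rxq = (len_n_flags & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS;
 *
 *	// pkt->data holds payload_len bytes of payload for the command
 *	// pkt->hdr.cmd in group pkt->hdr.group_id, received on RX queue rxq
 */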
250 
251 /**
252  * enum CMD_MODE - how to send the host commands?
253  *
254  * @CMD_ASYNC: Return right away and don't wait for the response
255  * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
256  *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: The command is allowed to be sent even when RFKILL
 *	is asserted.
257  * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
258  *	command queue, but after other high priority commands. Valid only
259  *	with CMD_ASYNC.
260  * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
261  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
262  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
263  *	(i.e. mark it as non-idle).
264  * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
265  *	called after this command completes. Valid only with CMD_ASYNC.
266  */
267 enum CMD_MODE {
268 	CMD_ASYNC		= BIT(0),
269 	CMD_WANT_SKB		= BIT(1),
270 	CMD_SEND_IN_RFKILL	= BIT(2),
271 	CMD_HIGH_PRIO		= BIT(3),
272 	CMD_SEND_IN_IDLE	= BIT(4),
273 	CMD_MAKE_TRANS_IDLE	= BIT(5),
274 	CMD_WAKE_UP_TRANS	= BIT(6),
275 	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
276 };
277 
278 #define DEF_CMD_PAYLOAD_SIZE 320
279 
280 /**
281  * struct iwl_device_cmd
282  *
283  * For allocation of the command and tx queues, this establishes the overall
284  * size of the largest command we send to uCode, except for commands that
285  * aren't fully copied and use other TFD space.
286  */
287 struct iwl_device_cmd {
288 	union {
289 		struct {
290 			struct iwl_cmd_header hdr;	/* uCode API */
291 			u8 payload[DEF_CMD_PAYLOAD_SIZE];
292 		};
293 		struct {
294 			struct iwl_cmd_header_wide hdr_wide;
295 			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
296 					sizeof(struct iwl_cmd_header_wide) +
297 					sizeof(struct iwl_cmd_header)];
298 		};
299 	};
300 } __packed;
301 
302 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
303 
304 /*
305  * number of transfer buffers (fragments) per transmit frame descriptor;
306  * this is just the driver's idea, the hardware supports 20
307  */
308 #define IWL_MAX_CMD_TBS_PER_TFD	2
309 
310 /**
311  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
312  *
313  * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
314  *	ring. The transport layer doesn't map the command's buffer to DMA, but
315  *	rather copies it to a previously allocated DMA buffer. This flag tells
316  *	the transport layer not to copy the command, but to map the existing
317  *	buffer (that is passed in) instead. This saves the memcpy and allows
318  *	commands that are bigger than the fixed buffer to be submitted.
319  *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
320  * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
321  *	chunk internally and free it again after the command completes. This
322  *	can (currently) be used only once per command.
323  *	Note that a TFD entry after a DUP one cannot be a normal copied one.
324  */
325 enum iwl_hcmd_dataflag {
326 	IWL_HCMD_DFL_NOCOPY	= BIT(0),
327 	IWL_HCMD_DFL_DUP	= BIT(1),
328 };
329 
330 /**
331  * struct iwl_host_cmd - Host command to the uCode
332  *
333  * @data: array of chunks that composes the data of the host command
334  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
335  * @_rx_page_order: (internally used to free response packet)
336  * @_rx_page_addr: (internally used to free response packet)
337  * @flags: can be CMD_*
338  * @len: array of the lengths of the chunks in data
339  * @dataflags: IWL_HCMD_DFL_*
340  * @id: command id of the host command, for wide commands encoding the
341  *	version and group as well
342  */
343 struct iwl_host_cmd {
344 	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
345 	struct iwl_rx_packet *resp_pkt;
346 	unsigned long _rx_page_addr;
347 	u32 _rx_page_order;
348 
349 	u32 flags;
350 	u32 id;
351 	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
352 	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
353 };
354 
355 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
356 {
357 	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
358 }
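
/*
 * Illustrative usage sketch (the command id and the payload variables are
 * hypothetical): sending a synchronous host command with a small copied
 * chunk and a larger NOCOPY chunk, asking for the response buffer with
 * CMD_WANT_SKB and releasing it with iwl_free_resp():
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, 0x1c),
 *		.flags = CMD_WANT_SKB,
 *		.data = { &small_hdr, big_buf },
 *		.len = { sizeof(small_hdr), big_buf_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		// hcmd.resp_pkt points at the response iwl_rx_packet
 *		iwl_free_resp(&hcmd);
 *	}
 */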
359 
360 struct iwl_rx_cmd_buffer {
361 	struct page *_page;
362 	int _offset;
363 	bool _page_stolen;
364 	u32 _rx_page_order;
365 	unsigned int truesize;
366 };
367 
368 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
369 {
370 	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
371 }
372 
373 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
374 {
375 	return r->_offset;
376 }
377 
378 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
379 {
380 	r->_page_stolen = true;
381 	get_page(r->_page);
382 	return r->_page;
383 }
384 
385 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
386 {
387 	__free_pages(r->_page, r->_rx_page_order);
388 }
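
/*
 * Illustrative sketch of an RX handler using the accessors above: the
 * op_mode reads the packet through rxb_addr() and may steal the page if it
 * needs to keep the data after the handler returns:
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *	if (need_to_keep_data) {
 *		struct page *p = rxb_steal_page(rxb);
 *		// the caller now holds a reference to p and must release it
 *	}
 */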
389 
390 #define MAX_NO_RECLAIM_CMDS	6
391 
392 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
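
/* Worked example of the macro above: IWL_MASK(1, 3) == 0xe, i.e. bits 1..3 (inclusive) set */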
393 
394 /*
395  * Maximum number of HW queues the transport layer
396  * currently supports
397  */
398 #define IWL_MAX_HW_QUEUES		32
399 #define IWL_MAX_TVQM_QUEUES		512
400 
401 #define IWL_MAX_TID_COUNT	8
402 #define IWL_MGMT_TID		15
403 #define IWL_FRAME_LIMIT	64
404 #define IWL_MAX_RX_HW_QUEUES	16
405 
406 /**
407  * enum iwl_d3_status - WoWLAN image/device status
408  * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
409  * @IWL_D3_STATUS_RESET: device was reset while suspended
410  */
411 enum iwl_d3_status {
412 	IWL_D3_STATUS_ALIVE,
413 	IWL_D3_STATUS_RESET,
414 };
415 
416 /**
417  * enum iwl_trans_status: transport status flags
418  * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
419  * @STATUS_DEVICE_ENABLED: APM is enabled
420  * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
421  * @STATUS_INT_ENABLED: interrupts are enabled
422  * @STATUS_RFKILL: the HW RFkill switch is in KILL position
423  * @STATUS_FW_ERROR: the fw is in error state
424  * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
425  *	are sent
426  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
427  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
428  */
429 enum iwl_trans_status {
430 	STATUS_SYNC_HCMD_ACTIVE,
431 	STATUS_DEVICE_ENABLED,
432 	STATUS_TPOWER_PMI,
433 	STATUS_INT_ENABLED,
434 	STATUS_RFKILL,
435 	STATUS_FW_ERROR,
436 	STATUS_TRANS_GOING_IDLE,
437 	STATUS_TRANS_IDLE,
438 	STATUS_TRANS_DEAD,
439 };
440 
441 static inline int
442 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
443 {
444 	switch (rb_size) {
445 	case IWL_AMSDU_4K:
446 		return get_order(4 * 1024);
447 	case IWL_AMSDU_8K:
448 		return get_order(8 * 1024);
449 	case IWL_AMSDU_12K:
450 		return get_order(12 * 1024);
451 	default:
452 		WARN_ON(1);
453 		return -1;
454 	}
455 }
456 
457 struct iwl_hcmd_names {
458 	u8 cmd_id;
459 	const char *const cmd_name;
460 };
461 
462 #define HCMD_NAME(x)	\
463 	{ .cmd_id = x, .cmd_name = #x }
464 
465 struct iwl_hcmd_arr {
466 	const struct iwl_hcmd_names *arr;
467 	int size;
468 };
469 
470 #define HCMD_ARR(x)	\
471 	{ .arr = x, .size = ARRAY_SIZE(x) }
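
/*
 * Illustrative sketch (the table names and REPLY_RXON are only placeholders):
 * an op_mode builds command-name tables with the macros above and later
 * feeds them to the transport via &struct iwl_trans_config and
 * iwl_get_cmd_string():
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(REPLY_RXON),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_command_groups[] = {
 *		[0] = HCMD_ARR(my_legacy_names),
 *	};
 */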
472 
473 /**
474  * struct iwl_trans_config - transport configuration
475  *
476  * @op_mode: pointer to the upper layer.
477  * @cmd_queue: the index of the command queue.
478  *	Must be set before start_fw.
479  * @cmd_fifo: the fifo for host commands
480  * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
481  * @no_reclaim_cmds: Some devices erroneously don't set the
482  *	SEQ_RX_FRAME bit on some notifications; this is the
483  *	list of such notifications to filter. Max length is
484  *	%MAX_NO_RECLAIM_CMDS.
485  * @n_no_reclaim_cmds: # of commands in the list
486  * @rx_buf_size: RX buffer size needed for A-MSDUs;
487  *	if unset, 4k will be used as the RX buffer size
488  * @bc_table_dword: set to true if the BC table expects the byte count to be
489  *	in DWORD (as opposed to bytes)
490  * @scd_set_active: should the transport configure the SCD for HCMD queue
491  * @sw_csum_tx: transport should compute the TCP checksum
492  * @command_groups: array of command groups, each member is an array of the
493  *	commands in the group; for debugging only
494  * @command_groups_size: number of command groups, to avoid illegal access
495  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
496  *	we get the ALIVE from the uCode
497  * @cb_data_offs: offset inside skb->cb to store transport data at, must have
498  *	space for at least two pointers
499  */
500 struct iwl_trans_config {
501 	struct iwl_op_mode *op_mode;
502 
503 	u8 cmd_queue;
504 	u8 cmd_fifo;
505 	unsigned int cmd_q_wdg_timeout;
506 	const u8 *no_reclaim_cmds;
507 	unsigned int n_no_reclaim_cmds;
508 
509 	enum iwl_amsdu_size rx_buf_size;
510 	bool bc_table_dword;
511 	bool scd_set_active;
512 	bool sw_csum_tx;
513 	const struct iwl_hcmd_arr *command_groups;
514 	int command_groups_size;
515 
516 	u32 sdio_adma_addr;
517 
518 	u8 cb_data_offs;
519 };
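
/*
 * Illustrative sketch of an op_mode filling this configuration and handing
 * it to the transport (the values are arbitrary and my_command_groups is
 * the hypothetical table sketched above):
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 9,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_command_groups,
 *		.command_groups_size = ARRAY_SIZE(my_command_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */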
520 
521 struct iwl_trans_dump_data {
522 	u32 len;
523 	u8 data[];
524 };
525 
526 struct iwl_trans;
527 
528 struct iwl_trans_txq_scd_cfg {
529 	u8 fifo;
530 	u8 sta_id;
531 	u8 tid;
532 	bool aggregate;
533 	int frame_limit;
534 };
535 
536 /* Available options for &struct iwl_tx_queue_cfg_cmd */
537 enum iwl_tx_queue_cfg_actions {
538 	TX_QUEUE_CFG_ENABLE_QUEUE		= BIT(0),
539 	TX_QUEUE_CFG_TFD_SHORT_FORMAT		= BIT(1),
540 };
541 
542 /**
543  * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
544  * @sta_id: station id
545  * @tid: tid of the queue
546  * @flags: bit 0 on - enable the queue, off - disable it; bit 1 - short TFD format
547  * @cb_size: size of the TFD cyclic buffer, as an exponent minus 3
548  *	(i.e. 2^(cb_size + 3) TFDs): minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
549  * @byte_cnt_addr: address of byte count table
550  * @tfdq_addr: address of TFD circular buffer
551  */
552 struct iwl_tx_queue_cfg_cmd {
553 	u8 sta_id;
554 	u8 tid;
555 	__le16 flags;
556 	__le32 cb_size;
557 	__le64 byte_cnt_addr;
558 	__le64 tfdq_addr;
559 } __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
560 
561 /**
562  * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
563  * @queue_number: queue number assigned to this RA/TID
564  * @flags: set on failure
565  * @write_pointer: initial value for write pointer
566  */
567 struct iwl_tx_queue_cfg_rsp {
568 	__le16 queue_number;
569 	__le16 flags;
570 	__le16 write_pointer;
571 	__le16 reserved;
572 } __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
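
/*
 * Illustrative sketch: cb_size is log2(number of TFDs) minus 3, so a
 * 256-TFD ring uses cb_size = 5 (2^(5 + 3) == 256). Allocating such a
 * queue through the transport (sta_id, tid, cmd_id and wdg_timeout are
 * hypothetical):
 *
 *	struct iwl_tx_queue_cfg_cmd cmd = {
 *		.sta_id = sta_id,
 *		.tid = tid,
 *		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
 *		.cb_size = cpu_to_le32(5),
 *	};
 *	int queue = iwl_trans_txq_alloc(trans, &cmd, cmd_id, wdg_timeout);
 */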
573 
574 /**
575  * struct iwl_trans_ops - transport specific operations
576  *
577  * All the handlers MUST be implemented, except those documented as optional below
578  *
579  * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
580  *	out of a low power state. From that point on, the HW can send
581  *	interrupts. May sleep.
582  * @op_mode_leave: Turn off the HW RF kill indication if on
583  *	May sleep
584  * @start_fw: allocates and inits all the resources for the transport
585  *	layer. Also kick a fw image.
586  *	May sleep
587  * @fw_alive: called when the fw sends alive notification. If the fw provides
588  *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
589  *	May sleep
590  * @stop_device: stops the whole device (embedded CPU put to reset) and stops
591  *	the HW. If low_power is true, the NIC will be put in low power state.
592  *	From that point on, the HW will be stopped but will still issue an
593  *	interrupt if the HW RF kill switch is triggered.
594  *	This callback must do the right thing and not crash even if %start_hw()
595  *	was called but not %start_fw(). May sleep.
596  * @d3_suspend: put the device into the correct mode for WoWLAN during
597  *	suspend. This is optional, if not implemented WoWLAN will not be
598  *	supported. This callback may sleep.
599  * @d3_resume: resume the device after WoWLAN, enabling the opmode to
600  *	talk to the WoWLAN image to get its status. This is optional, if not
601  *	implemented WoWLAN will not be supported. This callback may sleep.
602  * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
603  *	If RFkill is asserted in the middle of a SYNC host command, it must
604  *	return -ERFKILL straight away.
605  *	May sleep only if CMD_ASYNC is not set
606  * @tx: send an skb. The transport relies on the op_mode to zero the
607  *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
608  *	the CSUM will be taken care of (TCP CSUM and IP header in case of
609  *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
610  *	header if it is IPv4.
611  *	Must be atomic
612  * @reclaim: free packets up to ssn. Returns a list of freed packets.
613  *	Must be atomic
614  * @txq_enable: setup a queue. To setup an AC queue, use the
615  *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
616  *	this one. The op_mode must not configure the HCMD queue. The scheduler
617  *	configuration may be %NULL, in which case the hardware will not be
618  *	configured. May sleep.
619  * @txq_disable: de-configure a Tx queue to send AMPDUs
620  *	Must be atomic
621  * @txq_set_shared_mode: change Tx queue shared/unshared marking
622  * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
623  * @freeze_txq_timer: prevents the timer of the queue from firing until the
624  *	queue is set to awake. Must be atomic.
625  * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
626  *	that the transport needs to refcount the calls since this function
627  *	will be called several times with block = true, and then the queues
628  *	need to be unblocked only after the same number of calls with
629  *	block = false.
630  * @write8: write a u8 to a register at offset ofs from the BAR
631  * @write32: write a u32 to a register at offset ofs from the BAR
632  * @read32: read a u32 register at offset ofs from the BAR
633  * @read_prph: read a DWORD from a periphery register
634  * @write_prph: write a DWORD to a periphery register
635  * @read_mem: read device's SRAM in DWORD
636  * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
637  *	will be zeroed.
638  * @configure: configure parameters required by the transport layer from
639  *	the op_mode. May be called several times before start_fw, can't be
640  *	called after that.
641  * @set_pmi: set the power pmi state
642  * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
643  *	Sleeping is not allowed between grab_nic_access and
644  *	release_nic_access.
645  * @release_nic_access: let the NIC go to sleep. The "flags" parameter
646  *	must be the same one that was sent before to the grab_nic_access.
647  * @set_bits_mask: set SRAM register according to value and mask.
648  * @ref: grab a reference to the transport/FW layers, disallowing
649  *	certain low power states
650  * @unref: release a reference previously taken with @ref. Note that
651  *	initially the reference count is 1, making an initial @unref
652  *	necessary to allow low power states.
653  * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
654  *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
655  *	Note that the transport must fill in the proper file headers.
656  */
657 struct iwl_trans_ops {
658 
659 	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
660 	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
661 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
662 			bool run_in_rfkill);
663 	int (*update_sf)(struct iwl_trans *trans,
664 			 struct iwl_sf_region *st_fwrd_space);
665 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
666 	void (*stop_device)(struct iwl_trans *trans, bool low_power);
667 
668 	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
669 	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
670 			 bool test, bool reset);
671 
672 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
673 
674 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
675 		  struct iwl_device_cmd *dev_cmd, int queue);
676 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
677 			struct sk_buff_head *skbs);
678 
679 	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
680 			   const struct iwl_trans_txq_scd_cfg *cfg,
681 			   unsigned int queue_wdg_timeout);
682 	void (*txq_disable)(struct iwl_trans *trans, int queue,
683 			    bool configure_scd);
684 	/* a000 functions */
685 	int (*txq_alloc)(struct iwl_trans *trans,
686 			 struct iwl_tx_queue_cfg_cmd *cmd,
687 			 int cmd_id,
688 			 unsigned int queue_wdg_timeout);
689 	void (*txq_free)(struct iwl_trans *trans, int queue);
690 
691 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
692 				    bool shared);
693 
694 	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
695 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
696 				 bool freeze);
697 	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
698 
699 	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
700 	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
701 	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
702 	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
703 	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
704 	int (*read_mem)(struct iwl_trans *trans, u32 addr,
705 			void *buf, int dwords);
706 	int (*write_mem)(struct iwl_trans *trans, u32 addr,
707 			 const void *buf, int dwords);
708 	void (*configure)(struct iwl_trans *trans,
709 			  const struct iwl_trans_config *trans_cfg);
710 	void (*set_pmi)(struct iwl_trans *trans, bool state);
711 	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
712 	void (*release_nic_access)(struct iwl_trans *trans,
713 				   unsigned long *flags);
714 	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
715 			      u32 value);
716 	void (*ref)(struct iwl_trans *trans);
717 	void (*unref)(struct iwl_trans *trans);
718 	int  (*suspend)(struct iwl_trans *trans);
719 	void (*resume)(struct iwl_trans *trans);
720 
721 	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
722 						 const struct iwl_fw_dbg_trigger_tlv
723 						 *trigger);
724 };
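
/*
 * Illustrative sketch (the my_bus_* handlers and my_bus_priv are
 * hypothetical): a bus back-end defines one static struct iwl_trans_ops and
 * passes it to iwl_trans_alloc() (declared near the end of this file) from
 * its probe path:
 *
 *	static const struct iwl_trans_ops my_bus_trans_ops = {
 *		.start_hw = my_bus_start_hw,
 *		.start_fw = my_bus_start_fw,
 *		.stop_device = my_bus_stop_device,
 *		.send_cmd = my_bus_send_cmd,
 *		.tx = my_bus_tx,
 *		.reclaim = my_bus_reclaim,
 *		// ... remaining handlers ...
 *	};
 *
 *	trans = iwl_trans_alloc(sizeof(struct my_bus_priv), dev,
 *				cfg, &my_bus_trans_ops);
 */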
725 
726 /**
727  * enum iwl_trans_state - state of the transport layer
728  *
729  * @IWL_TRANS_NO_FW: no fw has sent an alive response
730  * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
731  */
732 enum iwl_trans_state {
733 	IWL_TRANS_NO_FW = 0,
734 	IWL_TRANS_FW_ALIVE	= 1,
735 };
736 
737 /**
738  * DOC: Platform power management
739  *
740  * There are two types of platform power management: system-wide
741  * (WoWLAN) and runtime.
742  *
743  * In system-wide power management the entire platform goes into a low
744  * power state (e.g. idle or suspend to RAM) at the same time and the
745  * device is configured as a wakeup source for the entire platform.
746  * This is usually triggered by userspace activity (e.g. the user
747  * presses the suspend button or a power management daemon decides to
748  * put the platform in low power mode).  The device's behavior in this
749  * mode is dictated by the wake-on-WLAN configuration.
750  *
751  * In runtime power management, only the devices which are themselves
752  * idle enter a low power state.  This is done at runtime, which means
753  * that the entire system is still running normally.  This mode is
754  * usually triggered automatically by the device driver and requires
755  * the ability to enter and exit the low power modes in a very short
756  * time, so there is not much impact on usability.
757  *
758  * The terms used for the device's behavior are as follows:
759  *
760  *	- D0: the device is fully powered and the host is awake;
761  *	- D3: the device is in low power mode and only reacts to
762  *		specific events (e.g. magic-packet received or scan
763  *		results found);
764  *	- D0I3: the device is in low power mode and reacts to any
765  *		activity (e.g. RX);
766  *
767  * These terms reflect the power modes in the firmware and are not to
768  * be confused with the physical device power state.  The NIC can be
769  * in D0I3 mode even if, for instance, the PCI device is in D3 state.
770  */
771 
772 /**
773  * enum iwl_plat_pm_mode - platform power management mode
774  *
775  * This enumeration describes the device's platform power management
776  * behavior when in idle mode (i.e. runtime power management) or when
777  * in system-wide suspend (i.e WoWLAN).
778  *
779  * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
780  *	device.  At runtime, this means that nothing happens and the
781  *	device always remains active.  In system-wide suspend mode,
782  *	it means that all the connections will be closed automatically
783  *	by mac80211 before the platform is suspended.
784  * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
785  *	For runtime power management, this mode is not officially
786  *	supported.
787  * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
788  */
789 enum iwl_plat_pm_mode {
790 	IWL_PLAT_PM_MODE_DISABLED,
791 	IWL_PLAT_PM_MODE_D3,
792 	IWL_PLAT_PM_MODE_D0I3,
793 };
794 
795 /* Max time to wait for trans to become idle/non-idle on d0i3
796  * enter/exit (in msecs).
797  */
798 #define IWL_TRANS_IDLE_TIMEOUT 2000
799 
800 /**
801  * struct iwl_trans - transport common data
802  *
803  * @ops: pointer to iwl_trans_ops
804  * @op_mode: pointer to the op_mode
805  * @cfg: pointer to the configuration
806  * @drv: pointer to iwl_drv
807  * @status: a bit-mask of transport status flags
808  * @dev: pointer to struct device * that represents the device
809  * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
810  *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
811  * @hw_rf_id: a u32 with the device RF ID
812  * @hw_id: a u32 with the ID of the device / sub-device.
813  *	Set during transport allocation.
814  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
815  * @pm_support: set to true in start_hw if link pm is supported
816  * @ltr_enabled: set to true if the LTR is enabled
817  * @wide_cmd_header: true when ucode supports wide command header format
818  * @num_rx_queues: number of RX queues allocated by the transport;
819  *	the transport must set this before calling iwl_drv_start()
820  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
821  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
822  * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
823  *	starting the firmware, used for tracing
824  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
825  *	start of the 802.11 header in the @rx_mpdu_cmd
826  * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
827  * @dbg_dest_tlv: points to the destination TLV for debug
828  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
829  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
830  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
831  * @paging_req_addr: The location where the FW will upload / download the pages
832  *	from. The address is set by the opmode
833  * @paging_db: Pointer to the opmode paging database; the pointer is set by
834  *	the opmode.
835  * @paging_download_buf: Buffer used for copying all of the pages before
836  *	downloading them to the FW. The buffer is allocated in the opmode
837  * @system_pm_mode: the system-wide power management mode in use.
838  *	This mode is set dynamically, depending on the WoWLAN values
839  *	configured from the userspace at runtime.
840  * @runtime_pm_mode: the runtime power management mode in use.  This
841  *	mode is set during the initialization phase and is not
842  *	supposed to change during runtime.
843  */
844 struct iwl_trans {
845 	const struct iwl_trans_ops *ops;
846 	struct iwl_op_mode *op_mode;
847 	const struct iwl_cfg *cfg;
848 	struct iwl_drv *drv;
849 	enum iwl_trans_state state;
850 	unsigned long status;
851 
852 	struct device *dev;
853 	u32 max_skb_frags;
854 	u32 hw_rev;
855 	u32 hw_rf_id;
856 	u32 hw_id;
857 	char hw_id_str[52];
858 
859 	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
860 
861 	bool pm_support;
862 	bool ltr_enabled;
863 
864 	const struct iwl_hcmd_arr *command_groups;
865 	int command_groups_size;
866 	bool wide_cmd_header;
867 
868 	u8 num_rx_queues;
869 
870 	/* The following fields are internal only */
871 	struct kmem_cache *dev_cmd_pool;
872 	char dev_cmd_pool_name[50];
873 
874 	struct dentry *dbgfs_dir;
875 
876 #ifdef CONFIG_LOCKDEP
877 	struct lockdep_map sync_cmd_lockdep_map;
878 #endif
879 
880 	u64 dflt_pwr_limit;
881 
882 	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
883 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
884 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
885 	u8 dbg_dest_reg_num;
886 
887 	/*
888 	 * Paging parameters - All of the parameters should be set by the
889 	 * opmode when paging is enabled
890 	 */
891 	u32 paging_req_addr;
892 	struct iwl_fw_paging *paging_db;
893 	void *paging_download_buf;
894 
895 	enum iwl_plat_pm_mode system_pm_mode;
896 	enum iwl_plat_pm_mode runtime_pm_mode;
897 	bool suspending;
898 
899 	/* pointer to trans specific struct */
900 	/* Ensure that this pointer will always be aligned to sizeof pointer */
901 	char trans_specific[0] __aligned(sizeof(void *));
902 };
903 
904 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
905 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
906 
907 static inline void iwl_trans_configure(struct iwl_trans *trans,
908 				       const struct iwl_trans_config *trans_cfg)
909 {
910 	trans->op_mode = trans_cfg->op_mode;
911 
912 	trans->ops->configure(trans, trans_cfg);
913 	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
914 }
915 
916 static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
917 {
918 	might_sleep();
919 
920 	return trans->ops->start_hw(trans, low_power);
921 }
922 
923 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
924 {
925 	return trans->ops->start_hw(trans, true);
926 }
927 
928 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
929 {
930 	might_sleep();
931 
932 	if (trans->ops->op_mode_leave)
933 		trans->ops->op_mode_leave(trans);
934 
935 	trans->op_mode = NULL;
936 
937 	trans->state = IWL_TRANS_NO_FW;
938 }
939 
940 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
941 {
942 	might_sleep();
943 
944 	trans->state = IWL_TRANS_FW_ALIVE;
945 
946 	trans->ops->fw_alive(trans, scd_addr);
947 }
948 
949 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
950 				     const struct fw_img *fw,
951 				     bool run_in_rfkill)
952 {
953 	might_sleep();
954 
955 	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
956 
957 	clear_bit(STATUS_FW_ERROR, &trans->status);
958 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
959 }
960 
961 static inline int iwl_trans_update_sf(struct iwl_trans *trans,
962 				      struct iwl_sf_region *st_fwrd_space)
963 {
964 	might_sleep();
965 
966 	if (trans->ops->update_sf)
967 		return trans->ops->update_sf(trans, st_fwrd_space);
968 
969 	return 0;
970 }
971 
972 static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
973 					  bool low_power)
974 {
975 	might_sleep();
976 
977 	trans->ops->stop_device(trans, low_power);
978 
979 	trans->state = IWL_TRANS_NO_FW;
980 }
981 
982 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
983 {
984 	_iwl_trans_stop_device(trans, true);
985 }
986 
987 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
988 					bool reset)
989 {
990 	might_sleep();
991 	if (trans->ops->d3_suspend)
992 		trans->ops->d3_suspend(trans, test, reset);
993 }
994 
995 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
996 				      enum iwl_d3_status *status,
997 				      bool test, bool reset)
998 {
999 	might_sleep();
1000 	if (!trans->ops->d3_resume)
1001 		return 0;
1002 
1003 	return trans->ops->d3_resume(trans, status, test, reset);
1004 }
1005 
1006 static inline void iwl_trans_ref(struct iwl_trans *trans)
1007 {
1008 	if (trans->ops->ref)
1009 		trans->ops->ref(trans);
1010 }
1011 
1012 static inline void iwl_trans_unref(struct iwl_trans *trans)
1013 {
1014 	if (trans->ops->unref)
1015 		trans->ops->unref(trans);
1016 }
1017 
1018 static inline int iwl_trans_suspend(struct iwl_trans *trans)
1019 {
1020 	if (!trans->ops->suspend)
1021 		return 0;
1022 
1023 	return trans->ops->suspend(trans);
1024 }
1025 
1026 static inline void iwl_trans_resume(struct iwl_trans *trans)
1027 {
1028 	if (trans->ops->resume)
1029 		trans->ops->resume(trans);
1030 }
1031 
1032 static inline struct iwl_trans_dump_data *
1033 iwl_trans_dump_data(struct iwl_trans *trans,
1034 		    const struct iwl_fw_dbg_trigger_tlv *trigger)
1035 {
1036 	if (!trans->ops->dump_data)
1037 		return NULL;
1038 	return trans->ops->dump_data(trans, trigger);
1039 }
1040 
1041 static inline struct iwl_device_cmd *
1042 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
1043 {
1044 	struct iwl_device_cmd *dev_cmd_ptr =
1045 		kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
1046 
1047 	if (unlikely(dev_cmd_ptr == NULL))
1048 		return NULL;
1049 
1050 	return dev_cmd_ptr;
1051 }
1052 
1053 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
1054 
1055 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
1056 					 struct iwl_device_cmd *dev_cmd)
1057 {
1058 	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
1059 }
1060 
1061 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1062 			       struct iwl_device_cmd *dev_cmd, int queue)
1063 {
1064 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
1065 		return -EIO;
1066 
1067 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1068 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1069 		return -EIO;
1070 	}
1071 
1072 	return trans->ops->tx(trans, skb, dev_cmd, queue);
1073 }
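
/*
 * Illustrative sketch of the Tx path helpers above: the op_mode allocates a
 * device command from the pool, fills it, and hands it to the transport
 * together with the skb; on failure it must free the command itself:
 *
 *	struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	// ... fill dev_cmd->hdr and dev_cmd->payload ...
 *	if (iwl_trans_tx(trans, skb, dev_cmd, queue)) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return -EIO;
 *	}
 */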
1074 
1075 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
1076 				     int ssn, struct sk_buff_head *skbs)
1077 {
1078 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1079 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1080 		return;
1081 	}
1082 
1083 	trans->ops->reclaim(trans, queue, ssn, skbs);
1084 }
1085 
1086 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1087 					 bool configure_scd)
1088 {
1089 	trans->ops->txq_disable(trans, queue, configure_scd);
1090 }
1091 
1092 static inline void
1093 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1094 			 const struct iwl_trans_txq_scd_cfg *cfg,
1095 			 unsigned int queue_wdg_timeout)
1096 {
1097 	might_sleep();
1098 
1099 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1100 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1101 		return;
1102 	}
1103 
1104 	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
1105 }
1106 
1107 static inline void
1108 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
1109 {
1110 	if (WARN_ON_ONCE(!trans->ops->txq_free))
1111 		return;
1112 
1113 	trans->ops->txq_free(trans, queue);
1114 }
1115 
1116 static inline int
1117 iwl_trans_txq_alloc(struct iwl_trans *trans,
1118 		    struct iwl_tx_queue_cfg_cmd *cmd,
1119 		    int cmd_id,
1120 		    unsigned int queue_wdg_timeout)
1121 {
1122 	might_sleep();
1123 
1124 	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
1125 		return -ENOTSUPP;
1126 
1127 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1128 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1129 		return -EIO;
1130 	}
1131 
1132 	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
1133 }
1134 
1135 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1136 						 int queue, bool shared_mode)
1137 {
1138 	if (trans->ops->txq_set_shared_mode)
1139 		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
1140 }
1141 
1142 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1143 					int fifo, int sta_id, int tid,
1144 					int frame_limit, u16 ssn,
1145 					unsigned int queue_wdg_timeout)
1146 {
1147 	struct iwl_trans_txq_scd_cfg cfg = {
1148 		.fifo = fifo,
1149 		.sta_id = sta_id,
1150 		.tid = tid,
1151 		.frame_limit = frame_limit,
1152 		.aggregate = sta_id >= 0,
1153 	};
1154 
1155 	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1156 }
1157 
1158 static inline
1159 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1160 			     unsigned int queue_wdg_timeout)
1161 {
1162 	struct iwl_trans_txq_scd_cfg cfg = {
1163 		.fifo = fifo,
1164 		.sta_id = -1,
1165 		.tid = IWL_MAX_TID_COUNT,
1166 		.frame_limit = IWL_FRAME_LIMIT,
1167 		.aggregate = false,
1168 	};
1169 
1170 	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1171 }
1172 
1173 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1174 					      unsigned long txqs,
1175 					      bool freeze)
1176 {
1177 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1178 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1179 		return;
1180 	}
1181 
1182 	if (trans->ops->freeze_txq_timer)
1183 		trans->ops->freeze_txq_timer(trans, txqs, freeze);
1184 }
1185 
1186 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1187 					    bool block)
1188 {
1189 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1190 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1191 		return;
1192 	}
1193 
1194 	if (trans->ops->block_txq_ptrs)
1195 		trans->ops->block_txq_ptrs(trans, block);
1196 }
1197 
1198 static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
1199 						 u32 txqs)
1200 {
1201 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1202 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1203 		return -EIO;
1204 	}
1205 
1206 	return trans->ops->wait_tx_queues_empty(trans, txqs);
1207 }
1208 
1209 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1210 {
1211 	trans->ops->write8(trans, ofs, val);
1212 }
1213 
1214 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1215 {
1216 	trans->ops->write32(trans, ofs, val);
1217 }
1218 
1219 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1220 {
1221 	return trans->ops->read32(trans, ofs);
1222 }
1223 
1224 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1225 {
1226 	return trans->ops->read_prph(trans, ofs);
1227 }
1228 
1229 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1230 					u32 val)
1231 {
1232 	return trans->ops->write_prph(trans, ofs, val);
1233 }
1234 
1235 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1236 				     void *buf, int dwords)
1237 {
1238 	return trans->ops->read_mem(trans, addr, buf, dwords);
1239 }
1240 
1241 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
1242 	do {								      \
1243 		if (__builtin_constant_p(bufsize))			      \
1244 			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
1245 		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1246 	} while (0)
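
/*
 * Illustrative sketch: the buffer size passed to iwl_trans_read_mem_bytes()
 * must be a whole number of DWORDs (checked at build time when the size is
 * constant):
 *
 *	u32 buf[4];
 *
 *	iwl_trans_read_mem_bytes(trans, addr, buf, sizeof(buf));
 */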
1247 
1248 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1249 {
1250 	u32 value;
1251 
1252 	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1253 		return 0xa5a5a5a5;
1254 
1255 	return value;
1256 }
1257 
1258 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1259 				      const void *buf, int dwords)
1260 {
1261 	return trans->ops->write_mem(trans, addr, buf, dwords);
1262 }
1263 
1264 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1265 					u32 val)
1266 {
1267 	return iwl_trans_write_mem(trans, addr, &val, 1);
1268 }
1269 
1270 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1271 {
1272 	if (trans->ops->set_pmi)
1273 		trans->ops->set_pmi(trans, state);
1274 }
1275 
1276 static inline void
1277 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1278 {
1279 	trans->ops->set_bits_mask(trans, reg, mask, value);
1280 }
1281 
1282 #define iwl_trans_grab_nic_access(trans, flags)	\
1283 	__cond_lock(nic_access,				\
1284 		    likely((trans)->ops->grab_nic_access(trans, flags)))
1285 
1286 static inline void __releases(nic_access)
1287 iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
1288 {
1289 	trans->ops->release_nic_access(trans, flags);
1290 	__release(nic_access);
1291 }
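
/*
 * Illustrative sketch of the NIC-access pattern: grab access (which may
 * fail), do register I/O without sleeping in between, then release with the
 * same flags (reg is a hypothetical periphery register offset):
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		u32 val = iwl_trans_read_prph(trans, reg);
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */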
1292 
1293 static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1294 {
1295 	if (WARN_ON_ONCE(!trans->op_mode))
1296 		return;
1297 
1298 	/* prevent double restarts due to the same erroneous FW */
1299 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1300 		iwl_op_mode_nic_error(trans->op_mode);
1301 }
1302 
1303 /*****************************************************
1304  * transport helper functions
1305  *****************************************************/
1306 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1307 				  struct device *dev,
1308 				  const struct iwl_cfg *cfg,
1309 				  const struct iwl_trans_ops *ops);
1310 void iwl_trans_free(struct iwl_trans *trans);
1311 
1312 /*****************************************************
1313 * driver (transport) register/unregister functions
1314 ******************************************************/
1315 int __must_check iwl_pci_register_driver(void);
1316 void iwl_pci_unregister_driver(void);
1317 
1318 #endif /* __iwl_trans_h__ */
1319