1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3 * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #ifndef __iwl_trans_int_pcie_h__
8 #define __iwl_trans_int_pcie_h__
9
10 #include <linux/spinlock.h>
11 #include <linux/interrupt.h>
12 #include <linux/skbuff.h>
13 #include <linux/wait.h>
14 #include <linux/pci.h>
15 #include <linux/timer.h>
16 #include <linux/cpu.h>
17
18 #include "iwl-fh.h"
19 #include "iwl-csr.h"
20 #include "iwl-trans.h"
21 #include "iwl-debug.h"
22 #include "iwl-io.h"
23 #include "iwl-op-mode.h"
24 #include "iwl-drv.h"
25 #include "pcie/iwl-context-info.h"
26
27 /*
28 * RX related structures and functions
29 */
30 #define RX_NUM_QUEUES 1
31 #define RX_POST_REQ_ALLOC 2
32 #define RX_CLAIM_REQ_ALLOC 8
33 #define RX_PENDING_WATERMARK 16
34 #define FIRST_RX_QUEUE 512
35
36 struct iwl_host_cmd;
37
38 /* This file includes the declarations that are internal to the
39 * trans_pcie layer */
40
41 /**
42 * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor
43 * @page_dma: bus address of rxb page
44 * @page: driver's pointer to the rxb page
45 * @list: list entry for the membuffer
46 * @invalid: rxb is in driver ownership - not owned by HW
47 * @vid: index of this rxb in the global table
48 * @offset: indicates which offset of the page (in bytes)
49 * this buffer uses (if multiple RBs fit into one page)
50 */
51 struct iwl_rx_mem_buffer {
52 dma_addr_t page_dma;
53 struct page *page;
54 struct list_head list;
55 u32 offset;
56 u16 vid;
57 bool invalid;
58 };
59
60 /* interrupt statistics */
61 struct isr_statistics {
62 u32 hw;
63 u32 sw;
64 u32 err_code;
65 u32 sch;
66 u32 alive;
67 u32 rfkill;
68 u32 ctkill;
69 u32 wakeup;
70 u32 rx;
71 u32 tx;
72 u32 unhandled;
73 };
74
75 /**
76 * struct iwl_rx_transfer_desc - transfer descriptor
77 * @addr: ptr to free buffer start address
78 * @rbid: unique tag of the buffer
79 * @reserved: reserved
80 */
81 struct iwl_rx_transfer_desc {
82 __le16 rbid;
83 __le16 reserved[3];
84 __le64 addr;
85 } __packed;
86
87 #define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
88
89 /**
90 * struct iwl_rx_completion_desc - completion descriptor
91 * @reserved1: reserved
92 * @rbid: unique tag of the received buffer
93 * @flags: flags (bit 0: fragmented, all other bits: reserved)
94 * @reserved2: reserved
95 */
96 struct iwl_rx_completion_desc {
97 __le32 reserved1;
98 __le16 rbid;
99 u8 flags;
100 u8 reserved2[25];
101 } __packed;
102
103 /**
104 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
105 * @rbid: unique tag of the received buffer
106 * @flags: flags (bit 0: fragmented, all other bits: reserved)
107 * @reserved: reserved
108 */
109 struct iwl_rx_completion_desc_bz {
110 __le16 rbid;
111 u8 flags;
112 u8 reserved[1];
113 } __packed;
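
/*
 * Note (editorial): which completion descriptor layout is used depends on
 * the device family (the larger iwl_rx_completion_desc vs. the compact Bz
 * variant above); the RX code is expected to pick the matching one based
 * on the transport configuration.
 */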
114
115 /**
116 * struct iwl_rxq - Rx queue
117 * @id: queue index
118 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
119 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
120 * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
121 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
122 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
123 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
124 * @read: Shared index to newest available Rx buffer
125 * @write: Shared index to oldest written Rx packet
126 * @write_actual: actual write pointer written to device, since we update in
127 * blocks of 8 only
128 * @free_count: Number of pre-allocated buffers in rx_free
129 * @used_count: Number of RBDs handed to the allocator for page allocation
131 * @rx_free: list of RBDs with allocated RB ready for use
132 * @rx_used: list of RBDs with no RB attached
133 * @need_update: flag to indicate we need to update read/write index
134 * @rb_stts: driver's pointer to receive buffer status
135 * @rb_stts_dma: bus address of receive buffer status
136 * @lock: per-queue lock
137 * @queue: actual rx queue. Not used for multi-rx queue.
138 * @next_rb_is_fragment: indicates that the previous RB that we handled set
139 * the fragmented flag, so the next one is still another fragment
140 * @napi: NAPI struct for this queue
141 * @queue_size: size of this queue
142 *
143 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
144 */
145 struct iwl_rxq {
146 int id;
147 void *bd;
148 dma_addr_t bd_dma;
149 void *used_bd;
150 dma_addr_t used_bd_dma;
151 u32 read;
152 u32 write;
153 u32 free_count;
154 u32 used_count;
155 u32 write_actual;
156 u32 queue_size;
157 struct list_head rx_free;
158 struct list_head rx_used;
159 bool need_update, next_rb_is_fragment;
160 void *rb_stts;
161 dma_addr_t rb_stts_dma;
162 spinlock_t lock;
163 struct napi_struct napi;
164 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
165 };
166
167 /**
168 * struct iwl_rb_allocator - Rx allocator
169 * @req_pending: number of requests the allocator has not processed yet
170 * @req_ready: number of requests honored and ready for claiming
171 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
172 * the queue. This is a list of &struct iwl_rx_mem_buffer
173 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
174 * of &struct iwl_rx_mem_buffer
175 * @lock: protects the rbd_allocated and rbd_empty lists
176 * @alloc_wq: work queue for background calls
177 * @rx_alloc: work struct for background calls
178 */
179 struct iwl_rb_allocator {
180 atomic_t req_pending;
181 atomic_t req_ready;
182 struct list_head rbd_allocated;
183 struct list_head rbd_empty;
184 spinlock_t lock;
185 struct workqueue_struct *alloc_wq;
186 struct work_struct rx_alloc;
187 };
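
/*
 * Illustrative flow (sketch, not defined in this header): the RX path is
 * expected to hand used RBDs back in batches of RX_POST_REQ_ALLOC, bumping
 * rba->req_pending and scheduling rba->rx_alloc; the worker attaches fresh
 * pages to RBDs on rba->rbd_empty, moves them to rba->rbd_allocated and
 * bumps rba->req_ready; the RX path then claims RX_CLAIM_REQ_ALLOC ready
 * RBDs per honored request. The exact steps live in the RX code, not here.
 */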
188
189 /**
190 * iwl_get_closed_rb_stts - get closed rb stts from different structs
191 * @trans: transport pointer (for configuration)
192 * @rxq: the rxq to get the rb stts from
193 * Return: last closed RB index
194 */
195 static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
196 struct iwl_rxq *rxq)
197 {
198 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
199 __le16 *rb_stts = rxq->rb_stts;
200
201 return le16_to_cpu(READ_ONCE(*rb_stts));
202 } else {
203 struct iwl_rb_status *rb_stts = rxq->rb_stts;
204
205 return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
206 }
207 }
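
/*
 * Illustrative use (sketch, not part of this header): the RX poll loop
 * typically compares the index returned here against rxq->read to bound
 * how many RBs may be processed, roughly:
 *
 *	u16 closed = iwl_get_closed_rb_stts(trans, rxq);
 *
 *	while (rxq->read != closed) {
 *		handle_rb(rxq->queue[rxq->read]);
 *		rxq->read = (rxq->read + 1) & (rxq->queue_size - 1);
 *	}
 *
 * where handle_rb() stands in for the real RX handling.
 */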
208
209 #ifdef CONFIG_IWLWIFI_DEBUGFS
210 /**
211 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
212 * debugfs file
213 *
214 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
215 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
216 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
217 * set the file can no longer be used.
218 */
219 enum iwl_fw_mon_dbgfs_state {
220 IWL_FW_MON_DBGFS_STATE_CLOSED,
221 IWL_FW_MON_DBGFS_STATE_OPEN,
222 IWL_FW_MON_DBGFS_STATE_DISABLED,
223 };
224 #endif
225
226 /**
227 * enum iwl_shared_irq_flags - level of sharing for irq
228 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
229 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
230 */
231 enum iwl_shared_irq_flags {
232 IWL_SHARED_IRQ_NON_RX = BIT(0),
233 IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
234 };
235
236 /**
237 * enum iwl_image_response_code - image response values
238 * @IWL_IMAGE_RESP_DEF: the default value of the register
239 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
240 * @IWL_IMAGE_RESP_FAIL: iml reading failed
241 */
242 enum iwl_image_response_code {
243 IWL_IMAGE_RESP_DEF = 0,
244 IWL_IMAGE_RESP_SUCCESS = 1,
245 IWL_IMAGE_RESP_FAIL = 2,
246 };
247
248 #ifdef CONFIG_IWLWIFI_DEBUGFS
249 /**
250 * struct cont_rec: continuous recording data structure
251 * @prev_wr_ptr: the last address that was read in monitor_data
252 * debugfs file
253 * @prev_wrap_cnt: the wrap count that was used during the last read in
254 * monitor_data debugfs file
255 * @state: the state of monitor_data debugfs file as described
256 * in &iwl_fw_mon_dbgfs_state enum
257 * @mutex: locked while reading from monitor_data debugfs file
258 */
259 struct cont_rec {
260 u32 prev_wr_ptr;
261 u32 prev_wrap_cnt;
262 u8 state;
263 /* Used to sync monitor_data debugfs file with driver unload flow */
264 struct mutex mutex;
265 };
266 #endif
267
268 enum iwl_pcie_fw_reset_state {
269 FW_RESET_IDLE,
270 FW_RESET_REQUESTED,
271 FW_RESET_OK,
272 FW_RESET_ERROR,
273 FW_RESET_TOP_REQUESTED,
274 };
275
276 /**
277 * enum iwl_pcie_imr_status - imr dma transfer state
278 * @IMR_D2S_IDLE: default value of the dma transfer
279 * @IMR_D2S_REQUESTED: dma transfer requested
280 * @IMR_D2S_COMPLETED: dma transfer completed
281 * @IMR_D2S_ERROR: dma transfer error
282 */
283 enum iwl_pcie_imr_status {
284 IMR_D2S_IDLE,
285 IMR_D2S_REQUESTED,
286 IMR_D2S_COMPLETED,
287 IMR_D2S_ERROR,
288 };
289
290 /**
291 * struct iwl_pcie_txqs - TX queues data
292 *
293 * @queue_used: bit mask of used queues
294 * @queue_stopped: bit mask of stopped queues
295 * @txq: array of TXQ data structures representing the TXQs
296 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
297 * @bc_pool: bytecount DMA allocations pool
298 * @bc_tbl_size: bytecount table size
299 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
300 * (and similar usage)
301 * @tfd: TFD data
302 * @tfd.max_tbs: max number of buffers per TFD
303 * @tfd.size: TFD size
304 * @tfd.addr_size: TFD/TB address size
305 */
306 struct iwl_pcie_txqs {
307 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
308 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
309 struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
310 struct dma_pool *bc_pool;
311 size_t bc_tbl_size;
312 struct iwl_tso_hdr_page __percpu *tso_hdr_page;
313
314 struct {
315 u8 max_tbs;
316 u16 size;
317 u8 addr_size;
318 } tfd;
319
320 struct iwl_dma_ptr scd_bc_tbls;
321 };
322
323 /**
324 * struct iwl_trans_pcie - PCIe transport specific data
325 * @rxq: all the RX queue data
326 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
327 * @global_table: table mapping received VID from hw to rxb
328 * @rba: allocator for RX replenishing
329 * @ctxt_info: context information for FW self init
330 * @ctxt_info_v2: context information for v2 devices
331 * @prph_info: prph info for self init
332 * @prph_scratch: prph scratch for self init
333 * @ctxt_info_dma_addr: dma addr of context information
334 * @prph_info_dma_addr: dma addr of prph info
335 * @prph_scratch_dma_addr: dma addr of prph scratch
337 * @iml: image loader image virtual address
338 * @iml_len: image loader image size
339 * @iml_dma_addr: image loader image DMA address
340 * @trans: pointer to the generic transport area
341 * @scd_base_addr: scheduler sram base address in SRAM
342 * @kw: keep warm address
343 * @pnvm_data: holds info about pnvm payloads allocated in DRAM
344 * @reduced_tables_data: holds info about power reduced tables
345 * payloads allocated in DRAM
346 * @pci_dev: basic pci-network driver stuff
347 * @hw_base: device register space mapped through the PCI BAR
348 * @ucode_write_complete: indicates that the ucode has been copied.
349 * @ucode_write_waitq: wait queue for uCode load
350 * @rx_page_order: page order for receive buffer size
351 * @rx_buf_bytes: RX buffer (RB) size in bytes
352 * @reg_lock: protect hw register access
353 * @mutex: to protect stop_device / start_fw / start_hw
354 * @fw_mon_data: fw continuous recording data
355 * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
356 * during commands in flight
357 * @msix_entries: array of MSI-X entries
358 * @msix_enabled: true if managed to enable MSI-X
359 * @shared_vec_mask: the type of causes the shared vector handles
360 * (see iwl_shared_irq_flags).
361 * @alloc_vecs: the number of interrupt vectors allocated by the OS
362 * @def_irq: default irq for non rx causes
363 * @fh_init_mask: initial unmasked fh causes
364 * @hw_init_mask: initial unmasked hw causes
365 * @fh_mask: current unmasked fh causes
366 * @hw_mask: current unmasked hw causes
367 * @in_rescan: true if we have triggered a device rescan
368 * @base_rb_stts: base virtual address of receive buffer status for all queues
369 * @base_rb_stts_dma: base physical address of receive buffer status
370 * @supported_dma_mask: DMA mask to validate the actual address against,
371 * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
372 * @alloc_page_lock: spinlock for the page allocator
373 * @alloc_page: allocated page to still use parts of
374 * @alloc_page_used: how much of the allocated page was already used (bytes)
375 * @imr_status: imr dma state machine
376 * @imr_waitq: imr wait queue for dma completion
377 * @rf_name: name/version of the CRF, if any
378 * @use_ict: whether or not ICT (interrupt table) is used
379 * @ict_index: current ICT read index
380 * @ict_tbl: ICT table pointer
381 * @ict_tbl_dma: ICT table DMA address
382 * @inta_mask: interrupt (INT-A) mask
383 * @irq_lock: lock to synchronize IRQ handling
384 * @txq_memory: TXQ allocation array
385 * @sx_waitq: waitqueue for Sx transitions
386 * @sx_state: state tracking Sx transitions
387 * @opmode_down: indicates opmode went away
388 * @num_rx_bufs: number of RX buffers to allocate/use
389 * @affinity_mask: IRQ affinity mask for each RX queue
390 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
391 * enable/disable
392 * @fw_reset_state: state of FW reset handshake
393 * @fw_reset_waitq: waitqueue for FW reset handshake
394 * @is_down: indicates the NIC is down
395 * @isr_stats: interrupt statistics
396 * @napi_dev: (fake) netdev for NAPI registration
397 * @txqs: transport tx queues data.
398 * @me_present: WiAMT/CSME is detected as present (1), not present (0)
399 * or unknown (-1, so can still use it as a boolean safely)
400 * @me_recheck_wk: worker to recheck WiAMT/CSME presence
401 * @invalid_tx_cmd: invalid TX command buffer
402 * @wait_command_queue: wait queue for sync commands
403 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
404 * The user should use iwl_trans_{alloc,free}_tx_cmd.
405 * @dev_cmd_pool_name: name for the TX command allocation pool
406 * @pm_support: set to true in start_hw if link pm is supported
407 * @ltr_enabled: set to true if the LTR is enabled
408 */
409 struct iwl_trans_pcie {
410 struct iwl_rxq *rxq;
411 struct iwl_rx_mem_buffer *rx_pool;
412 struct iwl_rx_mem_buffer **global_table;
413 struct iwl_rb_allocator rba;
414 union {
415 struct iwl_context_info *ctxt_info;
416 struct iwl_context_info_v2 *ctxt_info_v2;
417 };
418 struct iwl_prph_info *prph_info;
419 struct iwl_prph_scratch *prph_scratch;
420 void *iml;
421 size_t iml_len;
422 dma_addr_t ctxt_info_dma_addr;
423 dma_addr_t prph_info_dma_addr;
424 dma_addr_t prph_scratch_dma_addr;
425 dma_addr_t iml_dma_addr;
426 struct iwl_trans *trans;
427
428 struct net_device *napi_dev;
429
430 /* INT ICT Table */
431 __le32 *ict_tbl;
432 dma_addr_t ict_tbl_dma;
433 int ict_index;
434 bool use_ict;
435 bool is_down, opmode_down;
436 s8 debug_rfkill;
437 struct isr_statistics isr_stats;
438
439 spinlock_t irq_lock;
440 struct mutex mutex;
441 u32 inta_mask;
442 u32 scd_base_addr;
443 struct iwl_dma_ptr kw;
444
445 /* pnvm data */
446 struct iwl_dram_regions pnvm_data;
447 struct iwl_dram_regions reduced_tables_data;
448
449 struct iwl_txq *txq_memory;
450
451 /* PCI bus related data */
452 struct pci_dev *pci_dev;
453 u8 __iomem *hw_base;
454
455 bool ucode_write_complete;
456 enum {
457 IWL_SX_INVALID = 0,
458 IWL_SX_WAITING,
459 IWL_SX_ERROR,
460 IWL_SX_COMPLETE,
461 } sx_state;
462 wait_queue_head_t ucode_write_waitq;
463 wait_queue_head_t sx_waitq;
464
465 u16 num_rx_bufs;
466
467 u32 rx_page_order;
468 u32 rx_buf_bytes;
469 u32 supported_dma_mask;
470
471 /* allocator lock for the two values below */
472 spinlock_t alloc_page_lock;
473 struct page *alloc_page;
474 u32 alloc_page_used;
475
476 /* protect hw register access */
477 spinlock_t reg_lock;
478 bool cmd_hold_nic_awake;
479
480 #ifdef CONFIG_IWLWIFI_DEBUGFS
481 struct cont_rec fw_mon_data;
482 #endif
483
484 struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
485 bool msix_enabled;
486 u8 shared_vec_mask;
487 u32 alloc_vecs;
488 u32 def_irq;
489 u32 fh_init_mask;
490 u32 hw_init_mask;
491 u32 fh_mask;
492 u32 hw_mask;
493 cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
494 u16 tx_cmd_queue_size;
495 bool in_rescan;
496
497 void *base_rb_stts;
498 dma_addr_t base_rb_stts_dma;
499
500 enum iwl_pcie_fw_reset_state fw_reset_state;
501 wait_queue_head_t fw_reset_waitq;
502 enum iwl_pcie_imr_status imr_status;
503 wait_queue_head_t imr_waitq;
504 char rf_name[32];
505
506 struct iwl_pcie_txqs txqs;
507
508 s8 me_present;
509 struct delayed_work me_recheck_wk;
510
511 struct iwl_dma_ptr invalid_tx_cmd;
512
513 wait_queue_head_t wait_command_queue;
514
515 struct kmem_cache *dev_cmd_pool;
516 char dev_cmd_pool_name[50];
517
518 bool pm_support;
519 bool ltr_enabled;
520 };
521
522 static inline struct iwl_trans_pcie *
523 IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
524 {
525 return (void *)trans->trans_specific;
526 }
527
528 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
529 {
530 /*
531 * Before sending the interrupt the HW disables it to prevent
532 * a nested interrupt. This is done by writing 1 to the corresponding
533 * bit in the mask register. After handling the interrupt, it should be
534 * re-enabled by clearing this bit. This register is defined as
535 * a write-1-to-clear (W1C) register, meaning that the bit is
536 * cleared by writing 1 to it.
537 */
538 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
539 }
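
/*
 * Expected calling pattern (sketch; the real handlers live in the ISR/RX
 * code): an MSI-X per-queue interrupt handler does its work first and only
 * then re-enables its cause, e.g.:
 *
 *	irqreturn_t some_rx_isr(int irq, void *dev_id)
 *	{
 *		... NAPI scheduling / RX handling for this queue ...
 *		iwl_pcie_clear_irq(trans, queue);
 *		return IRQ_HANDLED;
 *	}
 */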
540
541 static inline struct iwl_trans *
542 iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
543 {
544 return container_of((void *)trans_pcie, struct iwl_trans,
545 trans_specific);
546 }
547
548 /*
549 * Convention: trans API functions: iwl_trans_pcie_XXX
550 * Other functions: iwl_pcie_XXX
551 */
552 void iwl_trans_pcie_free(struct iwl_trans *trans);
553 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
554 struct device *dev);
555
556 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
557 #define _iwl_trans_pcie_grab_nic_access(trans, silent) \
558 __cond_lock(nic_access_nobh, \
559 likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
560
561 void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
562 void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
563
564 /*****************************************************
565 * RX
566 ******************************************************/
567 int iwl_pcie_rx_init(struct iwl_trans *trans);
568 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
569 irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
570 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
571 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
572 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
573 int iwl_pcie_rx_stop(struct iwl_trans *trans);
574 void iwl_pcie_rx_free(struct iwl_trans *trans);
575 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
576 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
577 void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
578 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
579 struct iwl_rxq *rxq);
580
581 /*****************************************************
582 * ICT - interrupt handling
583 ******************************************************/
584 irqreturn_t iwl_pcie_isr(int irq, void *data);
585 int iwl_pcie_alloc_ict(struct iwl_trans *trans);
586 void iwl_pcie_free_ict(struct iwl_trans *trans);
587 void iwl_pcie_reset_ict(struct iwl_trans *trans);
588 void iwl_pcie_disable_ict(struct iwl_trans *trans);
589
590 /*****************************************************
591 * TX / HCMD
592 ******************************************************/
593 /* We need 2 entries for the TX command and header, and another one might
594 * be needed for potential data in the SKB's head. The remaining ones can
595 * be used for frags.
596 */
597 #define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
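
/* Worked example (values are illustrative): with tfd.max_tbs == 20, up to
 * IWL_TRANS_PCIE_MAX_FRAGS() == 17 TBs remain available for skb frags.
 */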
598
599 struct iwl_tso_hdr_page {
600 struct page *page;
601 u8 *pos;
602 };
603
604 /*
605 * Note that we put this struct *last* in the page. By doing that, we ensure
606 * that no TB referencing this page can trigger the 32-bit boundary hardware
607 * bug.
608 */
609 struct iwl_tso_page_info {
610 dma_addr_t dma_addr;
611 struct page *next;
612 refcount_t use_count;
613 };
614
615 #define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
616 #define IWL_TSO_PAGE_INFO(addr) \
617 ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
618 IWL_TSO_PAGE_DATA_SIZE))
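
/*
 * Resulting page layout (derived from the two macros above):
 *
 *	+------------------------------+ <- page start
 *	| TSO headers/payload data     |    IWL_TSO_PAGE_DATA_SIZE bytes
 *	+------------------------------+
 *	| struct iwl_tso_page_info     |    dma_addr, next, use_count
 *	+------------------------------+ <- page start + PAGE_SIZE
 *
 * IWL_TSO_PAGE_INFO(addr) works for any address inside the page since it
 * masks the in-page offset away with PAGE_MASK.
 */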
619
620 int iwl_pcie_tx_init(struct iwl_trans *trans);
621 void iwl_pcie_tx_start(struct iwl_trans *trans);
622 int iwl_pcie_tx_stop(struct iwl_trans *trans);
623 void iwl_pcie_tx_free(struct iwl_trans *trans);
624 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
625 const struct iwl_trans_txq_scd_cfg *cfg,
626 unsigned int wdg_timeout);
627 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
628 bool configure_scd);
629 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
630 bool shared_mode);
631 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
632 struct iwl_device_tx_cmd *dev_cmd, int txq_id);
633 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
634 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
635 struct iwl_rx_cmd_buffer *rxb);
636 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
637 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
638 int slots_num, bool cmd_queue);
639
640 dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
641 unsigned int len);
642 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
643 struct iwl_cmd_meta *cmd_meta,
644 u8 **hdr, unsigned int hdr_room,
645 unsigned int offset);
646
647 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
648 struct iwl_cmd_meta *cmd_meta);
649
650 static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
651 {
652 dma_addr_t res;
653
654 res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
655 res += (unsigned long)addr & ~PAGE_MASK;
656
657 return res;
658 }
659
660 static inline dma_addr_t
661 iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
662 {
663 return txq->first_tb_dma +
664 sizeof(struct iwl_pcie_first_tb_buf) * idx;
665 }
666
667 static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
668 {
669 return index & (q->n_window - 1);
670 }
671
672 static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
673 struct iwl_txq *txq, int idx)
674 {
675 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
676
677 if (trans->mac_cfg->gen2)
678 idx = iwl_txq_get_cmd_index(txq, idx);
679
680 return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
681 }
682
683 /*
684 * We need this inline in case dma_addr_t is only 32-bits - since the
685 * hardware is always 64-bit, the issue can still occur in that case,
686 * so use u64 for 'phys' here to force the addition in 64-bit.
687 */
688 static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
689 {
690 return upper_32_bits(phys) != upper_32_bits(phys + len);
691 }
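
/*
 * Worked example (illustrative numbers): phys == 0xffffff80 with
 * len == 0x100 gives phys + len == 0x100000000, so upper_32_bits()
 * differs (0 vs. 1) and the helper returns true; such a TB is expected
 * to be split by the caller.
 */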
692
693 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
694
695 static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
696 {
697 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
698
699 if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
700 iwl_op_mode_queue_full(trans->op_mode, txq->id);
701 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
702 } else {
703 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
704 txq->id);
705 }
706 }
707
708 /**
709 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
710 * @trans: the transport (for configuration data)
711 * @index: current index
712 * Return: the queue index incremented, subject to wrapping
713 */
714 static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
715 {
716 return ++index &
717 (trans->mac_cfg->base->max_tfd_queue_size - 1);
718 }
719
720 /**
721 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
722 * @trans: the transport (for configuration data)
723 * @index: current index
724 * Return: the queue index decremented, subject to wrapping
725 */
726 static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
727 {
728 return --index &
729 (trans->mac_cfg->base->max_tfd_queue_size - 1);
730 }
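
/*
 * Both wrap helpers rely on max_tfd_queue_size being a power of two.
 * Example (illustrative): with a queue size of 256,
 * iwl_txq_inc_wrap(trans, 255) == 0 and iwl_txq_dec_wrap(trans, 0) == 255.
 */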
731
732 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
733
734 static inline void
735 iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
736 {
737 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
738
739 if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
740 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
741 iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
742 }
743 }
744
745 int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
746 struct iwl_tfh_tfd *tfd, dma_addr_t addr,
747 u16 len);
748
749 static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
750 struct iwl_tfh_tfd *tfd)
751 {
752 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
753
754 tfd->num_tbs = 0;
755
756 iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
757 trans_pcie->invalid_tx_cmd.size);
758 }
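
/*
 * Note (editorial): pointing a reclaimed TFD at the invalid_tx_cmd buffer
 * ensures the DMA engine never reads from a stale or freed address if it
 * happens to re-fetch the descriptor.
 */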
759
760 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
761 struct iwl_cmd_meta *meta,
762 struct iwl_tfh_tfd *tfd);
763
764 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
765 u32 sta_mask, u8 tid,
766 int size, unsigned int timeout);
767
768 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
769 struct iwl_device_tx_cmd *dev_cmd, int txq_id);
770
771 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
772 void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
773 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
774 int slots_num, bool cmd_queue);
775 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
776 int queue_size);
777
778 static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
779 void *_tfd, u8 idx)
780 {
781 struct iwl_tfd *tfd;
782 struct iwl_tfd_tb *tb;
783
784 if (trans->mac_cfg->gen2) {
785 struct iwl_tfh_tfd *tfh_tfd = _tfd;
786 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
787
788 return le16_to_cpu(tfh_tb->tb_len);
789 }
790
791 tfd = (struct iwl_tfd *)_tfd;
792 tb = &tfd->tbs[idx];
793
794 return le16_to_cpu(tb->hi_n_len) >> 4;
795 }
796
797 static inline struct iwl_device_tx_cmd *
798 iwl_pcie_gen1_2_alloc_tx_cmd(struct iwl_trans *trans)
799 {
800 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
801
802 return kmem_cache_zalloc(trans_pcie->dev_cmd_pool, GFP_ATOMIC);
803 }
804
805 static inline void
806 iwl_pcie_gen1_2_free_tx_cmd(struct iwl_trans *trans,
807 struct iwl_device_tx_cmd *dev_cmd)
808 {
809 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
810
811 kmem_cache_free(trans_pcie->dev_cmd_pool, dev_cmd);
812 }
813
814 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
815 struct sk_buff_head *skbs, bool is_flush);
816 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
817 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
818 unsigned long txqs, bool freeze);
819 int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
820 int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
821
822 /*****************************************************
823 * Error handling
824 ******************************************************/
825 void iwl_pcie_dump_csr(struct iwl_trans *trans);
826
827 /*****************************************************
828 * Helpers
829 ******************************************************/
830 static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
831 {
832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
833
834 clear_bit(STATUS_INT_ENABLED, &trans->status);
835 if (!trans_pcie->msix_enabled) {
836 /* disable interrupts from uCode/NIC to host */
837 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
838
839 /* acknowledge/clear/reset any interrupts still pending
840 * from uCode or flow handler (Rx/Tx DMA) */
841 iwl_write32(trans, CSR_INT, 0xffffffff);
842 iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
843 } else {
844 /* disable all the interrupts we might use */
845 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
846 trans_pcie->fh_init_mask);
847 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
848 trans_pcie->hw_init_mask);
849 trans_pcie->fh_mask = 0;
850 trans_pcie->hw_mask = 0;
851 }
852 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
853 }
854
855 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
856 int start)
857 {
858 int i = 0;
859
860 while (start < fw->num_sec &&
861 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
862 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
863 start++;
864 i++;
865 }
866
867 return i;
868 }
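
/*
 * Example (illustrative): for a firmware image laid out as
 * [sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec3, sec4],
 * iwl_pcie_get_num_sections(fw, 0) returns 2 and
 * iwl_pcie_get_num_sections(fw, 3) returns 2.
 */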
869
870 static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
871 {
872 struct iwl_self_init_dram *dram = &trans->init_dram;
873 int i;
874
875 if (!dram->fw) {
876 WARN_ON(dram->fw_cnt);
877 return;
878 }
879
880 for (i = 0; i < dram->fw_cnt; i++)
881 dma_free_coherent(trans->dev, dram->fw[i].size,
882 dram->fw[i].block, dram->fw[i].physical);
883
884 kfree(dram->fw);
885 dram->fw_cnt = 0;
886 dram->fw = NULL;
887 }
888
889 static inline void iwl_disable_interrupts(struct iwl_trans *trans)
890 {
891 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
892
893 spin_lock_bh(&trans_pcie->irq_lock);
894 _iwl_disable_interrupts(trans);
895 spin_unlock_bh(&trans_pcie->irq_lock);
896 }
897
898 static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
899 {
900 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
901
902 IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
903 set_bit(STATUS_INT_ENABLED, &trans->status);
904 if (!trans_pcie->msix_enabled) {
905 trans_pcie->inta_mask = CSR_INI_SET_MASK;
906 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
907 } else {
908 /*
909 * fh_mask/hw_mask keep all the unmasked causes.
910 * Unlike MSI, with MSI-X a cause is enabled when its bit is unset.
911 */
912 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
913 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
914 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
915 ~trans_pcie->fh_mask);
916 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
917 ~trans_pcie->hw_mask);
918 }
919 }
920
921 static inline void iwl_enable_interrupts(struct iwl_trans *trans)
922 {
923 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
924
925 spin_lock_bh(&trans_pcie->irq_lock);
926 _iwl_enable_interrupts(trans);
927 spin_unlock_bh(&trans_pcie->irq_lock);
928 }

929 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
930 {
931 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
932
933 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
934 trans_pcie->hw_mask = msk;
935 }
936
937 static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
938 {
939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
940
941 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
942 trans_pcie->fh_mask = msk;
943 }
944
945 static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
946 {
947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
948
949 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
950 if (!trans_pcie->msix_enabled) {
951 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
952 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
953 } else {
954 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
955 trans_pcie->hw_init_mask);
956 iwl_enable_fh_int_msk_msix(trans,
957 MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
958 }
959 }
960
961 static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
962 bool top_reset)
963 {
964 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
965
966 IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
967 top_reset ? "RESET" : "ALIVE");
968
969 if (!trans_pcie->msix_enabled) {
970 /*
971 * When the ALIVE interrupt is received, the ISR will call
972 * iwl_enable_fw_load_int_ctx_info again to re-enable the ALIVE
973 * interrupt (which is not really needed anymore) but also the
974 * RX interrupt, which will allow us to receive the ALIVE
975 * notification (which arrives as an Rx packet) and continue the flow.
976 */
977 if (top_reset)
978 trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
979 else
980 trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
981 CSR_INT_BIT_FH_RX;
982 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
983 } else {
984 u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
985 : MSIX_HW_INT_CAUSES_REG_ALIVE;
986
987 iwl_enable_hw_int_msk_msix(trans, val);
988
989 if (top_reset)
990 return;
991 /*
992 * Leave all the FH causes enabled to get the ALIVE
993 * notification.
994 */
995 iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
996 }
997 }
998
999 static inline const char *queue_name(struct device *dev,
1000 struct iwl_trans_pcie *trans_p, int i)
1001 {
1002 if (trans_p->shared_vec_mask) {
1003 int vec = trans_p->shared_vec_mask &
1004 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1005
1006 if (i == 0)
1007 return DRV_NAME ":shared_IRQ";
1008
1009 return devm_kasprintf(dev, GFP_KERNEL,
1010 DRV_NAME ":queue_%d", i + vec);
1011 }
1012 if (i == 0)
1013 return DRV_NAME ":default_queue";
1014
1015 if (i == trans_p->alloc_vecs - 1)
1016 return DRV_NAME ":exception";
1017
1018 return devm_kasprintf(dev, GFP_KERNEL,
1019 DRV_NAME ":queue_%d", i);
1020 }
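
/*
 * Example of the resulting names (assuming DRV_NAME is "iwlwifi" and no
 * vector sharing): "iwlwifi:default_queue", "iwlwifi:queue_1", ...,
 * "iwlwifi:exception" for the last allocated vector.
 */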
1021
1022 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
1023 {
1024 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1025
1026 IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
1027 if (!trans_pcie->msix_enabled) {
1028 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
1029 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
1030 } else {
1031 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
1032 trans_pcie->fh_init_mask);
1033 trans_pcie->fh_mask = 0;
1034 iwl_enable_hw_int_msk_msix(trans,
1035 MSIX_HW_INT_CAUSES_REG_RF_KILL);
1036 }
1037
1038 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
1039 /*
1040 * On 9000-series devices this bit isn't enabled by default, so
1041 * when we power down the device we need to set the bit to allow it
1042 * to wake up the PCI-E bus for RF-kill interrupts.
1043 */
1044 iwl_set_bit(trans, CSR_GP_CNTRL,
1045 CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1046 }
1047 }
1048
1049 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
1050
1051 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
1052 {
1053 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1054
1055 lockdep_assert_held(&trans_pcie->mutex);
1056
1057 if (trans_pcie->debug_rfkill == 1)
1058 return true;
1059
1060 return !(iwl_read32(trans, CSR_GP_CNTRL) &
1061 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1062 }
1063
1064 static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
1065 {
1066 return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
1067 }
1068
1069 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
1070
1071 #ifdef CONFIG_IWLWIFI_DEBUGFS
1072 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
1073 void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
1074 #else
1075 static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
1076 #endif
1077
1078 void iwl_pcie_rx_allocator_work(struct work_struct *data);
1079
1080 /* common trans ops for all generations transports */
1081 void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans);
1082 int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
1083 int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
1084 void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
1085 void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
1086 void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
1087 u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
1088 u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
1089 void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
1090 int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
1091 void *buf, int dwords);
1092 int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
1093 struct iwl_trans_dump_data *
1094 iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
1095 const struct iwl_dump_sanitize_ops *sanitize_ops,
1096 void *sanitize_ctx);
1097 int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1098 bool reset);
1099 int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset);
1100 void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
1101 void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
1102 void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
1103 u32 mask, u32 value);
1104 int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
1105 u32 *val);
1106 bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
1107 void __releases(nic_access_nobh)
1108 iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
1109 void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
1110 int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
1111 const struct pci_device_id *ent,
1112 const struct iwl_mac_cfg *mac_cfg,
1113 u8 __iomem *hw_base, u32 hw_rev);
1114 void iwl_pcie_gen1_2_remove(struct iwl_trans *trans);
1115
1116 /* transport gen 1 exported functions */
1117 void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
1118 int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1119 const struct iwl_fw *fw,
1120 const struct fw_img *img,
1121 bool run_in_rfkill);
1122 void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
1123
1124 /* common functions that are used by gen2 transport */
1125 void iwl_trans_pcie_gen2_op_mode_leave(struct iwl_trans *trans);
1126 int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
1127 void iwl_pcie_apm_config(struct iwl_trans *trans);
1128 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
1129 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
1130 bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
1131 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1132 bool was_in_rfkill);
1133 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
1134 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
1135 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
1136 struct iwl_dma_ptr *ptr, size_t size);
1137 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
1138 void iwl_pcie_apply_destination(struct iwl_trans *trans);
1139 int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans);
1140
1141 /* transport gen 2 exported functions */
1142 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
1143 const struct iwl_fw *fw,
1144 const struct fw_img *img,
1145 bool run_in_rfkill);
1146 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
1147 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
1148 int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
1149 struct iwl_host_cmd *cmd);
1150 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1151 struct iwl_host_cmd *cmd);
1152 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
1153 u32 dst_addr, u64 src_addr, u32 byte_cnt);
1154 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
1155 u32 dst_addr, u64 src_addr, u32 byte_cnt);
1156 int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
1157 struct iwl_trans_rxq_dma_data *data);
1158
1159 static inline bool iwl_pcie_gen1_is_pm_supported(struct iwl_trans *trans)
1160 {
1161 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1162
1163 return trans_pcie->pm_support;
1164 }
1165
1166 static inline bool iwl_pcie_gen1_2_is_ltr_enabled(struct iwl_trans *trans)
1167 {
1168 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1169
1170 return trans_pcie->ltr_enabled;
1171 }
1172 #endif /* __iwl_trans_int_pcie_h__ */
1173