/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_fh_h__
#define __iwl_fh_h__

#include <linux/types.h>
#include <linux/bitfield.h>

#include "iwl-trans.h"

/****************************/
/* Flow Handler Definitions */
/****************************/

/*
 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
 * Addresses are offsets from device's PCI hardware base address.
 */
#define FH_MEM_LOWER_BOUND	(0x1000)
#define FH_MEM_UPPER_BOUND	(0x2000)
#define FH_MEM_LOWER_BOUND_GEN2	(0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2	(0xa08000)

/*
 * Keep-Warm (KW) buffer base address.
 *
 * Driver must allocate a 4KByte buffer that is for keeping the
 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
 * DRAM access when doing Txing or Rxing. The dummy accesses prevent host
 * from going into a power-savings mode that would cause higher DRAM latency,
 * and possible data over/under-runs, before all Tx/Rx is complete.
 *
 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
 * of the buffer, which must be 4K aligned. Once this is set up, the device
 * automatically invokes keep-warm accesses when normal accesses might not
 * be sufficient to maintain fast DRAM response.
 *
 * Bit fields:
 *  31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
 */
#define FH_KW_MEM_ADDR_REG	(FH_MEM_LOWER_BOUND + 0x97C)

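/*
 * Illustrative sketch only (not part of the driver API): per the register
 * description above, FH_KW_MEM_ADDR_REG takes bits [35:4] of the 4K-aligned
 * keep-warm buffer address in register bits [31:0], i.e. the DMA address
 * shifted right by 4. The helper name below is hypothetical.
 */
static inline u32 iwl_fh_example_kw_mem_addr_val(dma_addr_t kw_dma_addr)
{
	/* buffer must be 4K aligned, so the low 4 bits dropped here are 0 */
	return (u32)(kw_dma_addr >> 4);
}
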
/*
 * TFD Circular Buffers Base (CBBC) addresses
 *
 * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
 * aligned (address bits 0-7 must be 0).
 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
 * for them are in different places.
 *
 * Bit fields in each pointer register:
 *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
 */
#define FH_MEM_CBBC_0_15_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_MEM_CBBC_0_15_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xA10)
#define FH_MEM_CBBC_16_19_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xBF0)
#define FH_MEM_CBBC_16_19_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xB80)
/* 22000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE		(0x1C00)

/* Find TFD CB base pointer for given queue */
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
					     unsigned int chnl)
{
	if (trans->trans_cfg->gen2) {
		WARN_ON_ONCE(chnl >= 64);
		return TFH_TFDQ_CBB_TABLE + 8 * chnl;
	}
	if (chnl < 16)
		return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
	if (chnl < 20)
		return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
	WARN_ON_ONCE(chnl >= 32);
	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}

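/*
 * Illustrative sketch only (not part of the driver API): the legacy CBBC
 * registers described above take bits [35:8] of the 256-byte aligned TFD
 * circular buffer address in bits [27:0], i.e. the DMA address shifted
 * right by 8. A driver would typically write this value to the register
 * returned by FH_MEM_CBBC_QUEUE() for the queue being set up; the helper
 * name below is hypothetical.
 */
static inline u32 iwl_fh_example_cbbc_val(dma_addr_t tfd_cb_dma_addr)
{
	/* CB must be 256-byte aligned, so the dropped low 8 bits are 0 */
	return (u32)(tfd_cb_dma_addr >> 8);
}
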
/* 22000 configuration registers */

/*
 * TFH Configuration register.
 *
 * BIT fields:
 *
 * Bits 3:0:
 * Define the maximum number of pending read requests.
 * Maximum configuration value allowed is 0xC
 * Bits 9:8:
 * Define the maximum transfer size. (64 / 128 / 256)
 * Bit 10:
 * When bit is set and transfer size is set to 128B, the TFH will enable
 * reading chunks of more than 64B only if the read address is aligned to 128B.
 * In case of DRAM read address which is not aligned to 128B, the TFH will
 * enable transfer size which doesn't cross 64B DRAM address boundary.
 */
#define TFH_TRANSFER_MODE		(0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ	0xc
#define TFH_CHUNK_SIZE_128		BIT(8)
#define TFH_CHUNK_SPLIT_MODE		BIT(10)
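/*
 * Illustrative sketch only (not part of the driver API): one plausible way
 * to compose a TFH_TRANSFER_MODE value from the fields described above -
 * the maximum number of pending read requests, 128B transfer chunks and the
 * split mode for DRAM addresses not aligned to 128B. The helper name is
 * hypothetical; the exact value a driver programs depends on its setup.
 */
static inline u32 iwl_fh_example_tfh_transfer_mode(void)
{
	return TFH_TRANSFER_MAX_PENDING_REQ |	/* bits 3:0 */
	       TFH_CHUNK_SIZE_128 |		/* bits 9:8 */
	       TFH_CHUNK_SPLIT_MODE;		/* bit 10 */
}
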
/*
 * Defines the offset address in dwords referring from the beginning of the
 * Tx CMD which will be updated in DRAM.
 * Note that the TFH offset address for Tx CMD update is always referring to
 * the start of the TFD first TB.
 * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
 */
#define TFH_TXCMD_UPDATE_CFG		(0x1F48)
/*
 * Controls TX DMA operation
 *
 * BIT fields:
 *
 * Bits 31:30: Enable the SRAM DMA channel.
 * Turning on bit 31 will kick the SRAM2DRAM DMA.
 * Note that the sram2dram may be enabled only after configuring the DRAM and
 * SRAM addresses registers and the byte count register.
 * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
 * set to 1 - interrupt is sent to the driver
 * Bit 0: Indicates the snoop configuration
 */
#define TFH_SRV_DMA_CHNL0_CTRL		(0x1F60)
#define TFH_SRV_DMA_SNOOP		BIT(0)
#define TFH_SRV_DMA_TO_DRIVER		BIT(24)
#define TFH_SRV_DMA_START		BIT(31)

/* Defines the DMA SRAM write start address to transfer a data block */
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(0x1F64)

/* Defines the 64bits DRAM start address to read the DMA data block from */
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(0x1F68)

/*
 * Defines the number of bytes to transfer from DRAM to SRAM.
 * Note that this register may be configured with non-dword aligned size.
 */
#define TFH_SRV_DMA_CHNL0_BC		(0x1F70)

/*
 * Rx SRAM Control and Status Registers (RSCSR)
 *
 * These registers provide handshake between driver and device for the Rx queue
 * (this queue handles *all* command responses, notifications, Rx data, etc.
 * sent from uCode to host driver). Unlike Tx, there is only one Rx
 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
 * mapping between RBDs and RBs.
 *
 * Driver must allocate host DRAM memory for the following, and set the
 * physical address of each into device registers:
 *
 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
 *    entries (although any power of 2, up to 4096, is selectable by driver).
 *    Each entry (1 dword) points to a receive buffer (RB) of consistent size
 *    (typically 4K, although 8K or 16K are also selectable by driver).
 *    Driver sets up RB size and number of RBDs in the CB via Rx config
 *    register FH_MEM_RCSR_CHNL0_CONFIG_REG.
 *
 *    Bit fields within one RBD:
 *    27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
 *
 *    Driver sets physical address [35:8] of base of RBD circular buffer
 *    into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
 *
 * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
 *    (RBs) have been filled, via a "write pointer", actually the index of
 *    the RB's corresponding RBD within the circular buffer. Driver sets
 *    physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
 *
 *    Bit fields in lower dword of Rx status buffer (upper dword not used
 *    by driver):
 *    31-12: Not used by driver
 *    11- 0: Index of last filled Rx buffer descriptor
 *           (device writes, driver reads this value)
 *
 * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
 * enter pointers to these RBs into contiguous RBD circular buffer entries,
 * and update the device's "write" index register,
 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
 *
 * This "write" index corresponds to the *next* RBD that the driver will make
 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
 * the circular buffer. This value should initially be 0 (before preparing any
 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
 * wrap back to 0 at the end of the circular buffer (but don't wrap before
 * the "read" index has advanced past 1! See below).
 * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
 *
 * As the device fills RBs (referenced from contiguous RBDs within the circular
 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
 * to tell the driver the index of the latest filled RBD. The driver must
 * read this "read" index from DRAM after receiving an Rx interrupt from the
 * device.
 *
 * The driver must also internally keep track of a third index, which is the
 * next RBD to process. When receiving an Rx interrupt, driver should process
 * all filled but unprocessed RBs up to, but not including, the RB
 * corresponding to the "read" index. For example, if "read" index becomes "1",
 * driver may process the RB pointed to by RBD 0. Depending on volume of
 * traffic, there may be many RBs to process.
 *
 * If read index == write index, device thinks there is no room to put new
 * data. Due to this, the maximum number of filled RBs is 255, instead of 256.
 * To be safe, make sure that there is a gap of at least 2 RBDs between "write"
 * and "read" indexes; that is, make sure that there are no more than 254
 * buffers waiting to be filled.
 */
#define FH_MEM_RSCSR_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xBC0)
#define FH_MEM_RSCSR_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0		(FH_MEM_RSCSR_LOWER_BOUND)

/*
 * Physical base address of 8-byte Rx Status buffer.
 * Bit fields:
 *  31-0: Rx status buffer physical base address [35:4], must be 16-byte
 *        aligned.
 */
#define FH_RSCSR_CHNL0_STTS_WPTR_REG	(FH_MEM_RSCSR_CHNL0)

/*
 * Physical base address of Rx Buffer Descriptor Circular Buffer.
 * Bit fields:
 *  27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
 */
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG	(FH_MEM_RSCSR_CHNL0 + 0x004)

/*
 * Rx write pointer (index, really!).
 * Bit fields:
 *  11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
 *        NOTE: For 256-entry circular buffer, use only bits [7:0].
 */
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR		(FH_RSCSR_CHNL0_RBDCB_WPTR_REG)

#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR		FW_RSCSR_CHNL0_RXDCB_RDPTR_REG

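/*
 * Illustrative sketch only (not part of the driver API): the value a driver
 * following the scheme described above might write to FH_RSCSR_CHNL0_WPTR -
 * the index of the next RBD it will make available, wrapped to the circular
 * buffer size and rounded down to a multiple of 8 as the device expects.
 * The helper name is hypothetical; @num_rbds is assumed to be a power of 2
 * (32..4096).
 */
static inline u32 iwl_fh_example_rbdcb_wptr_val(u32 next_free_rbd_idx,
						u32 num_rbds)
{
	/* wrap to queue size, then round down to a multiple of 8 */
	return (next_free_rbd_idx & (num_rbds - 1)) & ~0x7;
}
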
/*
 * Rx Config/Status Registers (RCSR)
 * Rx Config Reg for channel 0 (only channel used)
 *
 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
 * normal operation (see bit fields).
 *
 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
 *
 * Bit fields:
 *  31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
 *         '10' operate normally
 *  29-24: reserved
 *  23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
 *         min "5" for 32 RBDs, max "12" for 4096 RBDs.
 *  19-18: reserved
 *  17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
 *         '10' 12K, '11' 16K.
 *  15-14: reserved
 *  13-12: IRQ destination; '00' none, '01' host driver (normal operation)
 *  11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
 *         typical value 0x10 (about 1/2 msec)
 *   3- 0: reserved
 */
#define FH_MEM_RCSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RCSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xCC0)
#define FH_MEM_RCSR_CHNL0		(FH_MEM_RCSR_LOWER_BOUND)

#define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR	(FH_MEM_RCSR_CHNL0 + 0x8)
#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ	(FH_MEM_RCSR_CHNL0 + 0x10)

#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK	(0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK	(0x00001000) /* bit 12 */
#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK	(0x00030000) /* bits 16-17 */
#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK	(0x00F00000) /* bits 20-23 */
#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK	(0xC0000000) /* bits 30-31 */

#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
#define RX_RB_TIMEOUT				(0x11)

#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL	(0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL	(0x40000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL	(0x80000000)

#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K	(0x00000000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K	(0x00010000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K	(0x00020000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K	(0x00030000)

#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		(0x00000004)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL	(0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	(0x00001000)

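/*
 * Illustrative sketch only (not part of the driver API): one way to compose
 * the channel 0 Rx config value described above - DMA enabled, 256 RBDs
 * (2^8), 4K receive buffers, interrupts routed to the host driver and the
 * usual RB timeout. The helper name is hypothetical; actual drivers pick
 * these fields according to their configuration.
 */
static inline u32 iwl_fh_example_rcsr_chnl0_config(void)
{
	return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	       FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	       (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |	/* 256 RBDs */
	       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	       (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
}
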
/*
 * Rx Shared Status Registers (RSSR)
 *
 * After stopping Rx DMA channel (writing 0 to
 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
 *
 * Bit fields:
 *  24: 1 = Channel 0 is idle
 *
 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
 * contain default values that should not be altered by the driver.
 */
#define FH_MEM_RSSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xC40)
#define FH_MEM_RSSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xD00)

#define FH_MEM_RSSR_SHARED_CTRL_REG	(FH_MEM_RSSR_LOWER_BOUND)
#define FH_MEM_RSSR_RX_STATUS_REG	(FH_MEM_RSSR_LOWER_BOUND + 0x004)
#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
					(FH_MEM_RSSR_LOWER_BOUND + 0x008)

#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)

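/*
 * Illustrative sketch only (not part of the driver API): after clearing
 * FH_MEM_RCSR_CHNL0_CONFIG_REG, the driver is expected to poll
 * FH_MEM_RSSR_RX_STATUS_REG until this test on the read-back value
 * succeeds. The helper name is hypothetical.
 */
static inline bool iwl_fh_example_rx_chnl0_idle(u32 rx_status_val)
{
	return !!(rx_status_val & FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE);
}
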
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
#define FH_MEM_TB_MAX_LENGTH		(0x00020000)

/* 9000 rx series registers */

#define RFH_Q0_FRBDCB_BA_LSB		0xA08000 /* 64 bit address */
#define RFH_Q_FRBDCB_BA_LSB(q)		(RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX		0xA08080
#define RFH_Q_FRBDCB_WIDX(q)		(RFH_Q0_FRBDCB_WIDX + (q) * 4)
/* Write index table - shadow registers */
#define RFH_Q0_FRBDCB_WIDX_TRG		0x1C80
#define RFH_Q_FRBDCB_WIDX_TRG(q)	(RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX		0xA080C0
#define RFH_Q_FRBDCB_RIDX(q)		(RFH_Q0_FRBDCB_RIDX + (q) * 4)
/* Used list table */
#define RFH_Q0_URBDCB_BA_LSB		0xA08100 /* 64 bit address */
#define RFH_Q_URBDCB_BA_LSB(q)		(RFH_Q0_URBDCB_BA_LSB + (q) * 8)
/* Write index table */
#define RFH_Q0_URBDCB_WIDX		0xA08180
#define RFH_Q_URBDCB_WIDX(q)		(RFH_Q0_URBDCB_WIDX + (q) * 4)
#define RFH_Q0_URBDCB_VAID		0xA081C0
#define RFH_Q_URBDCB_VAID(q)		(RFH_Q0_URBDCB_VAID + (q) * 4)
/* stts */
#define RFH_Q0_URBD_STTS_WPTR_LSB	0xA08200 /* 64 bit address */
#define RFH_Q_URBD_STTS_WPTR_LSB(q)	(RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)

#define RFH_Q0_ORB_WPTR_LSB		0xA08280
#define RFH_Q_ORB_WPTR_LSB(q)		(RFH_Q0_ORB_WPTR_LSB + (q) * 8)
#define RFH_RBDBUF_RBD0_LSB		0xA08300
#define RFH_RBDBUF_RBD_LSB(q)		(RFH_RBDBUF_RBD0_LSB + (q) * 8)

/*
 * RFH Status Register
 *
 * Bit fields:
 *
 * Bit 29: RBD_FETCH_IDLE
 * This status flag is set by the RFH when there is no active RBD fetch from
 * DRAM.
 * Once the RFH RBD controller starts fetching (or when there is a pending
 * RBD read response from DRAM), this flag is immediately turned off.
 *
 * Bit 30: SRAM_DMA_IDLE
 * This status flag is set by the RFH when there is no active transaction from
 * SRAM to DRAM.
 * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
 *
 * Bit 31: RXF_DMA_IDLE
 * This status flag is set by the RFH when there is no active transaction from
 * RXF to DRAM.
 * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
 */
#define RFH_GEN_STATUS		0xA09808
#define RFH_GEN_STATUS_GEN3	0xA07824
#define RBD_FETCH_IDLE		BIT(29)
#define SRAM_DMA_IDLE		BIT(30)
#define RXF_DMA_IDLE		BIT(31)

/* DMA configuration */
#define RFH_RXF_DMA_CFG		0xA09820
#define RFH_RXF_DMA_CFG_GEN3	0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK	(0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS		16
#define RFH_RXF_DMA_RB_SIZE_1K		(0x1 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_2K		(0x2 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_4K		(0x4 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_8K		(0x8 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_12K		(0x9 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_16K		(0xA << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_20K		(0xB << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_24K		(0xC << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_28K		(0xD << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_32K		(0xE << RFH_RXF_DMA_RB_SIZE_POS)
/* RB Circular Buffer size: defines the table sizes in RBD units */
#define RFH_RXF_DMA_RBDCB_SIZE_MASK	(0x00F00000) /* bits 20-23 */
#define RFH_RXF_DMA_RBDCB_SIZE_POS	20
#define RFH_RXF_DMA_RBDCB_SIZE_8	(0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_16	(0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_32	(0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_64	(0x6 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_128	(0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_256	(0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_512	(0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_1024	(0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_2048	(0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_MIN_RB_SIZE_MASK	(0x03000000) /* bits 24-25 */
#define RFH_RXF_DMA_MIN_RB_SIZE_POS	24
#define RFH_RXF_DMA_MIN_RB_4_8		(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK	(0x04000000) /* bit 26 */
#define RFH_RXF_DMA_SINGLE_FRAME_MASK	(0x20000000) /* bit 29 */
#define RFH_DMA_EN_MASK			(0xC0000000) /* bits 30-31 */
#define RFH_DMA_EN_ENABLE_VAL		BIT(31)

#define RFH_RXF_RXQ_ACTIVE		0xA0980C

Zeeb * SRAM to DRAM. 373bfcc09ddSBjoern A. Zeeb * Once the SRAM to DRAM DMA is active, this flag is immediately turned off. 374bfcc09ddSBjoern A. Zeeb * 375bfcc09ddSBjoern A. Zeeb * Bit 31: RXF_DMA_IDLE 376bfcc09ddSBjoern A. Zeeb * This status flag is set by the RFH when there is no active transaction from 377bfcc09ddSBjoern A. Zeeb * RXF to DRAM. 378bfcc09ddSBjoern A. Zeeb * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. 379bfcc09ddSBjoern A. Zeeb */ 380bfcc09ddSBjoern A. Zeeb #define RFH_GEN_STATUS 0xA09808 381bfcc09ddSBjoern A. Zeeb #define RFH_GEN_STATUS_GEN3 0xA07824 382bfcc09ddSBjoern A. Zeeb #define RBD_FETCH_IDLE BIT(29) 383bfcc09ddSBjoern A. Zeeb #define SRAM_DMA_IDLE BIT(30) 384bfcc09ddSBjoern A. Zeeb #define RXF_DMA_IDLE BIT(31) 385bfcc09ddSBjoern A. Zeeb 386bfcc09ddSBjoern A. Zeeb /* DMA configuration */ 387bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_CFG 0xA09820 388bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_CFG_GEN3 0xA07880 389bfcc09ddSBjoern A. Zeeb /* RB size */ 390bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ 391bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_POS 16 392bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) 393bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) 394bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) 395bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) 396bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) 397bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) 398bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) 399bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) 400bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) 401bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) 402bfcc09ddSBjoern A. Zeeb /* RB Circular Buffer size:defines the table sizes in RBD units */ 403bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ 404bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_POS 20 405bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) 406bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) 407bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) 408bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) 409bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) 410bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) 411bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) 412bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) 413bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) 414bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ 415bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 416bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) 417bfcc09ddSBjoern A. 
Zeeb #define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ 418bfcc09ddSBjoern A. Zeeb #define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ 419bfcc09ddSBjoern A. Zeeb #define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ 420bfcc09ddSBjoern A. Zeeb #define RFH_DMA_EN_ENABLE_VAL BIT(31) 421bfcc09ddSBjoern A. Zeeb 422bfcc09ddSBjoern A. Zeeb #define RFH_RXF_RXQ_ACTIVE 0xA0980C 423bfcc09ddSBjoern A. Zeeb 424bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG 0xA09800 425bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) 426bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) 427bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) 428bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1 429bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0 430bfcc09ddSBjoern A. Zeeb /* the driver assumes everywhere that the default RXQ is 0 */ 431bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00 432bfcc09ddSBjoern A. Zeeb #define RFH_GEN_CFG_VAL(_n, _v) FIELD_PREP(RFH_GEN_CFG_ ## _n, _v) 433bfcc09ddSBjoern A. Zeeb 434bfcc09ddSBjoern A. Zeeb /* end of 9000 rx series registers */ 435bfcc09ddSBjoern A. Zeeb 436bfcc09ddSBjoern A. Zeeb /* TFDB Area - TFDs buffer table */ 437bfcc09ddSBjoern A. Zeeb #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) 438bfcc09ddSBjoern A. Zeeb #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) 439bfcc09ddSBjoern A. Zeeb #define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) 440bfcc09ddSBjoern A. Zeeb #define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) 441bfcc09ddSBjoern A. Zeeb #define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) 442bfcc09ddSBjoern A. Zeeb 443*a4128aadSBjoern A. Zeeb /* 444bfcc09ddSBjoern A. Zeeb * Transmit DMA Channel Control/Status Registers (TCSR) 445bfcc09ddSBjoern A. Zeeb * 446bfcc09ddSBjoern A. Zeeb * Device has one configuration register for each of 8 Tx DMA/FIFO channels 447bfcc09ddSBjoern A. Zeeb * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, 448bfcc09ddSBjoern A. Zeeb * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. 449bfcc09ddSBjoern A. Zeeb * 450bfcc09ddSBjoern A. Zeeb * To use a Tx DMA channel, driver must initialize its 451bfcc09ddSBjoern A. Zeeb * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: 452bfcc09ddSBjoern A. Zeeb * 453bfcc09ddSBjoern A. Zeeb * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 454bfcc09ddSBjoern A. Zeeb * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL 455bfcc09ddSBjoern A. Zeeb * 456bfcc09ddSBjoern A. Zeeb * All other bits should be 0. 457bfcc09ddSBjoern A. Zeeb * 458bfcc09ddSBjoern A. Zeeb * Bit fields: 459bfcc09ddSBjoern A. Zeeb * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, 460bfcc09ddSBjoern A. Zeeb * '10' operate normally 461bfcc09ddSBjoern A. Zeeb * 29- 4: Reserved, set to "0" 462bfcc09ddSBjoern A. Zeeb * 3: Enable internal DMA requests (1, normal operation), disable (0) 463bfcc09ddSBjoern A. Zeeb * 2- 0: Reserved, set to "0" 464bfcc09ddSBjoern A. Zeeb */ 465bfcc09ddSBjoern A. Zeeb #define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) 466bfcc09ddSBjoern A. Zeeb #define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) 467bfcc09ddSBjoern A. Zeeb 468bfcc09ddSBjoern A. Zeeb /* Find Control/Status reg for given Tx DMA/FIFO channel */ 469bfcc09ddSBjoern A. Zeeb #define FH_TCSR_CHNL_NUM (8) 470bfcc09ddSBjoern A. Zeeb 471bfcc09ddSBjoern A. Zeeb /* TCSR: tx_config register values */ 472bfcc09ddSBjoern A. 
/*
 * Tx Shared Status Registers (TSSR)
 *
 * After stopping Tx DMA channel (writing 0 to
 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
 * (channel's buffers empty | no pending requests).
 *
 * Bit fields:
 *  31-24: 1 = Channel buffers empty (channel 7:0)
 *  23-16: 1 = No pending requests (channel 7:0)
 */
#define FH_TSSR_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xEA0)
#define FH_TSSR_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xEC0)

#define FH_TSSR_TX_STATUS_REG	(FH_TSSR_LOWER_BOUND + 0x010)

/*
 * Bit fields for TSSR (Tx Shared Status & Control) error status register:
 *  31: Indicates an address error when accessing internal memory
 *      uCode/driver must write "1" in order to clear this flag
 *  30: Indicates that Host did not send the expected number of dwords to FH
 *      uCode/driver must write "1" in order to clear this flag
 *  16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
 *      command was received from the scheduler while the TRB was already full
 *      with the previous command
 *      uCode/driver must write "1" in order to clear this flag
 *  7-0: Each status bit indicates a channel's TxCredit error. When an error
 *      bit is set, it indicates that the FH has received a full indication
 *      from the RTC TxFIFO and the current value of the TxCredit counter was
 *      not equal to zero. This means that the credit mechanism was not
 *      synchronized to the TxFIFO status
 *      uCode/driver must write "1" in order to clear this flag
 */
#define FH_TSSR_TX_ERROR_REG		(FH_TSSR_LOWER_BOUND + 0x018)
#define FH_TSSR_TX_MSG_CONFIG_REG	(FH_TSSR_LOWER_BOUND + 0x008)

#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)

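/*
 * Illustrative sketch only (not part of the driver API): after stopping a
 * Tx DMA channel, the driver is expected to poll FH_TSSR_TX_STATUS_REG
 * until the channel reports idle; this shows the per-channel test using
 * the mask defined above. The helper name is hypothetical.
 */
static inline bool iwl_fh_example_tx_chnl_idle(u32 tx_status_val, u8 chnl)
{
	return !!(tx_status_val & FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl));
}
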
/* Tx service channels */
#define FH_SRVC_CHNL		(9)
#define FH_SRVC_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9C8)
#define FH_SRVC_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
		(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)

#define FH_TX_CHICKEN_BITS_REG	(FH_MEM_LOWER_BOUND + 0xE98)
#define FH_TX_TRB_REG(_chan)	(FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)

/* Instruct FH to increment the retry count of a packet when
 * it is brought from the memory to TX-FIFO
 */
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)

#define RX_POOL_SIZE(rbds)	((rbds) - 1 + \
				 IWL_MAX_RX_HW_QUEUES * \
				 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
#define RX_QUEUE_CB_SIZE(x)	ilog2(x)

#define RX_QUEUE_SIZE		256
#define RX_QUEUE_MASK		255
#define RX_QUEUE_SIZE_LOG	8

#define IWL_DEFAULT_RX_QUEUE	0

/**
 * struct iwl_rb_status - reserve buffer status
 *	host memory mapped FH registers
 * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
 * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
 * @finished_rb_num: [0:11] Indicates the index of the current RB
 *	in which the last frame was written to
 * @finished_fr_num: [0:11] Indicates the index of the RX Frame
 *	which was transferred
 * @__spare: reserved
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_num;
	__le32 __spare;
} __packed;


#define TFD_QUEUE_SIZE_MAX	(256)
#define TFD_QUEUE_SIZE_MAX_GEN3	(65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP	(64)
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3_AX210	1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ	(1024 * 4)
#define IWL_TX_DMA_MASK		DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS		20
#define IWL_TFH_NUM_TBS		25

/* IMR DMA registers */
#define IMR_TFH_SRV_DMA_CHNL0_CTRL		0x00a0a51c
#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR		0x00a0a520
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB	0x00a0a524
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB	0x00a0a528
#define IMR_TFH_SRV_DMA_CHNL0_BC		0x00a0a52c
#define TFH_SRV_DMA_CHNL0_LEFT_BC		0x00a0a530

/* RFH S2D DMA registers */
#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK	0x0000000c
#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK	0x00000002

/* TFH D2S DMA registers */
#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK	0x80000000
#define IMR_UREG_CHICK					0x00d05c00
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS	0x00800000
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK		0x00000030
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS	0x80000000

static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}

/**
 * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
 * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
 * @TB_HI_N_LEN_LEN_MSK: length of the TB
 */
enum iwl_tfd_tb_hi_n_len {
	TB_HI_N_LEN_ADDR_HI_MSK	= 0xf,
	TB_HI_N_LEN_LEN_MSK	= 0xfff0,
};

/**
 * struct iwl_tfd_tb - transmit buffer descriptor within a transmit frame descriptor
 *
 * This structure contains the DMA address and length of one transmit buffer
 *
 * @lo: low [31:0] portion of the dma address of TX buffer
 *	every even is unaligned on 16 bit boundary
 * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;

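/*
 * Illustrative sketch only (not used by the driver): how a TB's fields can
 * be packed given the layout above - the low 32 bits of the DMA address in
 * @lo, and bits [35:32] of the address plus the length (shifted into
 * TB_HI_N_LEN_LEN_MSK) in @hi_n_len. The helper name is hypothetical and
 * @len must fit in 12 bits.
 */
static inline void iwl_fh_example_fill_tb(struct iwl_tfd_tb *tb,
					  dma_addr_t addr, u16 len)
{
	tb->lo = cpu_to_le32(lower_32_bits(addr));
	tb->hi_n_len = cpu_to_le16(iwl_get_dma_hi_addr(addr) | (len << 4));
}
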
/**
 * struct iwl_tfh_tb - transmit buffer descriptor within a transmit frame descriptor
 *
 * This structure contains the DMA address and length of one transmit buffer
 *
 * @tb_len: length of the tx buffer
 * @addr: 64 bits dma address
 */
struct iwl_tfh_tb {
	__le16 tb_len;
	__le64 addr;
} __packed;

/*
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs.
 * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
 * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
 * Each TFD contains pointer/size information for up to 20 / 25 data buffers
 * in host DRAM. These buffers collectively contain the (one) frame described
 * by the TFD. Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
 * Tx frame, up to 8 KBytes in size.
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */

/**
 * struct iwl_tfd - Transmit Frame Descriptor (TFD)
 * @__reserved1: reserved
 * @num_tbs:
 *	0-4 number of active tbs
 *	5   reserved
 *	6-7 padding (not used)
 * @tbs: transmit frame buffer descriptors
 * @__pad: padding
 */
struct iwl_tfd {
	u8 __reserved1[3];
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;

/**
 * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
 * @num_tbs:
 *	0-4 number of active tbs
 *	5-15 reserved
 * @tbs: transmit frame buffer descriptors
 * @__pad: padding
 */
struct iwl_tfh_tfd {
	__le16 num_tbs;
	struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
	__le32 __pad;
} __packed;

/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000	/* 4k */

/* Fixed (non-configurable) rx data from phy */

/**
 * struct iwlagn_scd_bc_tbl - scheduler byte count table
 *	base physical address provided by SCD_DRAM_BASE_ADDR
 * For devices up to 22000:
 * @tfd_offset:
 *	For devices up to 22000:
 *		0-12 - tx command byte count
 *		12-16 - station index
 *	For 22000:
 *		0-12 - tx command byte count
 *		12-13 - number of 64 byte chunks
 *		14-16 - reserved
 */
struct iwlagn_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;

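/*
 * Illustrative sketch only (not used by the driver): per the layout
 * documented above for devices up to 22000, a byte count table entry
 * carries the Tx command byte count in the low 12 bits and the station
 * index above it. The helper name is hypothetical.
 */
static inline __le16 iwl_fh_example_scd_bc_ent(u16 byte_cnt, u8 sta_id)
{
	return cpu_to_le16(byte_cnt | ((u16)sta_id << 12));
}
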
/**
 * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
 * For AX210 and on:
 * @tfd_offset: 0-12 - tx command byte count
 *	       12-13 - number of 64 byte chunks
 *	       14-16 - reserved
 */
struct iwl_gen3_bc_tbl_entry {
	__le16 tfd_offset;
} __packed;

#endif /* !__iwl_fh_h__ */