/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_fw_api_txq_h__
#define __iwl_fw_api_txq_h__

/*
 * DQA queue numbers
 *
 * Fixed assignments of hardware TX queues under the DQA (dynamic queue
 * allocation) scheme; queues 0-4 and 9 are reserved for specific roles,
 * while the MGMT (5-8) and DATA (10-30) ranges are allocated from pools.
 *
 * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
 * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
 * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
 * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
 *	monitor mode. Note this queue is the same as the queue for P2P device
 *	but we can't have active monitor mode along with P2P device anyway.
 * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
 * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
 *	that we are never left without the possibility to connect to an AP.
 * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
 *	Each MGMT queue is mapped to a single STA
 *	MGMT frames are frames that return true on ieee80211_is_mgmt()
 * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
 * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
 *	responses
 * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
 *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
 *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
 *	as well
 * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
 */
enum iwl_mvm_dqa_txq {
	IWL_MVM_DQA_CMD_QUEUE = 0,
	IWL_MVM_DQA_AUX_QUEUE = 1,
	IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
	/* shares queue 2 with P2P device (mutually exclusive, see above) */
	IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
	IWL_MVM_DQA_GCAST_QUEUE = 3,
	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
	IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
};

/*
 * TX FIFO numbers for the original MVM firmware API.
 * BK/BE/VI/VO presumably follow the 802.11 EDCA access-category naming --
 * confirm against the firmware API documentation.  Values 4 and 6 are
 * intentionally unassigned here (MCAST is pinned to 5, CMD to 7).
 */
enum iwl_mvm_tx_fifo {
	IWL_MVM_TX_FIFO_BK = 0,
	IWL_MVM_TX_FIFO_BE,
	IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_VO,
	IWL_MVM_TX_FIFO_MCAST = 5,
	IWL_MVM_TX_FIFO_CMD = 7,
};

/*
 * TX FIFO numbers for gen2 devices.  Note the numbering differs from
 * enum iwl_mvm_tx_fifo: CMD moves to 0, and each access category has
 * separate EDCA (1-4) and TRIG (5-8) FIFOs.
 */
enum iwl_gen2_tx_fifo {
	IWL_GEN2_TX_FIFO_CMD = 0,
	IWL_GEN2_EDCA_TX_FIFO_BK,
	IWL_GEN2_EDCA_TX_FIFO_BE,
	IWL_GEN2_EDCA_TX_FIFO_VI,
	IWL_GEN2_EDCA_TX_FIFO_VO,
	IWL_GEN2_TRIG_TX_FIFO_BK,
	IWL_GEN2_TRIG_TX_FIFO_BE,
	IWL_GEN2_TRIG_TX_FIFO_VI,
	IWL_GEN2_TRIG_TX_FIFO_VO,
};

/*
 * TX FIFO numbers for BZ-family devices.  Same EDCA/TRIG split as gen2
 * but without a command FIFO in this numbering, so EDCA starts at 0.
 */
enum iwl_bz_tx_fifo {
	IWL_BZ_EDCA_TX_FIFO_BK,
	IWL_BZ_EDCA_TX_FIFO_BE,
	IWL_BZ_EDCA_TX_FIFO_VI,
	IWL_BZ_EDCA_TX_FIFO_VO,
	IWL_BZ_TRIG_TX_FIFO_BK,
	IWL_BZ_TRIG_TX_FIFO_BE,
	IWL_BZ_TRIG_TX_FIFO_VI,
	IWL_BZ_TRIG_TX_FIFO_VO,
};
/**
 * enum iwl_tx_queue_cfg_actions - TXQ config options
 * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
 * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
 */
enum iwl_tx_queue_cfg_actions {
	/* BIT() is provided by the including environment, not this header */
	TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
	TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};

/*
 * Default TX queue sizes (in TFD entries) by device capability:
 * 2048 for EHT-capable, 1024 for HE-capable, 256 otherwise; management
 * and command queues use small fixed sizes.
 */
#define IWL_DEFAULT_QUEUE_SIZE_EHT (512 * 4)
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32
/**
 * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
 * @sta_id: station id
 * @tid: tid of the queue
 * @flags: see &enum iwl_tx_queue_cfg_actions
 * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
 *	Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
 * @byte_cnt_addr: address of byte count table
 * @tfdq_addr: address of TFD circular buffer
 *
 * All multi-byte fields are little-endian (__le*) as this struct is sent
 * to the firmware on the wire; layout is fixed (__packed).
 */
struct iwl_tx_queue_cfg_cmd {
	u8 sta_id;
	u8 tid;
	__le16 flags;
	__le32 cb_size;
	__le64 byte_cnt_addr;
	__le64 tfdq_addr;
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */

/**
 * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
 * @queue_number: queue number assigned to this RA -TID
 * @flags: set on failure
 * @write_pointer: initial value for write pointer
 * @reserved: reserved
 */
struct iwl_tx_queue_cfg_rsp {
	__le16 queue_number;
	__le16 flags;
	__le16 write_pointer;
	__le16 reserved;
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */

#endif /* __iwl_fw_api_txq_h__ */