/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _HTT_H_
#define _HTT_H_

#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
#include <linux/kfifo.h>
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/wait.h>
#endif

#include "htc.h"
#include "hw.h"
#include "rx_desc.h"

enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};

enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
	HTT_H2T_MSG_TYPE_SYNC               = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};

struct htt_cmd_hdr {
	u8 msg_type;
} __packed;

struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;

/*
 * HTT tx MSDU descriptor
 *
 * The HTT tx MSDU descriptor is created by the host HTT SW for each
 * tx MSDU. The HTT tx MSDU descriptor contains the information that
 * the target firmware needs for the FW's tx processing, particularly
 * for creating the HW msdu descriptor.
 * The same HTT tx descriptor is used for HL and LL systems, though
 * a few fields within the tx descriptor are used only by LL or
 * only by HL.
 * The HTT tx descriptor is defined in two manners: by a struct with
 * bitfields, and by a series of [dword offset, bit mask, bit shift]
 * definitions.
 * The target should use the struct def, for simplicity and clarity,
 * but the host shall use the bit-mask + bit-shift defs, to be endian-
 * neutral. Specifically, the host shall use the get/set macros built
 * around the mask + shift defs.
 */
struct htt_data_tx_desc_frag {
	union {
		struct double_word_addr {
			__le32 paddr;
			__le32 len;
		} __packed dword_addr;
		struct triple_word_addr {
			__le32 paddr_lo;
			__le16 paddr_hi;
			__le16 len_16;
		} __packed tword_addr;
	} __packed;
} __packed;

struct htt_msdu_ext_desc {
	__le32 tso_flag[3];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};

struct htt_msdu_ext_desc_64 {
	__le32 tso_flag[5];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};

#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE     BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)

#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
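
/*
 * Illustrative sketch (not part of the HTT interface): how a host might
 * request full checksum offload for one MSDU by setting the flag bits
 * above in the extension descriptor.  The helper name is hypothetical.
 */
static inline void htt_example_enable_csum_offload(struct htt_msdu_ext_desc *ext_desc)
{
	/* enable IP/UDP/TCP checksum generation for IPv4 and IPv6 */
	ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}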

#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64     BIT(16)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64  BIT(21)

#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)

enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};

enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE      = 1 << 15
};

#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
#define HTT_TX_CREDIT_DELTA_ABS_S 16
#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
	    (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)

#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
#define HTT_TX_CREDIT_SIGN_BIT_S 8
#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
	    (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)

enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT                = 17,
	HTT_DATA_TX_EXT_TID_INVALID             = 31
};

#define HTT_INVALID_PEERID 0xFFFF
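
/*
 * Illustrative sketch (not part of the HTT interface): the descriptor
 * comment above says the host should use the mask + shift definitions
 * rather than C bitfields.  These hypothetical helpers show that pattern
 * for the flags1 vdev_id/ext_tid fields and for a TX credit update word
 * (the word is assumed to have been converted to host order already).
 */
static inline u16 htt_example_data_tx_flags1(u8 vdev_id, u8 ext_tid)
{
	u16 flags1 = 0;

	flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
	flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;

	/* the result would be stored with cpu_to_le16() into the descriptor */
	return flags1;
}

static inline void htt_example_parse_credit_update(u32 word, u32 *abs_delta,
						   bool *sign)
{
	/* absolute credit delta and its direction bit, extracted endian-neutrally */
	*abs_delta = HTT_TX_CREDIT_DELTA_ABS_GET(word);
	*sign = !!HTT_TX_CREDIT_SIGN_BIT_GET(word);
}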

/*
 * htt_data_tx_desc - used for data tx path
 *
 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
 *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
 *                for special kinds of tids
 *       postponed: only for HL hosts. indicates if this is a resend
 *                  (HL hosts manage queues on the host)
 *       more_in_batch: only for HL hosts. indicates if more packets are
 *                      pending. this allows target to wait and aggregate
 *       freq: 0 means home channel of given vdev. intended for offchannel
 */
struct htt_data_tx_desc {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le32 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;

struct htt_data_tx_desc_64 {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le64 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;

enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
};

#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)

struct htt_rx_ring_rx_desc_offsets {
	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;

struct htt_rx_ring_setup_ring32 {
	__le32 fw_idx_shadow_reg_paddr;
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;

struct htt_rx_ring_setup_ring64 {
	__le64 fw_idx_shadow_reg_paddr;
	__le64 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;

struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;

struct htt_rx_ring_setup_32 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring32 rings[];
} __packed;

struct htt_rx_ring_setup_64 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring64 rings[];
} __packed;
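
/*
 * Illustrative sketch (not part of the HTT interface): filling one 32-bit
 * ring descriptor of an RX_RING_CFG request.  The helper name and the flag
 * selection are hypothetical; the ring length and buffer size are passed in
 * because their units are dictated by the field comments above.
 */
static inline void
htt_example_fill_rx_ring32(struct htt_rx_ring_setup_ring32 *ring,
			   u32 base_paddr, u32 shadow_reg_paddr,
			   u16 ring_len_words, u16 bufsize)
{
	ring->fw_idx_shadow_reg_paddr = __cpu_to_le32(shadow_reg_paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(base_paddr);
	ring->rx_ring_len = __cpu_to_le16(ring_len_words);
	ring->rx_ring_bufsize = __cpu_to_le16(bufsize);
	/* deliver whole MSDU payloads for unicast and multicast traffic */
	ring->flags = __cpu_to_le16(HTT_RX_RING_FLAGS_MSDU_PAYLOAD |
				    HTT_RX_RING_FLAGS_UNICAST_RX |
				    HTT_RX_RING_FLAGS_MULTICAST_RX);
	ring->fw_idx_init_val = __cpu_to_le16(0);
	/* the rx descriptor offsets in ring->offsets would be filled here too */
}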

/*
 * htt_stats_req - request target to send specified statistics
 *
 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
 * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 *	so make sure it's little-endian.
 * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 *	so make sure it's little-endian.
 * @cfg_val: stat_type specific configuration
 * @stat_type: see %htt_dbg_stats_type
 * @cookie_lsb: used for confirmation message from target->host
 * @cookie_msb: ditto as %cookie
 */
struct htt_stats_req {
	u8 upload_types[3];
	u8 rsvd0;
	u8 reset_types[3];
	struct {
		u8 mpdu_bytes;
		u8 mpdu_num_msdus;
		u8 msdu_bytes;
	} __packed;
	u8 stat_type;
	__le32 cookie_lsb;
	__le32 cookie_msb;
} __packed;

#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
#define HTT_STATS_BIT_MASK GENMASK(16, 0)

/*
 * htt_oob_sync_req - request out-of-band sync
 *
 * The HTT SYNC tells the target to suspend processing of subsequent
 * HTT host-to-target messages until some other target agent locally
 * informs the target HTT FW that the current sync counter is equal to
 * or greater than (in a modulo sense) the sync counter specified in
 * the SYNC message.
 *
 * This allows other host-target components to synchronize their operation
 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
 * security key has been downloaded to and activated by the target.
 * In the absence of any explicit synchronization counter value
 * specification, the target HTT FW will use zero as the default current
 * sync value.
 *
 * The HTT target FW will suspend its host->target message processing as long
 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
 */
struct htt_oob_sync_req {
	u8 sync_count;
	__le16 rsvd0;
} __packed;
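
/*
 * Illustrative sketch (not part of the HTT interface): the suspend rule
 * quoted above, "0 < (in-band - out-of-band) & 0xff < 128", expressed as a
 * hypothetical helper.  Message processing stays suspended while it returns
 * true.
 */
static inline bool htt_example_sync_suspended(u8 in_band_count, u8 oob_count)
{
	u8 delta = (in_band_count - oob_count) & 0xff;

	return delta > 0 && delta < 128;
}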

struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
} __packed;

struct htt_aggr_conf_v2 {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
	u8 reserved;
} __packed;

#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
struct htt_mgmt_tx_desc_qca99x0 {
	__le32 rate;
} __packed;

struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
	__le32 msdu_paddr;
	__le32 desc_id;
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
	union {
		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
	} __packed;
} __packed;

enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK    = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP  = 2
};

/*=== target -> host messages ===============================================*/

enum htt_main_t2h_msg_type {
	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
	HTT_MAIN_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_MAIN_T2H_NUM_MSGS
};

enum htt_10x_t2h_msg_type {
	HTT_10X_T2H_MSG_TYPE_VERSION_CONF      = 0x0,
	HTT_10X_T2H_MSG_TYPE_RX_IND            = 0x1,
	HTT_10X_T2H_MSG_TYPE_RX_FLUSH          = 0x2,
	HTT_10X_T2H_MSG_TYPE_PEER_MAP          = 0x3,
	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP        = 0x4,
	HTT_10X_T2H_MSG_TYPE_RX_ADDBA          = 0x5,
	HTT_10X_T2H_MSG_TYPE_RX_DELBA          = 0x6,
	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND      = 0x7,
	HTT_10X_T2H_MSG_TYPE_PKTLOG            = 0x8,
	HTT_10X_T2H_MSG_TYPE_STATS_CONF        = 0x9,
	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND       = 0xa,
	HTT_10X_T2H_MSG_TYPE_SEC_IND           = 0xb,
	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND     = 0xc,
	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND    = 0xd,
	HTT_10X_T2H_MSG_TYPE_TEST              = 0xe,
	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE       = 0xf,
	HTT_10X_T2H_MSG_TYPE_AGGR_CONF         = 0x11,
	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD    = 0x12,
	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
	/* keep this last */
	HTT_10X_T2H_NUM_MSGS
};

enum htt_tlv_t2h_msg_type {
	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_TLV_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_TLV_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_TLV_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_TLV_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_TLV_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_TLV_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc, /* deprecated */
	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0xf,
	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND              = 0x10,
	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND    = 0x12,
	/* 0x13 reserved */
	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE    = 0x14,
	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE            = 0x15,
	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR        = 0x16,
	HTT_TLV_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_TLV_T2H_NUM_MSGS
};

enum htt_10_4_t2h_msg_type {
	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
	/* 0x19 to 0x2f are reserved */
	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
	HTT_10_4_T2H_MSG_TYPE_PEER_STATS             = 0x31,
	/* keep this last */
	HTT_10_4_T2H_NUM_MSGS
};

enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF,
	HTT_T2H_MSG_TYPE_RX_IND,
	HTT_T2H_MSG_TYPE_RX_FLUSH,
	HTT_T2H_MSG_TYPE_PEER_MAP,
	HTT_T2H_MSG_TYPE_PEER_UNMAP,
	HTT_T2H_MSG_TYPE_RX_ADDBA,
	HTT_T2H_MSG_TYPE_RX_DELBA,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
	HTT_T2H_MSG_TYPE_PKTLOG,
	HTT_T2H_MSG_TYPE_STATS_CONF,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
	HTT_T2H_MSG_TYPE_SEC_IND,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
	HTT_T2H_MSG_TYPE_RX_PN_IND,
	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
	HTT_T2H_MSG_TYPE_AGGR_CONF,
	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
	HTT_T2H_MSG_TYPE_TEST,
	HTT_T2H_MSG_TYPE_EN_STATS,
	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	HTT_T2H_MSG_TYPE_PEER_STATS,
	/* keep this last */
	HTT_T2H_NUM_MSGS
};

/*
 * htt_resp_hdr - header for target-to-host messages
 *
 * msg_type: see htt_t2h_msg_type
 */
struct htt_resp_hdr {
	u8 msg_type;
} __packed;

#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB    0
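
/*
 * Illustrative sketch (not part of the HTT interface): the per-firmware
 * enums above number some T2H messages differently, so a host can remap a
 * raw message id onto the unified enum htt_t2h_msg_type.  This hypothetical
 * helper is deliberately abridged to a few 10.x cases.
 */
static inline enum htt_t2h_msg_type
htt_example_map_10x_t2h(enum htt_10x_t2h_msg_type type)
{
	switch (type) {
	case HTT_10X_T2H_MSG_TYPE_VERSION_CONF:
		return HTT_T2H_MSG_TYPE_VERSION_CONF;
	case HTT_10X_T2H_MSG_TYPE_RX_IND:
		return HTT_T2H_MSG_TYPE_RX_IND;
	case HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
		return HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION;
	/* ... the remaining 10.x ids would be mapped the same way ... */
	default:
		return HTT_T2H_NUM_MSGS; /* out-of-range value used as "unknown" */
	}
}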

/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor;
	u8 major;
	u8 rsvd0;
} __packed;

#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)

#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)

struct htt_mgmt_tx_completion {
	u8 rsvd0;
	u8 rsvd1;
	u8 flags;
	__le32 desc_id;
	__le32 status;
	__le32 ppdu_id;
	__le32 info;
} __packed;

#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)

#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24

#define HTT_TX_CMPL_FLAG_DATA_RSSI             BIT(0)
#define HTT_TX_CMPL_FLAG_PPID_PRESENT          BIT(1)
#define HTT_TX_CMPL_FLAG_PA_PRESENT            BIT(2)
#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3)

#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
#define HTT_TX_DATA_APPEND_RETRIES      BIT(0)
#define HTT_TX_DATA_APPEND_TIMESTAMP    BIT(1)

struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
} __packed;

#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)

#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24

enum htt_rx_legacy_rate {
	HTT_RX_OFDM_48 = 0,
	HTT_RX_OFDM_24 = 1,
	HTT_RX_OFDM_12,
	HTT_RX_OFDM_6,
	HTT_RX_OFDM_54,
	HTT_RX_OFDM_36,
	HTT_RX_OFDM_18,
	HTT_RX_OFDM_9,

	/* long preamble */
	HTT_RX_CCK_11_LP = 0,
	HTT_RX_CCK_5_5_LP = 1,
	HTT_RX_CCK_2_LP,
	HTT_RX_CCK_1_LP,
	/* short preamble */
	HTT_RX_CCK_11_SP,
	HTT_RX_CCK_5_5_SP,
	HTT_RX_CCK_2_SP
};

enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};

enum htt_rx_preamble_type {
	HTT_RX_LEGACY        = 0x4,
	HTT_RX_HT            = 0x8,
	HTT_RX_HT_WITH_TXBF  = 0x9,
	HTT_RX_VHT           = 0xC,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};

/*
 * Fields: phy_err_valid, phy_err_code, tsf,
 * usec_timestamp, sub_usec_timestamp
 * ..are valid only if end_valid == 1.
 *
 * Fields: rssi_chains, legacy_rate_type,
 * legacy_rate_cck, preamble_type, service,
 * vht_sig_*
 * ..are valid only if start_valid == 1;
 */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4];
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
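
/*
 * Illustrative sketch (not part of the HTT interface): pulling the legacy
 * rate index and its OFDM/CCK qualifier out of a PPDU descriptor with the
 * INFO0 masks above.  Per the comment preceding the struct, these are only
 * meaningful when start_valid is set.  Helper names are hypothetical.
 */
static inline u8
htt_example_ppdu_legacy_rate(const struct htt_rx_indication_ppdu *ppdu)
{
	return (ppdu->info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK) >>
	       HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB;
}

static inline bool
htt_example_ppdu_rate_is_cck(const struct htt_rx_indication_ppdu *ppdu)
{
	return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK);
}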

enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscuous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};

struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
};

struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

/* High latency version of the RX indication */
struct htt_rx_indication_hl {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;
	struct fw_rx_desc_hl fw_desc;
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

struct htt_hl_rx_desc {
	__le32 info;
	__le32 pn_31_0;
	union {
		struct {
			__le16 pn_47_32;
			__le16 pn_63_48;
		} pn16;
		__le32 pn_63_32;
	} u0;
	__le32 pn_95_64;
	__le32 pn_127_96;
} __packed;

static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
#if defined(__linux__)
	void *ptr = rx_ind;
#elif defined(__FreeBSD__)
	u8 *ptr = (void *)rx_ind;
#endif

	ptr += sizeof(rx_ind->hdr)
	     + sizeof(rx_ind->ppdu)
	     + sizeof(rx_ind->prefix)
	     + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
#if defined(__linux__)
	return ptr;
#elif defined(__FreeBSD__)
	return ((void *)ptr);
#endif
}

static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
{
#if defined(__linux__)
	void *ptr = rx_ind;
#elif defined(__FreeBSD__)
	u8 *ptr = (void *)rx_ind;
#endif

	ptr += sizeof(rx_ind->hdr)
	     + sizeof(rx_ind->ppdu)
	     + sizeof(rx_ind->prefix)
	     + sizeof(rx_ind->fw_desc);
#if defined(__linux__)
	return ptr;
#elif defined(__FreeBSD__)
	return ((void *)ptr);
#endif
}

enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 *	[seq_num_start, seq_num_end-1] are valid.
 */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};

struct htt_rx_peer_map {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];
	u8 rsvd0;
	u8 rsvd1;
} __packed;

struct htt_rx_peer_unmap {
	u8 rsvd0;
	__le16 peer_id;
} __packed;

enum htt_txrx_sec_cast_type {
	HTT_TXRX_SEC_MCAST = 0,
	HTT_TXRX_SEC_UCAST
};

enum htt_rx_pn_check_type {
	HTT_RX_NON_PN_CHECK = 0,
	HTT_RX_PN_CHECK
};

enum htt_rx_tkip_demic_type {
	HTT_RX_NON_TKIP_MIC = 0,
	HTT_RX_TKIP_MIC
};

enum htt_security_types {
	HTT_SECURITY_NONE,
	HTT_SECURITY_WEP128,
	HTT_SECURITY_WEP104,
	HTT_SECURITY_WEP40,
	HTT_SECURITY_TKIP,
	HTT_SECURITY_TKIP_NOMIC,
	HTT_SECURITY_AES_CCMP,
	HTT_SECURITY_WAPI,

	HTT_NUM_SECURITY_TYPES /* keep this last! */
};

#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
#define ATH10K_TXRX_NON_QOS_TID 16

enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB  0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};

struct htt_security_indication {
	union {
		/* don't use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;
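
/*
 * Illustrative sketch (not part of the HTT interface): the union above
 * warns against relying on C bitfields, so a host can decode the flags
 * byte with the mask/LSB definitions instead.  Helper names are
 * hypothetical.
 */
static inline enum htt_security_types
htt_example_sec_ind_type(const struct htt_security_indication *ind)
{
	return (ind->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
}

static inline bool
htt_example_sec_ind_is_unicast(const struct htt_security_indication *ind)
{
	return !!(ind->flags & HTT_SECURITY_IS_UNICAST);
}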

#define HTT_RX_BA_INFO0_TID_MASK     0x000F
#define HTT_RX_BA_INFO0_TID_LSB      0
#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
#define HTT_RX_BA_INFO0_PEER_ID_LSB  4

struct htt_rx_addba {
	u8 window_size;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK            = 0,
	HTT_DATA_TX_STATUS_DISCARD       = 1,
	HTT_DATA_TX_STATUS_NO_ACK        = 2,
	HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
};

enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB  0
#define HTT_DATA_TX_TID_MASK    0x78
#define HTT_DATA_TX_TID_LSB     3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};

#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF

struct htt_append_retries {
	__le16 msdu_id;
	u8 tx_retries;
	u8 flag;
} __packed;

struct htt_data_tx_completion_ext {
	struct htt_append_retries a_retries;
	__le32 t_stamp;
	__le16 msdus_rssi[];
} __packed;

/**
 * @brief target -> host TX completion indication message definition
 *
 * @details
 * The following diagram shows the format of the TX completion indication sent
 * from the target to the host
 *
 *          |31 28|27|26|25|24|23        16| 15 |14 11|10  8|7          0|
 *          |------------------------------------------------------------|
 * header:  |rsvd |A2|TP|A1|A0|    num     | t_i| tid |status|  msg_type |
 *          |------------------------------------------------------------|
 * payload: |            MSDU1 ID          |         MSDU0 ID            |
 *          |------------------------------------------------------------|
 *          :            MSDU3 ID          :         MSDU2 ID            :
 *          |------------------------------------------------------------|
 *          |          struct htt_tx_compl_ind_append_retries            |
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |
 *          |          struct htt_tx_compl_ind_append_tx_tstamp          |
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |
 *          |           MSDU1 ACK RSSI     |        MSDU0 ACK RSSI       |
 *          |------------------------------------------------------------|
 *          :           MSDU3 ACK RSSI     :        MSDU2 ACK RSSI       :
 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |
 *    -msg_type
 *     Bits 7:0
 *     Purpose: identifies this as HTT TX completion indication
 *    -status
 *     Bits 10:8
 *     Purpose: the TX completion status of the payload fragmentation descriptors
 *     Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
 *    -tid
 *     Bits 14:11
 *     Purpose: the tid associated with those fragmentation descriptors. It is
 *     valid or not, depending on the tid_invalid bit.
 *     Value: 0 to 15
 *    -tid_invalid
 *     Bits 15:15
 *     Purpose: this bit indicates whether the tid field is valid or not
 *     Value: 0 indicates valid, 1 indicates invalid
 *    -num
 *     Bits 23:16
 *     Purpose: the number of payloads in this indication
 *     Value: 1 to 255
 *    -A0 = append
 *     Bits 24:24
 *     Purpose: append the struct htt_tx_compl_ind_append_retries which contains
 *      the number of tx retries for one MSDU at the end of this message
 *     Value: 0 indicates no appending, 1 indicates appending
 *    -A1 = append1
 *     Bits 25:25
 *     Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
 *      contains the timestamp info for each TX msdu id in payload.
 *     Value: 0 indicates no appending, 1 indicates appending
 *    -TP = MSDU tx power presence
 *     Bits 26:26
 *     Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
 *      for each MSDU referenced by the TX_COMPL_IND message.
 *      The order of the per-MSDU tx power reports matches the order
 *      of the MSDU IDs.
 *     Value: 0 indicates not appending, 1 indicates appending
 *    -A2 = append2
 *     Bits 27:27
 *     Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
 *      TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
 *      matches the order of the MSDU IDs.
 *      The ACK RSSI values are valid when status is COMPLETE_OK (and
 *      this append2 bit is set).
 *     Value: 0 indicates not appending, 1 indicates appending
 */

struct htt_data_tx_completion {
	union {
		u8 flags;
		struct {
			u8 status:3,
			   tid:4,
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
	__le16 msdus[]; /* variable length based on %num_msdus */
} __packed;
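
/*
 * Illustrative sketch (not part of the HTT interface): walking the MSDU id
 * payload of a TX completion indication as laid out in the diagram above.
 * Handling of the optional appended sections (retries, timestamp, ACK RSSI)
 * is omitted; the helper name is hypothetical.
 */
static inline int
htt_example_count_completed_msdus(const struct htt_data_tx_completion *ev)
{
	int i, n = 0;

	for (i = 0; i < ev->num_msdus; i++) {
		u16 msdu_id = __le16_to_cpu(ev->msdus[i]);

		/* 0xFFFF entries carry no MSDU and are skipped */
		if (msdu_id != HTT_TX_COMPL_INV_MSDU_ID)
			n++;
	}

	return n;
}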

#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
#define HTT_TX_PPDU_DUR_INFO0_TID_MASK     GENMASK(20, 16)

struct htt_data_tx_ppdu_dur {
	__le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
	__le32 tx_duration; /* in usecs */
} __packed;

#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0)

struct htt_data_tx_compl_ppdu_dur {
	__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
	struct htt_data_tx_ppdu_dur ppdu_dur[];
} __packed;

struct htt_tx_compl_ind_base {
	u32 hdr;
	u16 payload[1/*or more*/];
} __packed;

struct htt_rc_tx_done_params {
	u32 rate_code;
	u32 rate_code_flags;
	u32 flags;
	u32 num_enqued; /* 1 for non-AMPDU */
	u32 num_retries;
	u32 num_failed; /* for AMPDU */
	u32 ack_rssi;
	u32 time_stamp;
	u32 is_probe;
};

struct htt_rc_update {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6];
	u8 num_elems;
	u8 rsvd0;
	struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
} __packed;

/* see htt_rx_indication for similar fields and descriptions */
struct htt_rx_fragment_indication {
	union {
		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
		struct {
			u8 ext_tid:5,
			   flush_valid:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
	__le16 fw_rx_desc_bytes;
	__le16 rsvd0;

	u8 fw_msdu_rx_desc[];
} __packed;

#define ATH10K_IEEE80211_EXTIV BIT(5)
#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */

Zeeb #define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16 1074*da8fa4e3SBjoern A. Zeeb 1075*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F 1076*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0 1077*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20 1078*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5 1079*da8fa4e3SBjoern A. Zeeb 1080*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F 1081*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0 1082*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 1083*da8fa4e3SBjoern A. Zeeb #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 1084*da8fa4e3SBjoern A. Zeeb 1085*da8fa4e3SBjoern A. Zeeb struct htt_rx_pn_ind { 1086*da8fa4e3SBjoern A. Zeeb __le16 peer_id; 1087*da8fa4e3SBjoern A. Zeeb u8 tid; 1088*da8fa4e3SBjoern A. Zeeb u8 seqno_start; 1089*da8fa4e3SBjoern A. Zeeb u8 seqno_end; 1090*da8fa4e3SBjoern A. Zeeb u8 pn_ie_count; 1091*da8fa4e3SBjoern A. Zeeb u8 reserved; 1092*da8fa4e3SBjoern A. Zeeb u8 pn_ies[]; 1093*da8fa4e3SBjoern A. Zeeb } __packed; 1094*da8fa4e3SBjoern A. Zeeb 1095*da8fa4e3SBjoern A. Zeeb struct htt_rx_offload_msdu { 1096*da8fa4e3SBjoern A. Zeeb __le16 msdu_len; 1097*da8fa4e3SBjoern A. Zeeb __le16 peer_id; 1098*da8fa4e3SBjoern A. Zeeb u8 vdev_id; 1099*da8fa4e3SBjoern A. Zeeb u8 tid; 1100*da8fa4e3SBjoern A. Zeeb u8 fw_desc; 1101*da8fa4e3SBjoern A. Zeeb u8 payload[]; 1102*da8fa4e3SBjoern A. Zeeb } __packed; 1103*da8fa4e3SBjoern A. Zeeb 1104*da8fa4e3SBjoern A. Zeeb struct htt_rx_offload_ind { 1105*da8fa4e3SBjoern A. Zeeb u8 reserved; 1106*da8fa4e3SBjoern A. Zeeb __le16 msdu_count; 1107*da8fa4e3SBjoern A. Zeeb } __packed; 1108*da8fa4e3SBjoern A. Zeeb 1109*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_msdu_desc { 1110*da8fa4e3SBjoern A. Zeeb __le32 msdu_paddr; 1111*da8fa4e3SBjoern A. Zeeb __le16 msdu_len; 1112*da8fa4e3SBjoern A. Zeeb u8 fw_desc; 1113*da8fa4e3SBjoern A. Zeeb u8 reserved; 1114*da8fa4e3SBjoern A. Zeeb } __packed; 1115*da8fa4e3SBjoern A. Zeeb 1116*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_msdu_desc_ext { 1117*da8fa4e3SBjoern A. Zeeb __le64 msdu_paddr; 1118*da8fa4e3SBjoern A. Zeeb __le16 msdu_len; 1119*da8fa4e3SBjoern A. Zeeb u8 fw_desc; 1120*da8fa4e3SBjoern A. Zeeb u8 reserved; 1121*da8fa4e3SBjoern A. Zeeb } __packed; 1122*da8fa4e3SBjoern A. Zeeb 1123*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_ind { 1124*da8fa4e3SBjoern A. Zeeb u8 info; 1125*da8fa4e3SBjoern A. Zeeb __le16 peer_id; 1126*da8fa4e3SBjoern A. Zeeb u8 vdev_id; 1127*da8fa4e3SBjoern A. Zeeb u8 reserved; 1128*da8fa4e3SBjoern A. Zeeb __le16 msdu_count; 1129*da8fa4e3SBjoern A. Zeeb union { 1130*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_msdu_desc msdu_descs32[0]; 1131*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0]; 1132*da8fa4e3SBjoern A. Zeeb } __packed; 1133*da8fa4e3SBjoern A. Zeeb } __packed; 1134*da8fa4e3SBjoern A. Zeeb 1135*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f 1136*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0 1137*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020 1138*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5 1139*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040 1140*da8fa4e3SBjoern A. Zeeb #define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6 1141*da8fa4e3SBjoern A. Zeeb 1142*da8fa4e3SBjoern A. 
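/*
 * Illustrative sketch, not part of the HTT interface: decoding the info
 * byte of an in-order rx indication with the mask/LSB pairs defined above.
 * The helper names are hypothetical.
 */
static inline u8
example_htt_rx_in_ord_ind_tid(const struct htt_rx_in_ord_ind *ind)
{
	return (ind->info & HTT_RX_IN_ORD_IND_INFO_TID_MASK) >>
	       HTT_RX_IN_ORD_IND_INFO_TID_LSB;
}

static inline bool
example_htt_rx_in_ord_ind_offload(const struct htt_rx_in_ord_ind *ind)
{
	return !!(ind->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
}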
Zeeb /* 1143*da8fa4e3SBjoern A. Zeeb * target -> host test message definition 1144*da8fa4e3SBjoern A. Zeeb * 1145*da8fa4e3SBjoern A. Zeeb * The following field definitions describe the format of the test 1146*da8fa4e3SBjoern A. Zeeb * message sent from the target to the host. 1147*da8fa4e3SBjoern A. Zeeb * The message consists of a 4-octet header, followed by a variable 1148*da8fa4e3SBjoern A. Zeeb * number of 32-bit integer values, followed by a variable number 1149*da8fa4e3SBjoern A. Zeeb * of 8-bit character values. 1150*da8fa4e3SBjoern A. Zeeb * 1151*da8fa4e3SBjoern A. Zeeb * |31 16|15 8|7 0| 1152*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1153*da8fa4e3SBjoern A. Zeeb * | num chars | num ints | msg type | 1154*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1155*da8fa4e3SBjoern A. Zeeb * | int 0 | 1156*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1157*da8fa4e3SBjoern A. Zeeb * | int 1 | 1158*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1159*da8fa4e3SBjoern A. Zeeb * | ... | 1160*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1161*da8fa4e3SBjoern A. Zeeb * | char 3 | char 2 | char 1 | char 0 | 1162*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1163*da8fa4e3SBjoern A. Zeeb * | | | ... | char 4 | 1164*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1165*da8fa4e3SBjoern A. Zeeb * - MSG_TYPE 1166*da8fa4e3SBjoern A. Zeeb * Bits 7:0 1167*da8fa4e3SBjoern A. Zeeb * Purpose: identifies this as a test message 1168*da8fa4e3SBjoern A. Zeeb * Value: HTT_MSG_TYPE_TEST 1169*da8fa4e3SBjoern A. Zeeb * - NUM_INTS 1170*da8fa4e3SBjoern A. Zeeb * Bits 15:8 1171*da8fa4e3SBjoern A. Zeeb * Purpose: indicate how many 32-bit integers follow the message header 1172*da8fa4e3SBjoern A. Zeeb * - NUM_CHARS 1173*da8fa4e3SBjoern A. Zeeb * Bits 31:16 1174*da8fa4e3SBjoern A. Zeeb * Purpose: indicate how many 8-bit characters follow the series of integers 1175*da8fa4e3SBjoern A. Zeeb */ 1176*da8fa4e3SBjoern A. Zeeb struct htt_rx_test { 1177*da8fa4e3SBjoern A. Zeeb u8 num_ints; 1178*da8fa4e3SBjoern A. Zeeb __le16 num_chars; 1179*da8fa4e3SBjoern A. Zeeb 1180*da8fa4e3SBjoern A. Zeeb /* payload consists of 2 lists: 1181*da8fa4e3SBjoern A. Zeeb * a) num_ints * sizeof(__le32) 1182*da8fa4e3SBjoern A. Zeeb * b) num_chars * sizeof(u8) aligned to 4bytes 1183*da8fa4e3SBjoern A. Zeeb */ 1184*da8fa4e3SBjoern A. Zeeb u8 payload[]; 1185*da8fa4e3SBjoern A. Zeeb } __packed; 1186*da8fa4e3SBjoern A. Zeeb 1187*da8fa4e3SBjoern A. Zeeb static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test) 1188*da8fa4e3SBjoern A. Zeeb { 1189*da8fa4e3SBjoern A. Zeeb return (__le32 *)rx_test->payload; 1190*da8fa4e3SBjoern A. Zeeb } 1191*da8fa4e3SBjoern A. Zeeb 1192*da8fa4e3SBjoern A. Zeeb static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) 1193*da8fa4e3SBjoern A. Zeeb { 1194*da8fa4e3SBjoern A. Zeeb return rx_test->payload + (rx_test->num_ints * sizeof(__le32)); 1195*da8fa4e3SBjoern A. Zeeb } 1196*da8fa4e3SBjoern A. Zeeb 1197*da8fa4e3SBjoern A. Zeeb /* 1198*da8fa4e3SBjoern A. Zeeb * target -> host packet log message 1199*da8fa4e3SBjoern A. Zeeb * 1200*da8fa4e3SBjoern A. Zeeb * The following field definitions describe the format of the packet log 1201*da8fa4e3SBjoern A. Zeeb * message sent from the target to the host. 1202*da8fa4e3SBjoern A. 
Zeeb * The message consists of a 4-octet header,followed by a variable number 1203*da8fa4e3SBjoern A. Zeeb * of 32-bit character values. 1204*da8fa4e3SBjoern A. Zeeb * 1205*da8fa4e3SBjoern A. Zeeb * |31 24|23 16|15 8|7 0| 1206*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1207*da8fa4e3SBjoern A. Zeeb * | | | | msg type | 1208*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1209*da8fa4e3SBjoern A. Zeeb * | payload | 1210*da8fa4e3SBjoern A. Zeeb * |-----------------------------------------------------------| 1211*da8fa4e3SBjoern A. Zeeb * - MSG_TYPE 1212*da8fa4e3SBjoern A. Zeeb * Bits 7:0 1213*da8fa4e3SBjoern A. Zeeb * Purpose: identifies this as a test message 1214*da8fa4e3SBjoern A. Zeeb * Value: HTT_MSG_TYPE_PACKETLOG 1215*da8fa4e3SBjoern A. Zeeb */ 1216*da8fa4e3SBjoern A. Zeeb struct htt_pktlog_msg { 1217*da8fa4e3SBjoern A. Zeeb u8 pad[3]; 1218*da8fa4e3SBjoern A. Zeeb u8 payload[]; 1219*da8fa4e3SBjoern A. Zeeb } __packed; 1220*da8fa4e3SBjoern A. Zeeb 1221*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_rx_reorder_stats { 1222*da8fa4e3SBjoern A. Zeeb /* Non QoS MPDUs received */ 1223*da8fa4e3SBjoern A. Zeeb __le32 deliver_non_qos; 1224*da8fa4e3SBjoern A. Zeeb 1225*da8fa4e3SBjoern A. Zeeb /* MPDUs received in-order */ 1226*da8fa4e3SBjoern A. Zeeb __le32 deliver_in_order; 1227*da8fa4e3SBjoern A. Zeeb 1228*da8fa4e3SBjoern A. Zeeb /* Flush due to reorder timer expired */ 1229*da8fa4e3SBjoern A. Zeeb __le32 deliver_flush_timeout; 1230*da8fa4e3SBjoern A. Zeeb 1231*da8fa4e3SBjoern A. Zeeb /* Flush due to move out of window */ 1232*da8fa4e3SBjoern A. Zeeb __le32 deliver_flush_oow; 1233*da8fa4e3SBjoern A. Zeeb 1234*da8fa4e3SBjoern A. Zeeb /* Flush due to DELBA */ 1235*da8fa4e3SBjoern A. Zeeb __le32 deliver_flush_delba; 1236*da8fa4e3SBjoern A. Zeeb 1237*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to FCS error */ 1238*da8fa4e3SBjoern A. Zeeb __le32 fcs_error; 1239*da8fa4e3SBjoern A. Zeeb 1240*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to monitor mode non-data packet */ 1241*da8fa4e3SBjoern A. Zeeb __le32 mgmt_ctrl; 1242*da8fa4e3SBjoern A. Zeeb 1243*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to invalid peer */ 1244*da8fa4e3SBjoern A. Zeeb __le32 invalid_peer; 1245*da8fa4e3SBjoern A. Zeeb 1246*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to duplication (non aggregation) */ 1247*da8fa4e3SBjoern A. Zeeb __le32 dup_non_aggr; 1248*da8fa4e3SBjoern A. Zeeb 1249*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to processed before */ 1250*da8fa4e3SBjoern A. Zeeb __le32 dup_past; 1251*da8fa4e3SBjoern A. Zeeb 1252*da8fa4e3SBjoern A. Zeeb /* MPDUs dropped due to duplicate in reorder queue */ 1253*da8fa4e3SBjoern A. Zeeb __le32 dup_in_reorder; 1254*da8fa4e3SBjoern A. Zeeb 1255*da8fa4e3SBjoern A. Zeeb /* Reorder timeout happened */ 1256*da8fa4e3SBjoern A. Zeeb __le32 reorder_timeout; 1257*da8fa4e3SBjoern A. Zeeb 1258*da8fa4e3SBjoern A. Zeeb /* invalid bar ssn */ 1259*da8fa4e3SBjoern A. Zeeb __le32 invalid_bar_ssn; 1260*da8fa4e3SBjoern A. Zeeb 1261*da8fa4e3SBjoern A. Zeeb /* reorder reset due to bar ssn */ 1262*da8fa4e3SBjoern A. Zeeb __le32 ssn_reset; 1263*da8fa4e3SBjoern A. Zeeb }; 1264*da8fa4e3SBjoern A. Zeeb 1265*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_tx_stats { 1266*da8fa4e3SBjoern A. Zeeb /* Num HTT cookies queued to dispatch list */ 1267*da8fa4e3SBjoern A. Zeeb __le32 comp_queued; 1268*da8fa4e3SBjoern A. Zeeb 1269*da8fa4e3SBjoern A. Zeeb /* Num HTT cookies dispatched */ 1270*da8fa4e3SBjoern A. 
Zeeb __le32 comp_delivered; 1271*da8fa4e3SBjoern A. Zeeb 1272*da8fa4e3SBjoern A. Zeeb /* Num MSDU queued to WAL */ 1273*da8fa4e3SBjoern A. Zeeb __le32 msdu_enqued; 1274*da8fa4e3SBjoern A. Zeeb 1275*da8fa4e3SBjoern A. Zeeb /* Num MPDU queue to WAL */ 1276*da8fa4e3SBjoern A. Zeeb __le32 mpdu_enqued; 1277*da8fa4e3SBjoern A. Zeeb 1278*da8fa4e3SBjoern A. Zeeb /* Num MSDUs dropped by WMM limit */ 1279*da8fa4e3SBjoern A. Zeeb __le32 wmm_drop; 1280*da8fa4e3SBjoern A. Zeeb 1281*da8fa4e3SBjoern A. Zeeb /* Num Local frames queued */ 1282*da8fa4e3SBjoern A. Zeeb __le32 local_enqued; 1283*da8fa4e3SBjoern A. Zeeb 1284*da8fa4e3SBjoern A. Zeeb /* Num Local frames done */ 1285*da8fa4e3SBjoern A. Zeeb __le32 local_freed; 1286*da8fa4e3SBjoern A. Zeeb 1287*da8fa4e3SBjoern A. Zeeb /* Num queued to HW */ 1288*da8fa4e3SBjoern A. Zeeb __le32 hw_queued; 1289*da8fa4e3SBjoern A. Zeeb 1290*da8fa4e3SBjoern A. Zeeb /* Num PPDU reaped from HW */ 1291*da8fa4e3SBjoern A. Zeeb __le32 hw_reaped; 1292*da8fa4e3SBjoern A. Zeeb 1293*da8fa4e3SBjoern A. Zeeb /* Num underruns */ 1294*da8fa4e3SBjoern A. Zeeb __le32 underrun; 1295*da8fa4e3SBjoern A. Zeeb 1296*da8fa4e3SBjoern A. Zeeb /* Num PPDUs cleaned up in TX abort */ 1297*da8fa4e3SBjoern A. Zeeb __le32 tx_abort; 1298*da8fa4e3SBjoern A. Zeeb 1299*da8fa4e3SBjoern A. Zeeb /* Num MPDUs requeued by SW */ 1300*da8fa4e3SBjoern A. Zeeb __le32 mpdus_requeued; 1301*da8fa4e3SBjoern A. Zeeb 1302*da8fa4e3SBjoern A. Zeeb /* excessive retries */ 1303*da8fa4e3SBjoern A. Zeeb __le32 tx_ko; 1304*da8fa4e3SBjoern A. Zeeb 1305*da8fa4e3SBjoern A. Zeeb /* data hw rate code */ 1306*da8fa4e3SBjoern A. Zeeb __le32 data_rc; 1307*da8fa4e3SBjoern A. Zeeb 1308*da8fa4e3SBjoern A. Zeeb /* Scheduler self triggers */ 1309*da8fa4e3SBjoern A. Zeeb __le32 self_triggers; 1310*da8fa4e3SBjoern A. Zeeb 1311*da8fa4e3SBjoern A. Zeeb /* frames dropped due to excessive sw retries */ 1312*da8fa4e3SBjoern A. Zeeb __le32 sw_retry_failure; 1313*da8fa4e3SBjoern A. Zeeb 1314*da8fa4e3SBjoern A. Zeeb /* illegal rate phy errors */ 1315*da8fa4e3SBjoern A. Zeeb __le32 illgl_rate_phy_err; 1316*da8fa4e3SBjoern A. Zeeb 1317*da8fa4e3SBjoern A. Zeeb /* wal pdev continuous xretry */ 1318*da8fa4e3SBjoern A. Zeeb __le32 pdev_cont_xretry; 1319*da8fa4e3SBjoern A. Zeeb 1320*da8fa4e3SBjoern A. Zeeb /* wal pdev continuous xretry */ 1321*da8fa4e3SBjoern A. Zeeb __le32 pdev_tx_timeout; 1322*da8fa4e3SBjoern A. Zeeb 1323*da8fa4e3SBjoern A. Zeeb /* wal pdev resets */ 1324*da8fa4e3SBjoern A. Zeeb __le32 pdev_resets; 1325*da8fa4e3SBjoern A. Zeeb 1326*da8fa4e3SBjoern A. Zeeb __le32 phy_underrun; 1327*da8fa4e3SBjoern A. Zeeb 1328*da8fa4e3SBjoern A. Zeeb /* MPDU is more than txop limit */ 1329*da8fa4e3SBjoern A. Zeeb __le32 txop_ovf; 1330*da8fa4e3SBjoern A. Zeeb } __packed; 1331*da8fa4e3SBjoern A. Zeeb 1332*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_rx_stats { 1333*da8fa4e3SBjoern A. Zeeb /* Cnts any change in ring routing mid-ppdu */ 1334*da8fa4e3SBjoern A. Zeeb __le32 mid_ppdu_route_change; 1335*da8fa4e3SBjoern A. Zeeb 1336*da8fa4e3SBjoern A. Zeeb /* Total number of statuses processed */ 1337*da8fa4e3SBjoern A. Zeeb __le32 status_rcvd; 1338*da8fa4e3SBjoern A. Zeeb 1339*da8fa4e3SBjoern A. Zeeb /* Extra frags on rings 0-3 */ 1340*da8fa4e3SBjoern A. Zeeb __le32 r0_frags; 1341*da8fa4e3SBjoern A. Zeeb __le32 r1_frags; 1342*da8fa4e3SBjoern A. Zeeb __le32 r2_frags; 1343*da8fa4e3SBjoern A. Zeeb __le32 r3_frags; 1344*da8fa4e3SBjoern A. Zeeb 1345*da8fa4e3SBjoern A. Zeeb /* MSDUs / MPDUs delivered to HTT */ 1346*da8fa4e3SBjoern A. 
Zeeb __le32 htt_msdus; 1347*da8fa4e3SBjoern A. Zeeb __le32 htt_mpdus; 1348*da8fa4e3SBjoern A. Zeeb 1349*da8fa4e3SBjoern A. Zeeb /* MSDUs / MPDUs delivered to local stack */ 1350*da8fa4e3SBjoern A. Zeeb __le32 loc_msdus; 1351*da8fa4e3SBjoern A. Zeeb __le32 loc_mpdus; 1352*da8fa4e3SBjoern A. Zeeb 1353*da8fa4e3SBjoern A. Zeeb /* AMSDUs that have more MSDUs than the status ring size */ 1354*da8fa4e3SBjoern A. Zeeb __le32 oversize_amsdu; 1355*da8fa4e3SBjoern A. Zeeb 1356*da8fa4e3SBjoern A. Zeeb /* Number of PHY errors */ 1357*da8fa4e3SBjoern A. Zeeb __le32 phy_errs; 1358*da8fa4e3SBjoern A. Zeeb 1359*da8fa4e3SBjoern A. Zeeb /* Number of PHY errors drops */ 1360*da8fa4e3SBjoern A. Zeeb __le32 phy_err_drop; 1361*da8fa4e3SBjoern A. Zeeb 1362*da8fa4e3SBjoern A. Zeeb /* Number of mpdu errors - FCS, MIC, ENC etc. */ 1363*da8fa4e3SBjoern A. Zeeb __le32 mpdu_errs; 1364*da8fa4e3SBjoern A. Zeeb } __packed; 1365*da8fa4e3SBjoern A. Zeeb 1366*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_peer_stats { 1367*da8fa4e3SBjoern A. Zeeb __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ 1368*da8fa4e3SBjoern A. Zeeb } __packed; 1369*da8fa4e3SBjoern A. Zeeb 1370*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_pdev_txrx { 1371*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_tx_stats tx_stats; 1372*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_rx_stats rx_stats; 1373*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_wal_peer_stats peer_stats; 1374*da8fa4e3SBjoern A. Zeeb } __packed; 1375*da8fa4e3SBjoern A. Zeeb 1376*da8fa4e3SBjoern A. Zeeb struct htt_dbg_stats_rx_rate_info { 1377*da8fa4e3SBjoern A. Zeeb __le32 mcs[10]; 1378*da8fa4e3SBjoern A. Zeeb __le32 sgi[10]; 1379*da8fa4e3SBjoern A. Zeeb __le32 nss[4]; 1380*da8fa4e3SBjoern A. Zeeb __le32 stbc[10]; 1381*da8fa4e3SBjoern A. Zeeb __le32 bw[3]; 1382*da8fa4e3SBjoern A. Zeeb __le32 pream[6]; 1383*da8fa4e3SBjoern A. Zeeb __le32 ldpc; 1384*da8fa4e3SBjoern A. Zeeb __le32 txbf; 1385*da8fa4e3SBjoern A. Zeeb }; 1386*da8fa4e3SBjoern A. Zeeb 1387*da8fa4e3SBjoern A. Zeeb /* 1388*da8fa4e3SBjoern A. Zeeb * htt_dbg_stats_status - 1389*da8fa4e3SBjoern A. Zeeb * present - The requested stats have been delivered in full. 1390*da8fa4e3SBjoern A. Zeeb * This indicates that either the stats information was contained 1391*da8fa4e3SBjoern A. Zeeb * in its entirety within this message, or else this message 1392*da8fa4e3SBjoern A. Zeeb * completes the delivery of the requested stats info that was 1393*da8fa4e3SBjoern A. Zeeb * partially delivered through earlier STATS_CONF messages. 1394*da8fa4e3SBjoern A. Zeeb * partial - The requested stats have been delivered in part. 1395*da8fa4e3SBjoern A. Zeeb * One or more subsequent STATS_CONF messages with the same 1396*da8fa4e3SBjoern A. Zeeb * cookie value will be sent to deliver the remainder of the 1397*da8fa4e3SBjoern A. Zeeb * information. 1398*da8fa4e3SBjoern A. Zeeb * error - The requested stats could not be delivered, for example due 1399*da8fa4e3SBjoern A. Zeeb * to a shortage of memory to construct a message holding the 1400*da8fa4e3SBjoern A. Zeeb * requested stats. 1401*da8fa4e3SBjoern A. Zeeb * invalid - The requested stat type is either not recognized, or the 1402*da8fa4e3SBjoern A. Zeeb * target is configured to not gather the stats type in question. 1403*da8fa4e3SBjoern A. Zeeb * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1404*da8fa4e3SBjoern A. Zeeb * series_done - This special value indicates that no further stats info 1405*da8fa4e3SBjoern A. 
Zeeb * elements are present within a series of stats info elems 1406*da8fa4e3SBjoern A. Zeeb * (within a stats upload confirmation message). 1407*da8fa4e3SBjoern A. Zeeb */ 1408*da8fa4e3SBjoern A. Zeeb enum htt_dbg_stats_status { 1409*da8fa4e3SBjoern A. Zeeb HTT_DBG_STATS_STATUS_PRESENT = 0, 1410*da8fa4e3SBjoern A. Zeeb HTT_DBG_STATS_STATUS_PARTIAL = 1, 1411*da8fa4e3SBjoern A. Zeeb HTT_DBG_STATS_STATUS_ERROR = 2, 1412*da8fa4e3SBjoern A. Zeeb HTT_DBG_STATS_STATUS_INVALID = 3, 1413*da8fa4e3SBjoern A. Zeeb HTT_DBG_STATS_STATUS_SERIES_DONE = 7 1414*da8fa4e3SBjoern A. Zeeb }; 1415*da8fa4e3SBjoern A. Zeeb 1416*da8fa4e3SBjoern A. Zeeb /* 1417*da8fa4e3SBjoern A. Zeeb * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank 1418*da8fa4e3SBjoern A. Zeeb * 1419*da8fa4e3SBjoern A. Zeeb * The following field definitions describe the format of the HTT host 1420*da8fa4e3SBjoern A. Zeeb * to target frag_desc/msdu_ext bank configuration message. 1421*da8fa4e3SBjoern A. Zeeb * The message contains the based address and the min and max id of the 1422*da8fa4e3SBjoern A. Zeeb * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and 1423*da8fa4e3SBjoern A. Zeeb * MSDU_EXT/FRAG_DESC. 1424*da8fa4e3SBjoern A. Zeeb * HTT will use id in HTT descriptor instead sending the frag_desc_ptr. 1425*da8fa4e3SBjoern A. Zeeb * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0 1426*da8fa4e3SBjoern A. Zeeb * the hardware does the mapping/translation. 1427*da8fa4e3SBjoern A. Zeeb * 1428*da8fa4e3SBjoern A. Zeeb * Total banks that can be configured is configured to 16. 1429*da8fa4e3SBjoern A. Zeeb * 1430*da8fa4e3SBjoern A. Zeeb * This should be called before any TX has be initiated by the HTT 1431*da8fa4e3SBjoern A. Zeeb * 1432*da8fa4e3SBjoern A. Zeeb * |31 16|15 8|7 5|4 0| 1433*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1434*da8fa4e3SBjoern A. Zeeb * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type | 1435*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1436*da8fa4e3SBjoern A. Zeeb * | BANK0_BASE_ADDRESS | 1437*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1438*da8fa4e3SBjoern A. Zeeb * | ... | 1439*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1440*da8fa4e3SBjoern A. Zeeb * | BANK15_BASE_ADDRESS | 1441*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1442*da8fa4e3SBjoern A. Zeeb * | BANK0_MAX_ID | BANK0_MIN_ID | 1443*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1444*da8fa4e3SBjoern A. Zeeb * | ... | 1445*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1446*da8fa4e3SBjoern A. Zeeb * | BANK15_MAX_ID | BANK15_MIN_ID | 1447*da8fa4e3SBjoern A. Zeeb * |------------------------------------------------------------| 1448*da8fa4e3SBjoern A. Zeeb * Header fields: 1449*da8fa4e3SBjoern A. Zeeb * - MSG_TYPE 1450*da8fa4e3SBjoern A. Zeeb * Bits 7:0 1451*da8fa4e3SBjoern A. Zeeb * Value: 0x6 1452*da8fa4e3SBjoern A. Zeeb * - BANKx_BASE_ADDRESS 1453*da8fa4e3SBjoern A. Zeeb * Bits 31:0 1454*da8fa4e3SBjoern A. Zeeb * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT 1455*da8fa4e3SBjoern A. Zeeb * bank physical/bus address. 1456*da8fa4e3SBjoern A. Zeeb * - BANKx_MIN_ID 1457*da8fa4e3SBjoern A. Zeeb * Bits 15:0 1458*da8fa4e3SBjoern A. 
Zeeb * Purpose: Provide a mechanism to specify the min index that needs to 1459*da8fa4e3SBjoern A. Zeeb * mapped. 1460*da8fa4e3SBjoern A. Zeeb * - BANKx_MAX_ID 1461*da8fa4e3SBjoern A. Zeeb * Bits 31:16 1462*da8fa4e3SBjoern A. Zeeb * Purpose: Provide a mechanism to specify the max index that needs to 1463*da8fa4e3SBjoern A. Zeeb * 1464*da8fa4e3SBjoern A. Zeeb */ 1465*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_id { 1466*da8fa4e3SBjoern A. Zeeb __le16 bank_min_id; 1467*da8fa4e3SBjoern A. Zeeb __le16 bank_max_id; 1468*da8fa4e3SBjoern A. Zeeb } __packed; 1469*da8fa4e3SBjoern A. Zeeb 1470*da8fa4e3SBjoern A. Zeeb /* real is 16 but it wouldn't fit in the max htt message size 1471*da8fa4e3SBjoern A. Zeeb * so we use a conservatively safe value for now 1472*da8fa4e3SBjoern A. Zeeb */ 1473*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_MAX 4 1474*da8fa4e3SBjoern A. Zeeb 1475*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 1476*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 1477*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) 1478*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) 1479*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) 1480*da8fa4e3SBjoern A. Zeeb #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 1481*da8fa4e3SBjoern A. Zeeb 1482*da8fa4e3SBjoern A. Zeeb enum htt_q_depth_type { 1483*da8fa4e3SBjoern A. Zeeb HTT_Q_DEPTH_TYPE_BYTES = 0, 1484*da8fa4e3SBjoern A. Zeeb HTT_Q_DEPTH_TYPE_MSDUS = 1, 1485*da8fa4e3SBjoern A. Zeeb }; 1486*da8fa4e3SBjoern A. Zeeb 1487*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ 1488*da8fa4e3SBjoern A. Zeeb TARGET_10_4_NUM_VDEVS) 1489*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_NUM_TIDS 8 1490*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_SIZE 1 1491*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 1492*da8fa4e3SBjoern A. Zeeb 1493*da8fa4e3SBjoern A. Zeeb /** 1494*da8fa4e3SBjoern A. Zeeb * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config 1495*da8fa4e3SBjoern A. Zeeb * 1496*da8fa4e3SBjoern A. Zeeb * Defines host q state format and behavior. See htt_q_state. 1497*da8fa4e3SBjoern A. Zeeb * 1498*da8fa4e3SBjoern A. Zeeb * @record_size: Defines the size of each host q entry in bytes. In practice 1499*da8fa4e3SBjoern A. Zeeb * however firmware (at least 10.4.3-00191) ignores this host 1500*da8fa4e3SBjoern A. Zeeb * configuration value and uses hardcoded value of 1. 1501*da8fa4e3SBjoern A. Zeeb * @record_multiplier: This is valid only when q depth type is MSDUs. It 1502*da8fa4e3SBjoern A. Zeeb * defines the exponent for the power of 2 multiplication. 1503*da8fa4e3SBjoern A. Zeeb */ 1504*da8fa4e3SBjoern A. Zeeb struct htt_q_state_conf { 1505*da8fa4e3SBjoern A. Zeeb __le32 paddr; 1506*da8fa4e3SBjoern A. Zeeb __le16 num_peers; 1507*da8fa4e3SBjoern A. Zeeb __le16 num_tids; 1508*da8fa4e3SBjoern A. Zeeb u8 record_size; 1509*da8fa4e3SBjoern A. Zeeb u8 record_multiplier; 1510*da8fa4e3SBjoern A. Zeeb u8 pad[2]; 1511*da8fa4e3SBjoern A. Zeeb } __packed; 1512*da8fa4e3SBjoern A. Zeeb 1513*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_cfg32 { 1514*da8fa4e3SBjoern A. Zeeb u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ 1515*da8fa4e3SBjoern A. Zeeb u8 num_banks; 1516*da8fa4e3SBjoern A. Zeeb u8 desc_size; 1517*da8fa4e3SBjoern A. Zeeb __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; 1518*da8fa4e3SBjoern A. 
Zeeb struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; 1519*da8fa4e3SBjoern A. Zeeb struct htt_q_state_conf q_state; 1520*da8fa4e3SBjoern A. Zeeb } __packed; 1521*da8fa4e3SBjoern A. Zeeb 1522*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_cfg64 { 1523*da8fa4e3SBjoern A. Zeeb u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ 1524*da8fa4e3SBjoern A. Zeeb u8 num_banks; 1525*da8fa4e3SBjoern A. Zeeb u8 desc_size; 1526*da8fa4e3SBjoern A. Zeeb __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; 1527*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; 1528*da8fa4e3SBjoern A. Zeeb struct htt_q_state_conf q_state; 1529*da8fa4e3SBjoern A. Zeeb } __packed; 1530*da8fa4e3SBjoern A. Zeeb 1531*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 1532*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f 1533*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 1534*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 1535*da8fa4e3SBjoern A. Zeeb #define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 1536*da8fa4e3SBjoern A. Zeeb 1537*da8fa4e3SBjoern A. Zeeb /** 1538*da8fa4e3SBjoern A. Zeeb * htt_q_state - shared between host and firmware via DMA 1539*da8fa4e3SBjoern A. Zeeb * 1540*da8fa4e3SBjoern A. Zeeb * This structure is used for the host to expose it's software queue state to 1541*da8fa4e3SBjoern A. Zeeb * firmware so that its rate control can schedule fetch requests for optimized 1542*da8fa4e3SBjoern A. Zeeb * performance. This is most notably used for MU-MIMO aggregation when multiple 1543*da8fa4e3SBjoern A. Zeeb * MU clients are connected. 1544*da8fa4e3SBjoern A. Zeeb * 1545*da8fa4e3SBjoern A. Zeeb * @count: Each element defines the host queue depth. When q depth type was 1546*da8fa4e3SBjoern A. Zeeb * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: 1547*da8fa4e3SBjoern A. Zeeb * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and 1548*da8fa4e3SBjoern A. Zeeb * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as 1549*da8fa4e3SBjoern A. Zeeb * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** 1550*da8fa4e3SBjoern A. Zeeb * record_multiplier (see htt_q_state_conf). 1551*da8fa4e3SBjoern A. Zeeb * @map: Used by firmware to quickly check which host queues are not empty. It 1552*da8fa4e3SBjoern A. Zeeb * is a bitmap simply saying. 1553*da8fa4e3SBjoern A. Zeeb * @seq: Used by firmware to quickly check if the host queues were updated 1554*da8fa4e3SBjoern A. Zeeb * since it last checked. 1555*da8fa4e3SBjoern A. Zeeb * 1556*da8fa4e3SBjoern A. Zeeb * FIXME: Is the q_state map[] size calculation really correct? 1557*da8fa4e3SBjoern A. Zeeb */ 1558*da8fa4e3SBjoern A. Zeeb struct htt_q_state { 1559*da8fa4e3SBjoern A. Zeeb u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; 1560*da8fa4e3SBjoern A. Zeeb u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; 1561*da8fa4e3SBjoern A. Zeeb __le32 seq; 1562*da8fa4e3SBjoern A. Zeeb } __packed; 1563*da8fa4e3SBjoern A. Zeeb 1564*da8fa4e3SBjoern A. Zeeb #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff 1565*da8fa4e3SBjoern A. Zeeb #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 1566*da8fa4e3SBjoern A. Zeeb #define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 1567*da8fa4e3SBjoern A. Zeeb #define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 1568*da8fa4e3SBjoern A. Zeeb 1569*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_record { 1570*da8fa4e3SBjoern A. 
Zeeb __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ 1571*da8fa4e3SBjoern A. Zeeb __le16 num_msdus; 1572*da8fa4e3SBjoern A. Zeeb __le32 num_bytes; 1573*da8fa4e3SBjoern A. Zeeb } __packed; 1574*da8fa4e3SBjoern A. Zeeb 1575*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_ind { 1576*da8fa4e3SBjoern A. Zeeb u8 pad0; 1577*da8fa4e3SBjoern A. Zeeb __le16 fetch_seq_num; 1578*da8fa4e3SBjoern A. Zeeb __le32 token; 1579*da8fa4e3SBjoern A. Zeeb __le16 num_resp_ids; 1580*da8fa4e3SBjoern A. Zeeb __le16 num_records; 1581*da8fa4e3SBjoern A. Zeeb union { 1582*da8fa4e3SBjoern A. Zeeb /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ 1583*da8fa4e3SBjoern A. Zeeb DECLARE_FLEX_ARRAY(__le32, resp_ids); 1584*da8fa4e3SBjoern A. Zeeb DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records); 1585*da8fa4e3SBjoern A. Zeeb }; 1586*da8fa4e3SBjoern A. Zeeb } __packed; 1587*da8fa4e3SBjoern A. Zeeb 1588*da8fa4e3SBjoern A. Zeeb static inline void * 1589*da8fa4e3SBjoern A. Zeeb ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) 1590*da8fa4e3SBjoern A. Zeeb { 1591*da8fa4e3SBjoern A. Zeeb return (void *)&ind->records[le16_to_cpu(ind->num_records)]; 1592*da8fa4e3SBjoern A. Zeeb } 1593*da8fa4e3SBjoern A. Zeeb 1594*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_resp { 1595*da8fa4e3SBjoern A. Zeeb u8 pad0; 1596*da8fa4e3SBjoern A. Zeeb __le16 resp_id; 1597*da8fa4e3SBjoern A. Zeeb __le16 fetch_seq_num; 1598*da8fa4e3SBjoern A. Zeeb __le16 num_records; 1599*da8fa4e3SBjoern A. Zeeb __le32 token; 1600*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_record records[]; 1601*da8fa4e3SBjoern A. Zeeb } __packed; 1602*da8fa4e3SBjoern A. Zeeb 1603*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_confirm { 1604*da8fa4e3SBjoern A. Zeeb u8 pad0; 1605*da8fa4e3SBjoern A. Zeeb __le16 num_resp_ids; 1606*da8fa4e3SBjoern A. Zeeb __le32 resp_ids[]; 1607*da8fa4e3SBjoern A. Zeeb } __packed; 1608*da8fa4e3SBjoern A. Zeeb 1609*da8fa4e3SBjoern A. Zeeb enum htt_tx_mode_switch_mode { 1610*da8fa4e3SBjoern A. Zeeb HTT_TX_MODE_SWITCH_PUSH = 0, 1611*da8fa4e3SBjoern A. Zeeb HTT_TX_MODE_SWITCH_PUSH_PULL = 1, 1612*da8fa4e3SBjoern A. Zeeb }; 1613*da8fa4e3SBjoern A. Zeeb 1614*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) 1615*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe 1616*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 1617*da8fa4e3SBjoern A. Zeeb 1618*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 1619*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 1620*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc 1621*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 1622*da8fa4e3SBjoern A. Zeeb 1623*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff 1624*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 1625*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 1626*da8fa4e3SBjoern A. Zeeb #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 1627*da8fa4e3SBjoern A. Zeeb 1628*da8fa4e3SBjoern A. Zeeb struct htt_tx_mode_switch_record { 1629*da8fa4e3SBjoern A. Zeeb __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ 1630*da8fa4e3SBjoern A. Zeeb __le16 num_max_msdus; 1631*da8fa4e3SBjoern A. Zeeb } __packed; 1632*da8fa4e3SBjoern A. Zeeb 1633*da8fa4e3SBjoern A. Zeeb struct htt_tx_mode_switch_ind { 1634*da8fa4e3SBjoern A. Zeeb u8 pad0; 1635*da8fa4e3SBjoern A. 
Zeeb __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ 1636*da8fa4e3SBjoern A. Zeeb __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ 1637*da8fa4e3SBjoern A. Zeeb u8 pad1[2]; 1638*da8fa4e3SBjoern A. Zeeb struct htt_tx_mode_switch_record records[]; 1639*da8fa4e3SBjoern A. Zeeb } __packed; 1640*da8fa4e3SBjoern A. Zeeb 1641*da8fa4e3SBjoern A. Zeeb struct htt_channel_change { 1642*da8fa4e3SBjoern A. Zeeb u8 pad[3]; 1643*da8fa4e3SBjoern A. Zeeb __le32 freq; 1644*da8fa4e3SBjoern A. Zeeb __le32 center_freq1; 1645*da8fa4e3SBjoern A. Zeeb __le32 center_freq2; 1646*da8fa4e3SBjoern A. Zeeb __le32 phymode; 1647*da8fa4e3SBjoern A. Zeeb } __packed; 1648*da8fa4e3SBjoern A. Zeeb 1649*da8fa4e3SBjoern A. Zeeb struct htt_per_peer_tx_stats_ind { 1650*da8fa4e3SBjoern A. Zeeb __le32 succ_bytes; 1651*da8fa4e3SBjoern A. Zeeb __le32 retry_bytes; 1652*da8fa4e3SBjoern A. Zeeb __le32 failed_bytes; 1653*da8fa4e3SBjoern A. Zeeb u8 ratecode; 1654*da8fa4e3SBjoern A. Zeeb u8 flags; 1655*da8fa4e3SBjoern A. Zeeb __le16 peer_id; 1656*da8fa4e3SBjoern A. Zeeb __le16 succ_pkts; 1657*da8fa4e3SBjoern A. Zeeb __le16 retry_pkts; 1658*da8fa4e3SBjoern A. Zeeb __le16 failed_pkts; 1659*da8fa4e3SBjoern A. Zeeb __le16 tx_duration; 1660*da8fa4e3SBjoern A. Zeeb __le32 reserved1; 1661*da8fa4e3SBjoern A. Zeeb __le32 reserved2; 1662*da8fa4e3SBjoern A. Zeeb } __packed; 1663*da8fa4e3SBjoern A. Zeeb 1664*da8fa4e3SBjoern A. Zeeb struct htt_peer_tx_stats { 1665*da8fa4e3SBjoern A. Zeeb u8 num_ppdu; 1666*da8fa4e3SBjoern A. Zeeb u8 ppdu_len; 1667*da8fa4e3SBjoern A. Zeeb u8 version; 1668*da8fa4e3SBjoern A. Zeeb u8 payload[]; 1669*da8fa4e3SBjoern A. Zeeb } __packed; 1670*da8fa4e3SBjoern A. Zeeb 1671*da8fa4e3SBjoern A. Zeeb #define ATH10K_10_2_TX_STATS_OFFSET 136 1672*da8fa4e3SBjoern A. Zeeb #define PEER_STATS_FOR_NO_OF_PPDUS 4 1673*da8fa4e3SBjoern A. Zeeb 1674*da8fa4e3SBjoern A. Zeeb struct ath10k_10_2_peer_tx_stats { 1675*da8fa4e3SBjoern A. Zeeb u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS]; 1676*da8fa4e3SBjoern A. Zeeb u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; 1677*da8fa4e3SBjoern A. Zeeb __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; 1678*da8fa4e3SBjoern A. Zeeb u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; 1679*da8fa4e3SBjoern A. Zeeb __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; 1680*da8fa4e3SBjoern A. Zeeb u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; 1681*da8fa4e3SBjoern A. Zeeb __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; 1682*da8fa4e3SBjoern A. Zeeb u8 flags[PEER_STATS_FOR_NO_OF_PPDUS]; 1683*da8fa4e3SBjoern A. Zeeb __le32 tx_duration; 1684*da8fa4e3SBjoern A. Zeeb u8 tx_ppdu_cnt; 1685*da8fa4e3SBjoern A. Zeeb u8 peer_id; 1686*da8fa4e3SBjoern A. Zeeb } __packed; 1687*da8fa4e3SBjoern A. Zeeb 1688*da8fa4e3SBjoern A. Zeeb union htt_rx_pn_t { 1689*da8fa4e3SBjoern A. Zeeb /* WEP: 24-bit PN */ 1690*da8fa4e3SBjoern A. Zeeb u32 pn24; 1691*da8fa4e3SBjoern A. Zeeb 1692*da8fa4e3SBjoern A. Zeeb /* TKIP or CCMP: 48-bit PN */ 1693*da8fa4e3SBjoern A. Zeeb u64 pn48; 1694*da8fa4e3SBjoern A. Zeeb 1695*da8fa4e3SBjoern A. Zeeb /* WAPI: 128-bit PN */ 1696*da8fa4e3SBjoern A. Zeeb u64 pn128[2]; 1697*da8fa4e3SBjoern A. Zeeb }; 1698*da8fa4e3SBjoern A. Zeeb 1699*da8fa4e3SBjoern A. Zeeb struct htt_cmd { 1700*da8fa4e3SBjoern A. Zeeb struct htt_cmd_hdr hdr; 1701*da8fa4e3SBjoern A. Zeeb union { 1702*da8fa4e3SBjoern A. Zeeb struct htt_ver_req ver_req; 1703*da8fa4e3SBjoern A. Zeeb struct htt_mgmt_tx_desc mgmt_tx; 1704*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_desc data_tx; 1705*da8fa4e3SBjoern A. Zeeb struct htt_rx_ring_setup_32 rx_setup_32; 1706*da8fa4e3SBjoern A. 
Zeeb struct htt_rx_ring_setup_64 rx_setup_64; 1707*da8fa4e3SBjoern A. Zeeb struct htt_stats_req stats_req; 1708*da8fa4e3SBjoern A. Zeeb struct htt_oob_sync_req oob_sync_req; 1709*da8fa4e3SBjoern A. Zeeb struct htt_aggr_conf aggr_conf; 1710*da8fa4e3SBjoern A. Zeeb struct htt_aggr_conf_v2 aggr_conf_v2; 1711*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; 1712*da8fa4e3SBjoern A. Zeeb struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; 1713*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_resp tx_fetch_resp; 1714*da8fa4e3SBjoern A. Zeeb }; 1715*da8fa4e3SBjoern A. Zeeb } __packed; 1716*da8fa4e3SBjoern A. Zeeb 1717*da8fa4e3SBjoern A. Zeeb struct htt_resp { 1718*da8fa4e3SBjoern A. Zeeb struct htt_resp_hdr hdr; 1719*da8fa4e3SBjoern A. Zeeb union { 1720*da8fa4e3SBjoern A. Zeeb struct htt_ver_resp ver_resp; 1721*da8fa4e3SBjoern A. Zeeb struct htt_mgmt_tx_completion mgmt_tx_completion; 1722*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_completion data_tx_completion; 1723*da8fa4e3SBjoern A. Zeeb struct htt_rx_indication rx_ind; 1724*da8fa4e3SBjoern A. Zeeb struct htt_rx_indication_hl rx_ind_hl; 1725*da8fa4e3SBjoern A. Zeeb struct htt_rx_fragment_indication rx_frag_ind; 1726*da8fa4e3SBjoern A. Zeeb struct htt_rx_peer_map peer_map; 1727*da8fa4e3SBjoern A. Zeeb struct htt_rx_peer_unmap peer_unmap; 1728*da8fa4e3SBjoern A. Zeeb struct htt_rx_flush rx_flush; 1729*da8fa4e3SBjoern A. Zeeb struct htt_rx_addba rx_addba; 1730*da8fa4e3SBjoern A. Zeeb struct htt_rx_delba rx_delba; 1731*da8fa4e3SBjoern A. Zeeb struct htt_security_indication security_indication; 1732*da8fa4e3SBjoern A. Zeeb struct htt_rc_update rc_update; 1733*da8fa4e3SBjoern A. Zeeb struct htt_rx_test rx_test; 1734*da8fa4e3SBjoern A. Zeeb struct htt_pktlog_msg pktlog_msg; 1735*da8fa4e3SBjoern A. Zeeb struct htt_rx_pn_ind rx_pn_ind; 1736*da8fa4e3SBjoern A. Zeeb struct htt_rx_offload_ind rx_offload_ind; 1737*da8fa4e3SBjoern A. Zeeb struct htt_rx_in_ord_ind rx_in_ord_ind; 1738*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_ind tx_fetch_ind; 1739*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_confirm tx_fetch_confirm; 1740*da8fa4e3SBjoern A. Zeeb struct htt_tx_mode_switch_ind tx_mode_switch_ind; 1741*da8fa4e3SBjoern A. Zeeb struct htt_channel_change chan_change; 1742*da8fa4e3SBjoern A. Zeeb struct htt_peer_tx_stats peer_tx_stats; 1743*da8fa4e3SBjoern A. Zeeb }; 1744*da8fa4e3SBjoern A. Zeeb } __packed; 1745*da8fa4e3SBjoern A. Zeeb 1746*da8fa4e3SBjoern A. Zeeb /*** host side structures follow ***/ 1747*da8fa4e3SBjoern A. Zeeb 1748*da8fa4e3SBjoern A. Zeeb struct htt_tx_done { 1749*da8fa4e3SBjoern A. Zeeb u16 msdu_id; 1750*da8fa4e3SBjoern A. Zeeb u16 status; 1751*da8fa4e3SBjoern A. Zeeb u8 ack_rssi; 1752*da8fa4e3SBjoern A. Zeeb }; 1753*da8fa4e3SBjoern A. Zeeb 1754*da8fa4e3SBjoern A. Zeeb enum htt_tx_compl_state { 1755*da8fa4e3SBjoern A. Zeeb HTT_TX_COMPL_STATE_NONE, 1756*da8fa4e3SBjoern A. Zeeb HTT_TX_COMPL_STATE_ACK, 1757*da8fa4e3SBjoern A. Zeeb HTT_TX_COMPL_STATE_NOACK, 1758*da8fa4e3SBjoern A. Zeeb HTT_TX_COMPL_STATE_DISCARD, 1759*da8fa4e3SBjoern A. Zeeb }; 1760*da8fa4e3SBjoern A. Zeeb 1761*da8fa4e3SBjoern A. Zeeb struct htt_peer_map_event { 1762*da8fa4e3SBjoern A. Zeeb u8 vdev_id; 1763*da8fa4e3SBjoern A. Zeeb u16 peer_id; 1764*da8fa4e3SBjoern A. Zeeb u8 addr[ETH_ALEN]; 1765*da8fa4e3SBjoern A. Zeeb }; 1766*da8fa4e3SBjoern A. Zeeb 1767*da8fa4e3SBjoern A. Zeeb struct htt_peer_unmap_event { 1768*da8fa4e3SBjoern A. Zeeb u16 peer_id; 1769*da8fa4e3SBjoern A. Zeeb }; 1770*da8fa4e3SBjoern A. Zeeb 1771*da8fa4e3SBjoern A. 
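/*
 * Illustrative sketch, not part of the driver API: how a tx completion
 * record could be staged before being pushed to the txdone fifo declared
 * further below. Storing the htt_tx_compl_state value in @status is an
 * assumption made for the example; the helper name is hypothetical.
 */
static inline void example_htt_fill_tx_done(struct htt_tx_done *tx_done,
					    u16 msdu_id, bool acked)
{
	tx_done->msdu_id = msdu_id;
	tx_done->status = acked ? HTT_TX_COMPL_STATE_ACK :
				  HTT_TX_COMPL_STATE_NOACK;
	tx_done->ack_rssi = 0;	/* only meaningful when fw reports ack rssi */
}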
Zeeb struct ath10k_htt_txbuf_32 { 1772*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_desc_frag frags[2]; 1773*da8fa4e3SBjoern A. Zeeb struct ath10k_htc_hdr htc_hdr; 1774*da8fa4e3SBjoern A. Zeeb struct htt_cmd_hdr cmd_hdr; 1775*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_desc cmd_tx; 1776*da8fa4e3SBjoern A. Zeeb } __packed __aligned(4); 1777*da8fa4e3SBjoern A. Zeeb 1778*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_txbuf_64 { 1779*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_desc_frag frags[2]; 1780*da8fa4e3SBjoern A. Zeeb struct ath10k_htc_hdr htc_hdr; 1781*da8fa4e3SBjoern A. Zeeb struct htt_cmd_hdr cmd_hdr; 1782*da8fa4e3SBjoern A. Zeeb struct htt_data_tx_desc_64 cmd_tx; 1783*da8fa4e3SBjoern A. Zeeb } __packed __aligned(4); 1784*da8fa4e3SBjoern A. Zeeb 1785*da8fa4e3SBjoern A. Zeeb struct ath10k_htt { 1786*da8fa4e3SBjoern A. Zeeb struct ath10k *ar; 1787*da8fa4e3SBjoern A. Zeeb enum ath10k_htc_ep_id eid; 1788*da8fa4e3SBjoern A. Zeeb 1789*da8fa4e3SBjoern A. Zeeb struct sk_buff_head rx_indication_head; 1790*da8fa4e3SBjoern A. Zeeb 1791*da8fa4e3SBjoern A. Zeeb u8 target_version_major; 1792*da8fa4e3SBjoern A. Zeeb u8 target_version_minor; 1793*da8fa4e3SBjoern A. Zeeb struct completion target_version_received; 1794*da8fa4e3SBjoern A. Zeeb u8 max_num_amsdu; 1795*da8fa4e3SBjoern A. Zeeb u8 max_num_ampdu; 1796*da8fa4e3SBjoern A. Zeeb 1797*da8fa4e3SBjoern A. Zeeb const enum htt_t2h_msg_type *t2h_msg_types; 1798*da8fa4e3SBjoern A. Zeeb u32 t2h_msg_types_max; 1799*da8fa4e3SBjoern A. Zeeb 1800*da8fa4e3SBjoern A. Zeeb struct { 1801*da8fa4e3SBjoern A. Zeeb /* 1802*da8fa4e3SBjoern A. Zeeb * Ring of network buffer objects - This ring is 1803*da8fa4e3SBjoern A. Zeeb * used exclusively by the host SW. This ring 1804*da8fa4e3SBjoern A. Zeeb * mirrors the dev_addrs_ring that is shared 1805*da8fa4e3SBjoern A. Zeeb * between the host SW and the MAC HW. The host SW 1806*da8fa4e3SBjoern A. Zeeb * uses this netbufs ring to locate the network 1807*da8fa4e3SBjoern A. Zeeb * buffer objects whose data buffers the HW has 1808*da8fa4e3SBjoern A. Zeeb * filled. 1809*da8fa4e3SBjoern A. Zeeb */ 1810*da8fa4e3SBjoern A. Zeeb struct sk_buff **netbufs_ring; 1811*da8fa4e3SBjoern A. Zeeb 1812*da8fa4e3SBjoern A. Zeeb /* This is used only with firmware supporting IN_ORD_IND. 1813*da8fa4e3SBjoern A. Zeeb * 1814*da8fa4e3SBjoern A. Zeeb * With Full Rx Reorder the HTT Rx Ring is more of a temporary 1815*da8fa4e3SBjoern A. Zeeb * buffer ring from which buffer addresses are copied by the 1816*da8fa4e3SBjoern A. Zeeb * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND 1817*da8fa4e3SBjoern A. Zeeb * pointing to specific (re-ordered) buffers. 1818*da8fa4e3SBjoern A. Zeeb * 1819*da8fa4e3SBjoern A. Zeeb * FIXME: With kernel generic hashing functions there's a lot 1820*da8fa4e3SBjoern A. Zeeb * of hash collisions for sk_buffs. 1821*da8fa4e3SBjoern A. Zeeb */ 1822*da8fa4e3SBjoern A. Zeeb bool in_ord_rx; 1823*da8fa4e3SBjoern A. Zeeb DECLARE_HASHTABLE(skb_table, 4); 1824*da8fa4e3SBjoern A. Zeeb 1825*da8fa4e3SBjoern A. Zeeb /* 1826*da8fa4e3SBjoern A. Zeeb * Ring of buffer addresses - 1827*da8fa4e3SBjoern A. Zeeb * This ring holds the "physical" device address of the 1828*da8fa4e3SBjoern A. Zeeb * rx buffers the host SW provides for the MAC HW to 1829*da8fa4e3SBjoern A. Zeeb * fill. 1830*da8fa4e3SBjoern A. Zeeb */ 1831*da8fa4e3SBjoern A. Zeeb union { 1832*da8fa4e3SBjoern A. Zeeb __le64 *paddrs_ring_64; 1833*da8fa4e3SBjoern A. Zeeb __le32 *paddrs_ring_32; 1834*da8fa4e3SBjoern A. Zeeb }; 1835*da8fa4e3SBjoern A. Zeeb 1836*da8fa4e3SBjoern A. 
Zeeb /* 1837*da8fa4e3SBjoern A. Zeeb * Base address of ring, as a "physical" device address 1838*da8fa4e3SBjoern A. Zeeb * rather than a CPU address. 1839*da8fa4e3SBjoern A. Zeeb */ 1840*da8fa4e3SBjoern A. Zeeb dma_addr_t base_paddr; 1841*da8fa4e3SBjoern A. Zeeb 1842*da8fa4e3SBjoern A. Zeeb /* how many elems in the ring (power of 2) */ 1843*da8fa4e3SBjoern A. Zeeb int size; 1844*da8fa4e3SBjoern A. Zeeb 1845*da8fa4e3SBjoern A. Zeeb /* size - 1 */ 1846*da8fa4e3SBjoern A. Zeeb unsigned int size_mask; 1847*da8fa4e3SBjoern A. Zeeb 1848*da8fa4e3SBjoern A. Zeeb /* how many rx buffers to keep in the ring */ 1849*da8fa4e3SBjoern A. Zeeb int fill_level; 1850*da8fa4e3SBjoern A. Zeeb 1851*da8fa4e3SBjoern A. Zeeb /* how many rx buffers (full+empty) are in the ring */ 1852*da8fa4e3SBjoern A. Zeeb int fill_cnt; 1853*da8fa4e3SBjoern A. Zeeb 1854*da8fa4e3SBjoern A. Zeeb /* 1855*da8fa4e3SBjoern A. Zeeb * alloc_idx - where HTT SW has deposited empty buffers 1856*da8fa4e3SBjoern A. Zeeb * This is allocated in consistent mem, so that the FW can 1857*da8fa4e3SBjoern A. Zeeb * read this variable, and program the HW's FW_IDX reg with 1858*da8fa4e3SBjoern A. Zeeb * the value of this shadow register. 1859*da8fa4e3SBjoern A. Zeeb */ 1860*da8fa4e3SBjoern A. Zeeb struct { 1861*da8fa4e3SBjoern A. Zeeb __le32 *vaddr; 1862*da8fa4e3SBjoern A. Zeeb dma_addr_t paddr; 1863*da8fa4e3SBjoern A. Zeeb } alloc_idx; 1864*da8fa4e3SBjoern A. Zeeb 1865*da8fa4e3SBjoern A. Zeeb /* where HTT SW has processed bufs filled by rx MAC DMA */ 1866*da8fa4e3SBjoern A. Zeeb struct { 1867*da8fa4e3SBjoern A. Zeeb unsigned int msdu_payld; 1868*da8fa4e3SBjoern A. Zeeb } sw_rd_idx; 1869*da8fa4e3SBjoern A. Zeeb 1870*da8fa4e3SBjoern A. Zeeb /* 1871*da8fa4e3SBjoern A. Zeeb * refill_retry_timer - timer triggered when the ring is 1872*da8fa4e3SBjoern A. Zeeb * not refilled to the level expected 1873*da8fa4e3SBjoern A. Zeeb */ 1874*da8fa4e3SBjoern A. Zeeb struct timer_list refill_retry_timer; 1875*da8fa4e3SBjoern A. Zeeb 1876*da8fa4e3SBjoern A. Zeeb /* Protects access to all rx ring buffer state variables */ 1877*da8fa4e3SBjoern A. Zeeb spinlock_t lock; 1878*da8fa4e3SBjoern A. Zeeb } rx_ring; 1879*da8fa4e3SBjoern A. Zeeb 1880*da8fa4e3SBjoern A. Zeeb unsigned int prefetch_len; 1881*da8fa4e3SBjoern A. Zeeb 1882*da8fa4e3SBjoern A. Zeeb /* Protects access to pending_tx, num_pending_tx */ 1883*da8fa4e3SBjoern A. Zeeb spinlock_t tx_lock; 1884*da8fa4e3SBjoern A. Zeeb int max_num_pending_tx; 1885*da8fa4e3SBjoern A. Zeeb int num_pending_tx; 1886*da8fa4e3SBjoern A. Zeeb int num_pending_mgmt_tx; 1887*da8fa4e3SBjoern A. Zeeb struct idr pending_tx; 1888*da8fa4e3SBjoern A. Zeeb wait_queue_head_t empty_tx_wq; 1889*da8fa4e3SBjoern A. Zeeb 1890*da8fa4e3SBjoern A. Zeeb /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */ 1891*da8fa4e3SBjoern A. Zeeb DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); 1892*da8fa4e3SBjoern A. Zeeb 1893*da8fa4e3SBjoern A. Zeeb /* set if host-fw communication goes haywire 1894*da8fa4e3SBjoern A. Zeeb * used to avoid further failures 1895*da8fa4e3SBjoern A. Zeeb */ 1896*da8fa4e3SBjoern A. Zeeb bool rx_confused; 1897*da8fa4e3SBjoern A. Zeeb atomic_t num_mpdus_ready; 1898*da8fa4e3SBjoern A. Zeeb 1899*da8fa4e3SBjoern A. Zeeb /* This is used to group tx/rx completions separately and process them 1900*da8fa4e3SBjoern A. Zeeb * in batches to reduce cache stalls 1901*da8fa4e3SBjoern A. Zeeb */ 1902*da8fa4e3SBjoern A. Zeeb struct sk_buff_head rx_msdus_q; 1903*da8fa4e3SBjoern A. 
Zeeb struct sk_buff_head rx_in_ord_compl_q; 1904*da8fa4e3SBjoern A. Zeeb struct sk_buff_head tx_fetch_ind_q; 1905*da8fa4e3SBjoern A. Zeeb 1906*da8fa4e3SBjoern A. Zeeb /* rx_status template */ 1907*da8fa4e3SBjoern A. Zeeb struct ieee80211_rx_status rx_status; 1908*da8fa4e3SBjoern A. Zeeb 1909*da8fa4e3SBjoern A. Zeeb struct { 1910*da8fa4e3SBjoern A. Zeeb dma_addr_t paddr; 1911*da8fa4e3SBjoern A. Zeeb union { 1912*da8fa4e3SBjoern A. Zeeb struct htt_msdu_ext_desc *vaddr_desc_32; 1913*da8fa4e3SBjoern A. Zeeb struct htt_msdu_ext_desc_64 *vaddr_desc_64; 1914*da8fa4e3SBjoern A. Zeeb }; 1915*da8fa4e3SBjoern A. Zeeb size_t size; 1916*da8fa4e3SBjoern A. Zeeb } frag_desc; 1917*da8fa4e3SBjoern A. Zeeb 1918*da8fa4e3SBjoern A. Zeeb struct { 1919*da8fa4e3SBjoern A. Zeeb dma_addr_t paddr; 1920*da8fa4e3SBjoern A. Zeeb union { 1921*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_txbuf_32 *vaddr_txbuff_32; 1922*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_txbuf_64 *vaddr_txbuff_64; 1923*da8fa4e3SBjoern A. Zeeb }; 1924*da8fa4e3SBjoern A. Zeeb size_t size; 1925*da8fa4e3SBjoern A. Zeeb } txbuf; 1926*da8fa4e3SBjoern A. Zeeb 1927*da8fa4e3SBjoern A. Zeeb struct { 1928*da8fa4e3SBjoern A. Zeeb bool enabled; 1929*da8fa4e3SBjoern A. Zeeb struct htt_q_state *vaddr; 1930*da8fa4e3SBjoern A. Zeeb dma_addr_t paddr; 1931*da8fa4e3SBjoern A. Zeeb u16 num_push_allowed; 1932*da8fa4e3SBjoern A. Zeeb u16 num_peers; 1933*da8fa4e3SBjoern A. Zeeb u16 num_tids; 1934*da8fa4e3SBjoern A. Zeeb enum htt_tx_mode_switch_mode mode; 1935*da8fa4e3SBjoern A. Zeeb enum htt_q_depth_type type; 1936*da8fa4e3SBjoern A. Zeeb } tx_q_state; 1937*da8fa4e3SBjoern A. Zeeb 1938*da8fa4e3SBjoern A. Zeeb bool tx_mem_allocated; 1939*da8fa4e3SBjoern A. Zeeb const struct ath10k_htt_tx_ops *tx_ops; 1940*da8fa4e3SBjoern A. Zeeb const struct ath10k_htt_rx_ops *rx_ops; 1941*da8fa4e3SBjoern A. Zeeb bool disable_tx_comp; 1942*da8fa4e3SBjoern A. Zeeb bool bundle_tx; 1943*da8fa4e3SBjoern A. Zeeb struct sk_buff_head tx_req_head; 1944*da8fa4e3SBjoern A. Zeeb struct sk_buff_head tx_complete_head; 1945*da8fa4e3SBjoern A. Zeeb }; 1946*da8fa4e3SBjoern A. Zeeb 1947*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_tx_ops { 1948*da8fa4e3SBjoern A. Zeeb int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt); 1949*da8fa4e3SBjoern A. Zeeb int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt); 1950*da8fa4e3SBjoern A. Zeeb int (*htt_alloc_frag_desc)(struct ath10k_htt *htt); 1951*da8fa4e3SBjoern A. Zeeb void (*htt_free_frag_desc)(struct ath10k_htt *htt); 1952*da8fa4e3SBjoern A. Zeeb int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, 1953*da8fa4e3SBjoern A. Zeeb struct sk_buff *msdu); 1954*da8fa4e3SBjoern A. Zeeb int (*htt_alloc_txbuff)(struct ath10k_htt *htt); 1955*da8fa4e3SBjoern A. Zeeb void (*htt_free_txbuff)(struct ath10k_htt *htt); 1956*da8fa4e3SBjoern A. Zeeb int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, 1957*da8fa4e3SBjoern A. Zeeb u8 max_subfrms_ampdu, 1958*da8fa4e3SBjoern A. Zeeb u8 max_subfrms_amsdu); 1959*da8fa4e3SBjoern A. Zeeb void (*htt_flush_tx)(struct ath10k_htt *htt); 1960*da8fa4e3SBjoern A. Zeeb }; 1961*da8fa4e3SBjoern A. Zeeb 1962*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) 1963*da8fa4e3SBjoern A. Zeeb { 1964*da8fa4e3SBjoern A. Zeeb if (!htt->tx_ops->htt_send_rx_ring_cfg) 1965*da8fa4e3SBjoern A. Zeeb return -EOPNOTSUPP; 1966*da8fa4e3SBjoern A. Zeeb 1967*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_send_rx_ring_cfg(htt); 1968*da8fa4e3SBjoern A. Zeeb } 1969*da8fa4e3SBjoern A. Zeeb 1970*da8fa4e3SBjoern A. 
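/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * the wrapper above returns -EOPNOTSUPP when a hw family does not provide
 * the hook, so a caller can either treat a missing callback as a hard
 * error or silently skip the optional step.
 */
static inline int example_htt_check_tx_ops(struct ath10k_htt *htt)
{
	if (!htt->tx_ops->htt_tx)
		return -EINVAL;	/* htt_tx is required for any tx path */

	return ath10k_htt_send_rx_ring_cfg(htt);
}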
Zeeb static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) 1971*da8fa4e3SBjoern A. Zeeb { 1972*da8fa4e3SBjoern A. Zeeb if (!htt->tx_ops->htt_send_frag_desc_bank_cfg) 1973*da8fa4e3SBjoern A. Zeeb return -EOPNOTSUPP; 1974*da8fa4e3SBjoern A. Zeeb 1975*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); 1976*da8fa4e3SBjoern A. Zeeb } 1977*da8fa4e3SBjoern A. Zeeb 1978*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt) 1979*da8fa4e3SBjoern A. Zeeb { 1980*da8fa4e3SBjoern A. Zeeb if (!htt->tx_ops->htt_alloc_frag_desc) 1981*da8fa4e3SBjoern A. Zeeb return -EOPNOTSUPP; 1982*da8fa4e3SBjoern A. Zeeb 1983*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_alloc_frag_desc(htt); 1984*da8fa4e3SBjoern A. Zeeb } 1985*da8fa4e3SBjoern A. Zeeb 1986*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt) 1987*da8fa4e3SBjoern A. Zeeb { 1988*da8fa4e3SBjoern A. Zeeb if (htt->tx_ops->htt_free_frag_desc) 1989*da8fa4e3SBjoern A. Zeeb htt->tx_ops->htt_free_frag_desc(htt); 1990*da8fa4e3SBjoern A. Zeeb } 1991*da8fa4e3SBjoern A. Zeeb 1992*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_tx(struct ath10k_htt *htt, 1993*da8fa4e3SBjoern A. Zeeb enum ath10k_hw_txrx_mode txmode, 1994*da8fa4e3SBjoern A. Zeeb struct sk_buff *msdu) 1995*da8fa4e3SBjoern A. Zeeb { 1996*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_tx(htt, txmode, msdu); 1997*da8fa4e3SBjoern A. Zeeb } 1998*da8fa4e3SBjoern A. Zeeb 1999*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) 2000*da8fa4e3SBjoern A. Zeeb { 2001*da8fa4e3SBjoern A. Zeeb if (htt->tx_ops->htt_flush_tx) 2002*da8fa4e3SBjoern A. Zeeb htt->tx_ops->htt_flush_tx(htt); 2003*da8fa4e3SBjoern A. Zeeb } 2004*da8fa4e3SBjoern A. Zeeb 2005*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) 2006*da8fa4e3SBjoern A. Zeeb { 2007*da8fa4e3SBjoern A. Zeeb if (!htt->tx_ops->htt_alloc_txbuff) 2008*da8fa4e3SBjoern A. Zeeb return -EOPNOTSUPP; 2009*da8fa4e3SBjoern A. Zeeb 2010*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_alloc_txbuff(htt); 2011*da8fa4e3SBjoern A. Zeeb } 2012*da8fa4e3SBjoern A. Zeeb 2013*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt) 2014*da8fa4e3SBjoern A. Zeeb { 2015*da8fa4e3SBjoern A. Zeeb if (htt->tx_ops->htt_free_txbuff) 2016*da8fa4e3SBjoern A. Zeeb htt->tx_ops->htt_free_txbuff(htt); 2017*da8fa4e3SBjoern A. Zeeb } 2018*da8fa4e3SBjoern A. Zeeb 2019*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, 2020*da8fa4e3SBjoern A. Zeeb u8 max_subfrms_ampdu, 2021*da8fa4e3SBjoern A. Zeeb u8 max_subfrms_amsdu) 2022*da8fa4e3SBjoern A. Zeeb 2023*da8fa4e3SBjoern A. Zeeb { 2024*da8fa4e3SBjoern A. Zeeb if (!htt->tx_ops->htt_h2t_aggr_cfg_msg) 2025*da8fa4e3SBjoern A. Zeeb return -EOPNOTSUPP; 2026*da8fa4e3SBjoern A. Zeeb 2027*da8fa4e3SBjoern A. Zeeb return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt, 2028*da8fa4e3SBjoern A. Zeeb max_subfrms_ampdu, 2029*da8fa4e3SBjoern A. Zeeb max_subfrms_amsdu); 2030*da8fa4e3SBjoern A. Zeeb } 2031*da8fa4e3SBjoern A. Zeeb 2032*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_rx_ops { 2033*da8fa4e3SBjoern A. Zeeb size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); 2034*da8fa4e3SBjoern A. Zeeb void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); 2035*da8fa4e3SBjoern A. Zeeb void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr, 2036*da8fa4e3SBjoern A. 
Zeeb int idx); 2037*da8fa4e3SBjoern A. Zeeb void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt); 2038*da8fa4e3SBjoern A. Zeeb void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); 2039*da8fa4e3SBjoern A. Zeeb bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt, 2040*da8fa4e3SBjoern A. Zeeb struct htt_rx_fragment_indication *rx, 2041*da8fa4e3SBjoern A. Zeeb struct sk_buff *skb); 2042*da8fa4e3SBjoern A. Zeeb }; 2043*da8fa4e3SBjoern A. Zeeb 2044*da8fa4e3SBjoern A. Zeeb static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt) 2045*da8fa4e3SBjoern A. Zeeb { 2046*da8fa4e3SBjoern A. Zeeb if (!htt->rx_ops->htt_get_rx_ring_size) 2047*da8fa4e3SBjoern A. Zeeb return 0; 2048*da8fa4e3SBjoern A. Zeeb 2049*da8fa4e3SBjoern A. Zeeb return htt->rx_ops->htt_get_rx_ring_size(htt); 2050*da8fa4e3SBjoern A. Zeeb } 2051*da8fa4e3SBjoern A. Zeeb 2052*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt, 2053*da8fa4e3SBjoern A. Zeeb void *vaddr) 2054*da8fa4e3SBjoern A. Zeeb { 2055*da8fa4e3SBjoern A. Zeeb if (htt->rx_ops->htt_config_paddrs_ring) 2056*da8fa4e3SBjoern A. Zeeb htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); 2057*da8fa4e3SBjoern A. Zeeb } 2058*da8fa4e3SBjoern A. Zeeb 2059*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt, 2060*da8fa4e3SBjoern A. Zeeb dma_addr_t paddr, 2061*da8fa4e3SBjoern A. Zeeb int idx) 2062*da8fa4e3SBjoern A. Zeeb { 2063*da8fa4e3SBjoern A. Zeeb if (htt->rx_ops->htt_set_paddrs_ring) 2064*da8fa4e3SBjoern A. Zeeb htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); 2065*da8fa4e3SBjoern A. Zeeb } 2066*da8fa4e3SBjoern A. Zeeb 2067*da8fa4e3SBjoern A. Zeeb static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt) 2068*da8fa4e3SBjoern A. Zeeb { 2069*da8fa4e3SBjoern A. Zeeb if (!htt->rx_ops->htt_get_vaddr_ring) 2070*da8fa4e3SBjoern A. Zeeb return NULL; 2071*da8fa4e3SBjoern A. Zeeb 2072*da8fa4e3SBjoern A. Zeeb return htt->rx_ops->htt_get_vaddr_ring(htt); 2073*da8fa4e3SBjoern A. Zeeb } 2074*da8fa4e3SBjoern A. Zeeb 2075*da8fa4e3SBjoern A. Zeeb static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx) 2076*da8fa4e3SBjoern A. Zeeb { 2077*da8fa4e3SBjoern A. Zeeb if (htt->rx_ops->htt_reset_paddrs_ring) 2078*da8fa4e3SBjoern A. Zeeb htt->rx_ops->htt_reset_paddrs_ring(htt, idx); 2079*da8fa4e3SBjoern A. Zeeb } 2080*da8fa4e3SBjoern A. Zeeb 2081*da8fa4e3SBjoern A. Zeeb static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt, 2082*da8fa4e3SBjoern A. Zeeb struct htt_rx_fragment_indication *rx, 2083*da8fa4e3SBjoern A. Zeeb struct sk_buff *skb) 2084*da8fa4e3SBjoern A. Zeeb { 2085*da8fa4e3SBjoern A. Zeeb if (!htt->rx_ops->htt_rx_proc_rx_frag_ind) 2086*da8fa4e3SBjoern A. Zeeb return true; 2087*da8fa4e3SBjoern A. Zeeb 2088*da8fa4e3SBjoern A. Zeeb return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb); 2089*da8fa4e3SBjoern A. Zeeb } 2090*da8fa4e3SBjoern A. Zeeb 2091*da8fa4e3SBjoern A. Zeeb /* the driver strongly assumes that the rx header status be 64 bytes long, 2092*da8fa4e3SBjoern A. Zeeb * so all possible rx_desc structures must respect this assumption. 2093*da8fa4e3SBjoern A. Zeeb */ 2094*da8fa4e3SBjoern A. Zeeb #define RX_HTT_HDR_STATUS_LEN 64 2095*da8fa4e3SBjoern A. Zeeb 2096*da8fa4e3SBjoern A. Zeeb /* The rx descriptor structure layout is programmed via rx ring setup 2097*da8fa4e3SBjoern A. Zeeb * so that FW knows how to transfer the rx descriptor to the host. 2098*da8fa4e3SBjoern A. 
Zeeb * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly 2099*da8fa4e3SBjoern A. Zeeb * when modifying the structure layout of the rx descriptor beyond what it expects 2100*da8fa4e3SBjoern A. Zeeb * (even if it correctly programmed during the rx ring setup). 2101*da8fa4e3SBjoern A. Zeeb * Therefore we must keep two different memory layouts, abstract the rx descriptor 2102*da8fa4e3SBjoern A. Zeeb * representation and use ath10k_rx_desc_ops 2103*da8fa4e3SBjoern A. Zeeb * for correctly accessing rx descriptor data. 2104*da8fa4e3SBjoern A. Zeeb */ 2105*da8fa4e3SBjoern A. Zeeb 2106*da8fa4e3SBjoern A. Zeeb /* base struct used for abstracting the rx descritor representation */ 2107*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc { 2108*da8fa4e3SBjoern A. Zeeb union { 2109*da8fa4e3SBjoern A. Zeeb /* This field is filled on the host using the msdu buffer 2110*da8fa4e3SBjoern A. Zeeb * from htt_rx_indication 2111*da8fa4e3SBjoern A. Zeeb */ 2112*da8fa4e3SBjoern A. Zeeb struct fw_rx_desc_base fw_desc; 2113*da8fa4e3SBjoern A. Zeeb u32 pad; 2114*da8fa4e3SBjoern A. Zeeb } __packed; 2115*da8fa4e3SBjoern A. Zeeb } __packed; 2116*da8fa4e3SBjoern A. Zeeb 2117*da8fa4e3SBjoern A. Zeeb /* rx descriptor for wcn3990 and possibly extensible for newer cards 2118*da8fa4e3SBjoern A. Zeeb * Buffers like this are placed on the rx ring. 2119*da8fa4e3SBjoern A. Zeeb */ 2120*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v2 { 2121*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc base; 2122*da8fa4e3SBjoern A. Zeeb struct { 2123*da8fa4e3SBjoern A. Zeeb struct rx_attention attention; 2124*da8fa4e3SBjoern A. Zeeb struct rx_frag_info frag_info; 2125*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_start mpdu_start; 2126*da8fa4e3SBjoern A. Zeeb struct rx_msdu_start msdu_start; 2127*da8fa4e3SBjoern A. Zeeb struct rx_msdu_end msdu_end; 2128*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_end mpdu_end; 2129*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_start ppdu_start; 2130*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_end ppdu_end; 2131*da8fa4e3SBjoern A. Zeeb } __packed; 2132*da8fa4e3SBjoern A. Zeeb u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; 2133*da8fa4e3SBjoern A. Zeeb u8 msdu_payload[]; 2134*da8fa4e3SBjoern A. Zeeb }; 2135*da8fa4e3SBjoern A. Zeeb 2136*da8fa4e3SBjoern A. Zeeb /* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware 2137*da8fa4e3SBjoern A. Zeeb * works correctly. We keep a single rx descriptor for all these three 2138*da8fa4e3SBjoern A. Zeeb * families of cards because from tests it seems to be the most stable solution, 2139*da8fa4e3SBjoern A. Zeeb * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes 2140*da8fa4e3SBjoern A. Zeeb * during some tests. 2141*da8fa4e3SBjoern A. Zeeb * Buffers like this are placed on the rx ring. 2142*da8fa4e3SBjoern A. Zeeb */ 2143*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 { 2144*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc base; 2145*da8fa4e3SBjoern A. Zeeb struct { 2146*da8fa4e3SBjoern A. Zeeb struct rx_attention attention; 2147*da8fa4e3SBjoern A. Zeeb struct rx_frag_info_v1 frag_info; 2148*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_start mpdu_start; 2149*da8fa4e3SBjoern A. Zeeb struct rx_msdu_start_v1 msdu_start; 2150*da8fa4e3SBjoern A. Zeeb struct rx_msdu_end_v1 msdu_end; 2151*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_end mpdu_end; 2152*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_start ppdu_start; 2153*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_end_v1 ppdu_end; 2154*da8fa4e3SBjoern A. Zeeb } __packed; 2155*da8fa4e3SBjoern A. 
Zeeb u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; 2156*da8fa4e3SBjoern A. Zeeb u8 msdu_payload[]; 2157*da8fa4e3SBjoern A. Zeeb }; 2158*da8fa4e3SBjoern A. Zeeb 2159*da8fa4e3SBjoern A. Zeeb /* rx_desc abstraction */ 2160*da8fa4e3SBjoern A. Zeeb struct ath10k_htt_rx_desc_ops { 2161*da8fa4e3SBjoern A. Zeeb /* These fields are mandatory; they must be specified in any instance */ 2162*da8fa4e3SBjoern A. Zeeb 2163*da8fa4e3SBjoern A. Zeeb /* sizeof() of the rx_desc structure used by this hw */ 2164*da8fa4e3SBjoern A. Zeeb size_t rx_desc_size; 2165*da8fa4e3SBjoern A. Zeeb 2166*da8fa4e3SBjoern A. Zeeb /* offset of msdu_payload inside the rx_desc structure used by this hw */ 2167*da8fa4e3SBjoern A. Zeeb size_t rx_desc_msdu_payload_offset; 2168*da8fa4e3SBjoern A. Zeeb 2169*da8fa4e3SBjoern A. Zeeb /* These fields are optional. 2170*da8fa4e3SBjoern A. Zeeb * When a field is not provided, the default implementation is used 2171*da8fa4e3SBjoern A. Zeeb * (see the ath10k_htt_rx_desc_* operations below for more info about the defaults) 2172*da8fa4e3SBjoern A. Zeeb */ 2173*da8fa4e3SBjoern A. Zeeb bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); 2174*da8fa4e3SBjoern A. Zeeb int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); 2175*da8fa4e3SBjoern A. Zeeb 2176*da8fa4e3SBjoern A. Zeeb /* Safely cast from a void* buffer containing an rx descriptor 2177*da8fa4e3SBjoern A. Zeeb * to the proper rx_desc structure 2178*da8fa4e3SBjoern A. Zeeb */ 2179*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff); 2180*da8fa4e3SBjoern A. Zeeb 2181*da8fa4e3SBjoern A. Zeeb void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs); 2182*da8fa4e3SBjoern A. Zeeb struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd); 2183*da8fa4e3SBjoern A. Zeeb struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd); 2184*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd); 2185*da8fa4e3SBjoern A. Zeeb struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd); 2186*da8fa4e3SBjoern A. Zeeb struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd); 2187*da8fa4e3SBjoern A. Zeeb struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd); 2188*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd); 2189*da8fa4e3SBjoern A. Zeeb struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd); 2190*da8fa4e3SBjoern A. Zeeb u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd); 2191*da8fa4e3SBjoern A. Zeeb u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd); 2192*da8fa4e3SBjoern A. Zeeb }; 2193*da8fa4e3SBjoern A. Zeeb 2194*da8fa4e3SBjoern A. Zeeb extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops; 2195*da8fa4e3SBjoern A. Zeeb extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops; 2196*da8fa4e3SBjoern A. Zeeb extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops; 2197*da8fa4e3SBjoern A. Zeeb 2198*da8fa4e3SBjoern A. Zeeb static inline int 2199*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2200*da8fa4e3SBjoern A. Zeeb { 2201*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes) 2202*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd); 2203*da8fa4e3SBjoern A. Zeeb return 0; 2204*da8fa4e3SBjoern A. Zeeb } 2205*da8fa4e3SBjoern A. Zeeb
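/*
 * Illustrative sketch (not part of the driver): given the mandatory/optional
 * split above, a hw that keeps the legacy htt_rx_desc_v1 layout could in
 * principle fill in only the two mandatory fields and rely on the default
 * getters below, along these lines (example_v1_rx_desc_ops is a hypothetical
 * name used only for this example):
 *
 *	static const struct ath10k_htt_rx_desc_ops example_v1_rx_desc_ops = {
 *		.rx_desc_size = sizeof(struct htt_rx_desc_v1),
 *		.rx_desc_msdu_payload_offset =
 *			offsetof(struct htt_rx_desc_v1, msdu_payload),
 *	};
 *
 * A hw using htt_rx_desc_v2 (e.g. wcn3990) must instead provide every optional
 * callback, because the defaults interpret the buffer as htt_rx_desc_v1. The
 * real instances (qca988x_rx_desc_ops etc.) are defined elsewhere in the driver.
 */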
2206*da8fa4e3SBjoern A. Zeeb static inline bool 2207*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2208*da8fa4e3SBjoern A. Zeeb { 2209*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error) 2210*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd); 2211*da8fa4e3SBjoern A. Zeeb return false; 2212*da8fa4e3SBjoern A. Zeeb } 2213*da8fa4e3SBjoern A. Zeeb 2214*da8fa4e3SBjoern A. Zeeb /* The default implementation of all these getters uses the old rx_desc, 2215*da8fa4e3SBjoern A. Zeeb * so that it is easier to define the ath10k_htt_rx_desc_ops instances. 2216*da8fa4e3SBjoern A. Zeeb * However, if new wireless cards must be supported, it would probably be better 2217*da8fa4e3SBjoern A. Zeeb * to switch the default implementation to the new rx_desc, since this would 2218*da8fa4e3SBjoern A. Zeeb * make extending it easier. 2219*da8fa4e3SBjoern A. Zeeb */ 2220*da8fa4e3SBjoern A. Zeeb static inline struct htt_rx_desc * 2221*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff) 2222*da8fa4e3SBjoern A. Zeeb { 2223*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_from_raw_buffer) 2224*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff); 2225*da8fa4e3SBjoern A. Zeeb return &((struct htt_rx_desc_v1 *)buff)->base; 2226*da8fa4e3SBjoern A. Zeeb } 2227*da8fa4e3SBjoern A. Zeeb 2228*da8fa4e3SBjoern A. Zeeb static inline void 2229*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw, 2230*da8fa4e3SBjoern A. Zeeb struct htt_rx_ring_rx_desc_offsets *off) 2231*da8fa4e3SBjoern A. Zeeb { 2232*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_offsets) { 2233*da8fa4e3SBjoern A. Zeeb hw->rx_desc_ops->rx_desc_get_offsets(off); 2234*da8fa4e3SBjoern A. Zeeb } else { 2235*da8fa4e3SBjoern A. Zeeb #define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4) 2236*da8fa4e3SBjoern A. Zeeb off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); 2237*da8fa4e3SBjoern A. Zeeb off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); 2238*da8fa4e3SBjoern A. Zeeb off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); 2239*da8fa4e3SBjoern A. Zeeb off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); 2240*da8fa4e3SBjoern A. Zeeb off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); 2241*da8fa4e3SBjoern A. Zeeb off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); 2242*da8fa4e3SBjoern A. Zeeb off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); 2243*da8fa4e3SBjoern A. Zeeb off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); 2244*da8fa4e3SBjoern A. Zeeb off->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); 2245*da8fa4e3SBjoern A. Zeeb off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); 2246*da8fa4e3SBjoern A. Zeeb #undef desc_offset 2247*da8fa4e3SBjoern A. Zeeb } 2248*da8fa4e3SBjoern A. Zeeb } 2249*da8fa4e3SBjoern A. Zeeb 2250*da8fa4e3SBjoern A. Zeeb static inline struct rx_attention * 2251*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2252*da8fa4e3SBjoern A. Zeeb { 2253*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2254*da8fa4e3SBjoern A. Zeeb 2255*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_attention) 2256*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_attention(rxd); 2257*da8fa4e3SBjoern A. Zeeb 2258*da8fa4e3SBjoern A.
Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2259*da8fa4e3SBjoern A. Zeeb return &rx_desc->attention; 2260*da8fa4e3SBjoern A. Zeeb } 2261*da8fa4e3SBjoern A. Zeeb 2262*da8fa4e3SBjoern A. Zeeb static inline struct rx_frag_info_common * 2263*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2264*da8fa4e3SBjoern A. Zeeb { 2265*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2266*da8fa4e3SBjoern A. Zeeb 2267*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_frag_info) 2268*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_frag_info(rxd); 2269*da8fa4e3SBjoern A. Zeeb 2270*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2271*da8fa4e3SBjoern A. Zeeb return &rx_desc->frag_info.common; 2272*da8fa4e3SBjoern A. Zeeb } 2273*da8fa4e3SBjoern A. Zeeb 2274*da8fa4e3SBjoern A. Zeeb static inline struct rx_mpdu_start * 2275*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2276*da8fa4e3SBjoern A. Zeeb { 2277*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2278*da8fa4e3SBjoern A. Zeeb 2279*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_mpdu_start) 2280*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd); 2281*da8fa4e3SBjoern A. Zeeb 2282*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2283*da8fa4e3SBjoern A. Zeeb return &rx_desc->mpdu_start; 2284*da8fa4e3SBjoern A. Zeeb } 2285*da8fa4e3SBjoern A. Zeeb 2286*da8fa4e3SBjoern A. Zeeb static inline struct rx_mpdu_end * 2287*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2288*da8fa4e3SBjoern A. Zeeb { 2289*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2290*da8fa4e3SBjoern A. Zeeb 2291*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_mpdu_end) 2292*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd); 2293*da8fa4e3SBjoern A. Zeeb 2294*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2295*da8fa4e3SBjoern A. Zeeb return &rx_desc->mpdu_end; 2296*da8fa4e3SBjoern A. Zeeb } 2297*da8fa4e3SBjoern A. Zeeb 2298*da8fa4e3SBjoern A. Zeeb static inline struct rx_msdu_start_common * 2299*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2300*da8fa4e3SBjoern A. Zeeb { 2301*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2302*da8fa4e3SBjoern A. Zeeb 2303*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_msdu_start) 2304*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd); 2305*da8fa4e3SBjoern A. Zeeb 2306*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2307*da8fa4e3SBjoern A. Zeeb return &rx_desc->msdu_start.common; 2308*da8fa4e3SBjoern A. Zeeb } 2309*da8fa4e3SBjoern A. Zeeb 2310*da8fa4e3SBjoern A. Zeeb static inline struct rx_msdu_end_common * 2311*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2312*da8fa4e3SBjoern A. Zeeb { 2313*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2314*da8fa4e3SBjoern A. Zeeb 2315*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_msdu_end) 2316*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd); 2317*da8fa4e3SBjoern A. Zeeb 2318*da8fa4e3SBjoern A. 
Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2319*da8fa4e3SBjoern A. Zeeb return &rx_desc->msdu_end.common; 2320*da8fa4e3SBjoern A. Zeeb } 2321*da8fa4e3SBjoern A. Zeeb 2322*da8fa4e3SBjoern A. Zeeb static inline struct rx_ppdu_start * 2323*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2324*da8fa4e3SBjoern A. Zeeb { 2325*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2326*da8fa4e3SBjoern A. Zeeb 2327*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_ppdu_start) 2328*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd); 2329*da8fa4e3SBjoern A. Zeeb 2330*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2331*da8fa4e3SBjoern A. Zeeb return &rx_desc->ppdu_start; 2332*da8fa4e3SBjoern A. Zeeb } 2333*da8fa4e3SBjoern A. Zeeb 2334*da8fa4e3SBjoern A. Zeeb static inline struct rx_ppdu_end_common * 2335*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2336*da8fa4e3SBjoern A. Zeeb { 2337*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2338*da8fa4e3SBjoern A. Zeeb 2339*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_ppdu_end) 2340*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd); 2341*da8fa4e3SBjoern A. Zeeb 2342*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2343*da8fa4e3SBjoern A. Zeeb return &rx_desc->ppdu_end.common; 2344*da8fa4e3SBjoern A. Zeeb } 2345*da8fa4e3SBjoern A. Zeeb 2346*da8fa4e3SBjoern A. Zeeb static inline u8 * 2347*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2348*da8fa4e3SBjoern A. Zeeb { 2349*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2350*da8fa4e3SBjoern A. Zeeb 2351*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status) 2352*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd); 2353*da8fa4e3SBjoern A. Zeeb 2354*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2355*da8fa4e3SBjoern A. Zeeb return rx_desc->rx_hdr_status; 2356*da8fa4e3SBjoern A. Zeeb } 2357*da8fa4e3SBjoern A. Zeeb 2358*da8fa4e3SBjoern A. Zeeb static inline u8 * 2359*da8fa4e3SBjoern A. Zeeb ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) 2360*da8fa4e3SBjoern A. Zeeb { 2361*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_v1 *rx_desc; 2362*da8fa4e3SBjoern A. Zeeb 2363*da8fa4e3SBjoern A. Zeeb if (hw->rx_desc_ops->rx_desc_get_msdu_payload) 2364*da8fa4e3SBjoern A. Zeeb return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd); 2365*da8fa4e3SBjoern A. Zeeb 2366*da8fa4e3SBjoern A. Zeeb rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); 2367*da8fa4e3SBjoern A. Zeeb return rx_desc->msdu_payload; 2368*da8fa4e3SBjoern A. Zeeb } 2369*da8fa4e3SBjoern A. Zeeb 2370*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff 2371*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0 2372*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000 2373*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12 2374*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000 2375*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13 2376*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000 2377*da8fa4e3SBjoern A. 
Zeeb #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16 2378*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000 2379*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17 2380*da8fa4e3SBjoern A. Zeeb 2381*da8fa4e3SBjoern A. Zeeb struct htt_rx_desc_base_hl { 2382*da8fa4e3SBjoern A. Zeeb __le32 info; /* HTT_RX_DESC_HL_INFO_ */ 2383*da8fa4e3SBjoern A. Zeeb }; 2384*da8fa4e3SBjoern A. Zeeb 2385*da8fa4e3SBjoern A. Zeeb struct htt_rx_chan_info { 2386*da8fa4e3SBjoern A. Zeeb __le16 primary_chan_center_freq_mhz; 2387*da8fa4e3SBjoern A. Zeeb __le16 contig_chan1_center_freq_mhz; 2388*da8fa4e3SBjoern A. Zeeb __le16 contig_chan2_center_freq_mhz; 2389*da8fa4e3SBjoern A. Zeeb u8 phy_mode; 2390*da8fa4e3SBjoern A. Zeeb u8 reserved; 2391*da8fa4e3SBjoern A. Zeeb } __packed; 2392*da8fa4e3SBjoern A. Zeeb 2393*da8fa4e3SBjoern A. Zeeb #define HTT_RX_DESC_ALIGN 8 2394*da8fa4e3SBjoern A. Zeeb 2395*da8fa4e3SBjoern A. Zeeb #define HTT_MAC_ADDR_LEN 6 2396*da8fa4e3SBjoern A. Zeeb 2397*da8fa4e3SBjoern A. Zeeb /* 2398*da8fa4e3SBjoern A. Zeeb * FIX THIS 2399*da8fa4e3SBjoern A. Zeeb * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size, 2400*da8fa4e3SBjoern A. Zeeb * rounded up to a cache line size. 2401*da8fa4e3SBjoern A. Zeeb */ 2402*da8fa4e3SBjoern A. Zeeb #define HTT_RX_BUF_SIZE 2048 2403*da8fa4e3SBjoern A. Zeeb 2404*da8fa4e3SBjoern A. Zeeb /* The HTT_RX_MSDU_SIZE can't be statically computed anymore, 2405*da8fa4e3SBjoern A. Zeeb * because it depends on the underlying device rx_desc representation 2406*da8fa4e3SBjoern A. Zeeb */ 2407*da8fa4e3SBjoern A. Zeeb static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw) 2408*da8fa4e3SBjoern A. Zeeb { 2409*da8fa4e3SBjoern A. Zeeb return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size; 2410*da8fa4e3SBjoern A. Zeeb } 2411*da8fa4e3SBjoern A. Zeeb 2412*da8fa4e3SBjoern A. Zeeb /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle 2413*da8fa4e3SBjoern A. Zeeb * aggregated traffic more nicely. 2414*da8fa4e3SBjoern A. Zeeb */ 2415*da8fa4e3SBjoern A. Zeeb #define ATH10K_HTT_MAX_NUM_REFILL 100 2416*da8fa4e3SBjoern A. Zeeb 2417*da8fa4e3SBjoern A. Zeeb /* 2418*da8fa4e3SBjoern A. Zeeb * DMA_MAP expects the buffer to be an integral number of cache lines. 2419*da8fa4e3SBjoern A. Zeeb * Rather than checking the actual cache line size, this code makes a 2420*da8fa4e3SBjoern A. Zeeb * conservative estimate of what the cache line size could be. 2421*da8fa4e3SBjoern A. Zeeb */ 2422*da8fa4e3SBjoern A. Zeeb #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ 2423*da8fa4e3SBjoern A. Zeeb #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) 2424*da8fa4e3SBjoern A. Zeeb 2425*da8fa4e3SBjoern A. Zeeb /* These values are default in most firmware revisions and apparently are a 2426*da8fa4e3SBjoern A. Zeeb * sweet spot performance wise. 2427*da8fa4e3SBjoern A. Zeeb */ 2428*da8fa4e3SBjoern A. Zeeb #define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3 2429*da8fa4e3SBjoern A. Zeeb #define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64 2430*da8fa4e3SBjoern A. Zeeb 2431*da8fa4e3SBjoern A. Zeeb int ath10k_htt_connect(struct ath10k_htt *htt); 2432*da8fa4e3SBjoern A. Zeeb int ath10k_htt_init(struct ath10k *ar); 2433*da8fa4e3SBjoern A. Zeeb int ath10k_htt_setup(struct ath10k_htt *htt); 2434*da8fa4e3SBjoern A. Zeeb 2435*da8fa4e3SBjoern A. Zeeb int ath10k_htt_tx_start(struct ath10k_htt *htt); 2436*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_stop(struct ath10k_htt *htt); 2437*da8fa4e3SBjoern A. 
Zeeb void ath10k_htt_tx_destroy(struct ath10k_htt *htt); 2438*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_free(struct ath10k_htt *htt); 2439*da8fa4e3SBjoern A. Zeeb 2440*da8fa4e3SBjoern A. Zeeb int ath10k_htt_rx_alloc(struct ath10k_htt *htt); 2441*da8fa4e3SBjoern A. Zeeb int ath10k_htt_rx_ring_refill(struct ath10k *ar); 2442*da8fa4e3SBjoern A. Zeeb void ath10k_htt_rx_free(struct ath10k_htt *htt); 2443*da8fa4e3SBjoern A. Zeeb 2444*da8fa4e3SBjoern A. Zeeb void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 2445*da8fa4e3SBjoern A. Zeeb void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 2446*da8fa4e3SBjoern A. Zeeb bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 2447*da8fa4e3SBjoern A. Zeeb int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); 2448*da8fa4e3SBjoern A. Zeeb int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask, 2449*da8fa4e3SBjoern A. Zeeb u64 cookie); 2450*da8fa4e3SBjoern A. Zeeb void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); 2451*da8fa4e3SBjoern A. Zeeb int ath10k_htt_tx_fetch_resp(struct ath10k *ar, 2452*da8fa4e3SBjoern A. Zeeb __le32 token, 2453*da8fa4e3SBjoern A. Zeeb __le16 fetch_seq_num, 2454*da8fa4e3SBjoern A. Zeeb struct htt_tx_fetch_record *records, 2455*da8fa4e3SBjoern A. Zeeb size_t num_records); 2456*da8fa4e3SBjoern A. Zeeb void ath10k_htt_op_ep_tx_credits(struct ath10k *ar); 2457*da8fa4e3SBjoern A. Zeeb 2458*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, 2459*da8fa4e3SBjoern A. Zeeb struct ieee80211_txq *txq); 2460*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, 2461*da8fa4e3SBjoern A. Zeeb struct ieee80211_txq *txq); 2462*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_txq_sync(struct ath10k *ar); 2463*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); 2464*da8fa4e3SBjoern A. Zeeb int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt); 2465*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt); 2466*da8fa4e3SBjoern A. Zeeb int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, 2467*da8fa4e3SBjoern A. Zeeb bool is_presp); 2468*da8fa4e3SBjoern A. Zeeb 2469*da8fa4e3SBjoern A. Zeeb int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); 2470*da8fa4e3SBjoern A. Zeeb void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); 2471*da8fa4e3SBjoern A. Zeeb int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu); 2472*da8fa4e3SBjoern A. Zeeb void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 2473*da8fa4e3SBjoern A. Zeeb struct sk_buff *skb); 2474*da8fa4e3SBjoern A. Zeeb int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget); 2475*da8fa4e3SBjoern A. Zeeb int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget); 2476*da8fa4e3SBjoern A. Zeeb void ath10k_htt_set_tx_ops(struct ath10k_htt *htt); 2477*da8fa4e3SBjoern A. Zeeb void ath10k_htt_set_rx_ops(struct ath10k_htt *htt); 2478*da8fa4e3SBjoern A. Zeeb #endif 2479
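/*
 * Illustrative sketch (not part of the driver): callers are expected to go
 * through the per-hw rx_desc_ops dispatchers above rather than casting ring
 * buffers to htt_rx_desc_v1/v2 directly. A hypothetical helper, assuming
 * hw->rx_desc_ops has already been set up for the running hardware:
 *
 *	static u8 *example_msdu_payload(struct ath10k_hw_params *hw, void *buf)
 *	{
 *		struct htt_rx_desc *rxd;
 *
 *		rxd = ath10k_htt_rx_desc_from_raw_buffer(hw, buf);
 *		return ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
 *	}
 *
 * The MSDU capacity of such a ring buffer is what ath10k_htt_rx_msdu_size()
 * returns, i.e. HTT_RX_BUF_SIZE minus the per-hw rx_desc_size.
 */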