xref: /linux/drivers/net/wireless/marvell/mwifiex/11n_aggr.c (revision 8a922b7728a93d837954315c98b84f6b78de0c4f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: 802.11n Aggregation
 *
 * Copyright 2011-2020 NXP
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_aggr.h"

/*
 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
 *
 * The resultant AMSDU subframe format is -
 *
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * |     DA     |     SA      |   Length   | SNAP header |   MSDU     |
 * | data[0..5] | data[6..11] |            |             | data[14..] |
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
 *
 * This function also computes the amount of padding required to make the
 * buffer length a multiple of 4 bytes.
 *
 * Data => |DA|SA|SNAP-TYPE|........    .|
 * MSDU => |DA|SA|Length|SNAP|......   ..|
 */
static int
mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
			   struct sk_buff *skb_src, int *pad)
{
	int dt_offset;
	struct rfc_1042_hdr snap = {
		0xaa,		/* LLC DSAP */
		0xaa,		/* LLC SSAP */
		0x03,		/* LLC CTRL */
		{0x00, 0x00, 0x00},	/* SNAP OUI */
		0x0000		/* SNAP type, overwritten later
				 * with the ethertype
				 */
	};
	struct tx_packet_hdr *tx_header;

	tx_header = skb_put(skb_aggr, sizeof(*tx_header));

	/* Copy DA and SA */
	dt_offset = 2 * ETH_ALEN;
	memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);

	/* Copy SNAP header */
	snap.snap_type = ((struct ethhdr *)skb_src->data)->h_proto;

	dt_offset += sizeof(__be16);

	memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));

	skb_pull(skb_src, dt_offset);

	/* Update Length field */
	tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);

	/* Add payload */
	skb_put_data(skb_aggr, skb_src->data, skb_src->len);

	/* Add padding so the next MSDU starts on a 4-byte boundary */
	*pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;

	return skb_aggr->len + *pad;
}
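
/*
 * Illustrative sketch (not part of the driver): a standalone userspace
 * example of the 4-byte alignment padding computed above. The tail
 * addresses below are hypothetical; the driver applies the same
 * arithmetic to skb_aggr->tail.
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>

/* Pad so that the next A-MSDU subframe starts on a 4-byte boundary. */
static unsigned int amsdu_pad(unsigned long tail)
{
	return (4 - (tail & 0x3)) % 4;
}

int main(void)
{
	unsigned long tails[] = { 0x1000, 0x1001, 0x1002, 0x1003 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("tail=0x%lx -> pad=%u\n", tails[i], amsdu_pad(tails[i]));
	/* prints pad values 0, 3, 2, 1 respectively */
	return 0;
}
#endif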

/*
 * Adds a TxPD header to the AMSDU packet.
 *
 * Each AMSDU packet will contain one TxPD at the beginning,
 * followed by multiple AMSDU subframes.
 */
static void
mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct txpd *local_tx_pd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	skb_push(skb, sizeof(*local_tx_pd));

	local_tx_pd = (struct txpd *) skb->data;
	memset(local_tx_pd, 0, sizeof(struct txpd));

	/* Original priority has been overwritten */
	local_tx_pd->priority = (u8) skb->priority;
	local_tx_pd->pkt_delay_2ms =
		mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;
	/* The data always follows struct txpd immediately */
	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
	local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
	local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
						 sizeof(*local_tx_pd));

	if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
		local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;

	if (local_tx_pd->tx_control == 0)
		/* TxCtrl set by user or default */
		local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    priv->adapter->pps_uapsd_mode) {
		if (mwifiex_check_last_packet_indication(priv)) {
			priv->adapter->tx_lock_flag = true;
			local_tx_pd->flags =
				MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
		}
	}
}
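
/*
 * Illustrative sketch (not part of the driver): a standalone userspace
 * example of the descriptor-first layout built above. struct mock_txpd
 * and its two fields are hypothetical stand-ins for struct txpd; in the
 * driver, skb_push() makes room for the descriptor in the reserved
 * headroom and the A-MSDU subframes already sit behind it.
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>
#include <string.h>

struct mock_txpd {
	unsigned short tx_pkt_offset;	/* where the payload starts */
	unsigned short tx_pkt_length;	/* payload length, excluding the descriptor */
};

int main(void)
{
	unsigned char frame[256];
	size_t body_len = 100;	/* pretend the A-MSDU subframes occupy 100 bytes */
	struct mock_txpd pd;

	/* Descriptor first, payload immediately after it. */
	pd.tx_pkt_offset = sizeof(pd);
	pd.tx_pkt_length = (unsigned short)body_len;
	memcpy(frame, &pd, sizeof(pd));
	/* frame[sizeof(pd)] .. frame[sizeof(pd) + body_len - 1] hold the subframes */

	printf("total=%zu offset=%u length=%u\n",
	       sizeof(pd) + body_len, pd.tx_pkt_offset, pd.tx_pkt_length);
	return 0;
}
#endif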

/*
 * Create aggregated packet.
 *
 * This function creates an aggregated MSDU packet by combining buffers
 * from the RA list. Each individual buffer is encapsulated as an AMSDU
 * subframe and all such subframes are concatenated together to form the
 * AMSDU packet.
 *
 * A TxPD is also added to the front of the resultant AMSDU packet for
 * transmission. The resultant packet format is -
 *
 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
 * |    TxPD   |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
 * |           |       1       |       2       | .. |       n       |
 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
 */
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
			  struct mwifiex_ra_list_tbl *pra_list,
			  int ptrindex)
			  __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb_aggr, *skb_src;
	struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
	int pad = 0, aggr_num = 0, ret;
	struct mwifiex_tx_param tx_param;
	struct txpd *ptx_pd = NULL;
	int headroom = adapter->intf_hdr_len;

	skb_src = skb_peek(&pra_list->skb_head);
	if (!skb_src) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return 0;
	}

	tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
	skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
					       GFP_ATOMIC);
	if (!skb_aggr) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

	/* skb_aggr->data is already 64-byte aligned, just reserve the bus
	 * interface header and txpd.
	 */
	skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
	tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);

	memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
	tx_info_aggr->bss_type = tx_info_src->bss_type;
	tx_info_aggr->bss_num = tx_info_src->bss_num;

	if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
	tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
	skb_aggr->priority = skb_src->priority;
	skb_aggr->tstamp = skb_src->tstamp;

	do {
		/* Check whether the AMSDU can accommodate this MSDU */
		if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
		    adapter->tx_buf_size)
			break;

		skb_src = skb_dequeue(&pra_list->skb_head);
		pra_list->total_pkt_count--;
		atomic_dec(&priv->wmm.tx_pkts_queued);
		aggr_num++;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);

		mwifiex_write_data_complete(adapter, skb_src, 0, 0);

		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return -1;
		}

		if (skb_tailroom(skb_aggr) < pad) {
			pad = 0;
			break;
		}
		skb_put(skb_aggr, pad);

		skb_src = skb_peek(&pra_list->skb_head);

	} while (skb_src);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	/* The last AMSDU subframe does not need padding */
	skb_trim(skb_aggr, skb_aggr->len - pad);

	/* Form AMSDU */
	mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
		ptx_pd = (struct txpd *)skb_aggr->data;

	skb_push(skb_aggr, headroom);
	tx_info_aggr->aggr_num = aggr_num * 2;
	if (adapter->data_sent || adapter->tx_lock_flag) {
		atomic_add(aggr_num * 2, &adapter->tx_queued);
		skb_queue_tail(&adapter->tx_data_q, skb_aggr);
		return 0;
	}

	if (skb_src)
		tx_param.next_pkt_len = skb_src->len + sizeof(struct txpd);
	else
		tx_param.next_pkt_len = 0;

	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
						   skb_aggr, &tx_param);
	} else {
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb_aggr, &tx_param);
	}
	switch (ret) {
	case -EBUSY:
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
			return -1;
		}
		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
		    adapter->pps_uapsd_mode && adapter->tx_lock_flag) {
			priv->adapter->tx_lock_flag = false;
			if (ptx_pd)
				ptx_pd->flags = 0;
		}

		skb_queue_tail(&pra_list->skb_head, skb_aggr);

		pra_list->total_pkt_count++;

		atomic_inc(&priv->wmm.tx_pkts_queued);

		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		break;
	case -1:
		mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
			    __func__, ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
		return 0;
	case -EINPROGRESS:
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY)
		mwifiex_rotate_priolists(priv, pra_list, ptrindex);

	return 0;
}
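
/*
 * Illustrative sketch (not part of the driver): a standalone userspace
 * approximation of the subframe framing done by the aggregation loop
 * above. The helper name, prefixed constants, buffer sizes and addresses
 * are hypothetical; the real driver builds the body in an sk_buff, pads
 * based on the absolute tail address, trims the pad after the final
 * subframe, and prepends a TxPD plus bus headroom before handing the
 * frame to host_to_card().
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

#define EX_ETH_ALEN	6
#define EX_LLC_SNAP_LEN	8

static const unsigned char ex_rfc1042[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/*
 * Append one subframe (DA | SA | length | LLC/SNAP | ethertype | payload)
 * and pad the buffer to a 4-byte boundary, mirroring
 * mwifiex_11n_form_amsdu_pkt(). Returns the new body length.
 */
static size_t ex_add_subframe(unsigned char *buf, size_t off,
			      const unsigned char *da, const unsigned char *sa,
			      unsigned short ethertype,
			      const unsigned char *msdu, size_t msdu_len)
{
	unsigned short len_be = htons((unsigned short)(msdu_len + EX_LLC_SNAP_LEN));
	unsigned short type_be = htons(ethertype);

	memcpy(buf + off, da, EX_ETH_ALEN);
	off += EX_ETH_ALEN;
	memcpy(buf + off, sa, EX_ETH_ALEN);
	off += EX_ETH_ALEN;
	memcpy(buf + off, &len_be, sizeof(len_be));
	off += sizeof(len_be);
	memcpy(buf + off, ex_rfc1042, sizeof(ex_rfc1042));
	off += sizeof(ex_rfc1042);
	memcpy(buf + off, &type_be, sizeof(type_be));
	off += sizeof(type_be);
	memcpy(buf + off, msdu, msdu_len);
	off += msdu_len;

	while (off & 0x3)	/* next subframe starts 4-byte aligned */
		buf[off++] = 0;
	return off;
}

int main(void)
{
	unsigned char body[512] = { 0 };
	unsigned char da[EX_ETH_ALEN] = { 0 }, sa[EX_ETH_ALEN] = { 0 };
	unsigned char msdu1[46] = { 0 }, msdu2[61] = { 0 };
	size_t off = 0;

	off = ex_add_subframe(body, off, da, sa, 0x0800, msdu1, sizeof(msdu1));
	off = ex_add_subframe(body, off, da, sa, 0x0800, msdu2, sizeof(msdu2));
	/* the driver would trim the padding added after the final subframe */
	printf("A-MSDU body length: %zu bytes\n", off);
	return 0;
}
#endif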