1 // SPDX-License-Identifier: GPL-2.0
2 /******************************************************************************
3 * rtl8712_recv.c
4 *
5 * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
6 * Linux device driver for RTL8192SU
7 *
8 * Modifications for inclusion into the Linux staging tree are
9 * Copyright(c) 2010 Larry Finger. All rights reserved.
10 *
11 * Contact information:
12 * WLAN FAE <wlanfae@realtek.com>
13 * Larry Finger <Larry.Finger@lwfinger.net>
14 *
15 ******************************************************************************/
16
17 #define _RTL8712_RECV_C_
18
19 #include <linux/if_ether.h>
20 #include <linux/ip.h>
21 #include <net/cfg80211.h>
22
23 #include "osdep_service.h"
24 #include "drv_types.h"
25 #include "recv_osdep.h"
26 #include "mlme_osdep.h"
27 #include "ethernet.h"
28 #include "usb_ops.h"
29 #include "wifi.h"
30
31 static void recv_tasklet(struct tasklet_struct *t);
32
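/* Set up the receive resources: allocate NR_RECVBUFF recv_buf descriptors
 * (4-byte aligned) plus their URB/buffer resources, and pre-allocate
 * NR_PREALLOC_RECV_SKB receive skbs aligned to RECVBUFF_ALIGN_SZ.
 */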
int r8712_init_recv_priv(struct recv_priv *precvpriv,
			 struct _adapter *padapter)
35 {
36 int i;
37 struct recv_buf *precvbuf;
38 addr_t tmpaddr = 0;
39 int alignment = 0;
40 struct sk_buff *pskb = NULL;
41
42 /*init recv_buf*/
43 _init_queue(&precvpriv->free_recv_buf_queue);
44 precvpriv->pallocated_recv_buf =
45 kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC);
46 if (!precvpriv->pallocated_recv_buf)
47 return -ENOMEM;
48 precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
49 ((addr_t)(precvpriv->pallocated_recv_buf) & 3);
50 precvbuf = (struct recv_buf *)precvpriv->precv_buf;
51 for (i = 0; i < NR_RECVBUFF; i++) {
52 INIT_LIST_HEAD(&precvbuf->list);
53 spin_lock_init(&precvbuf->recvbuf_lock);
54 if (r8712_os_recvbuf_resource_alloc(padapter, precvbuf))
55 break;
56 precvbuf->ref_cnt = 0;
57 precvbuf->adapter = padapter;
58 list_add_tail(&precvbuf->list,
59 &precvpriv->free_recv_buf_queue.queue);
60 precvbuf++;
61 }
62 precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
63 tasklet_setup(&precvpriv->recv_tasklet, recv_tasklet);
64 skb_queue_head_init(&precvpriv->rx_skb_queue);
65
66 skb_queue_head_init(&precvpriv->free_recv_skb_queue);
67 for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
68 pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ +
69 RECVBUFF_ALIGN_SZ);
70 if (pskb) {
71 tmpaddr = (addr_t)pskb->data;
72 alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
73 skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
74 skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
75 }
76 pskb = NULL;
77 }
78 return 0;
79 }
80
void r8712_free_recv_priv(struct recv_priv *precvpriv)
82 {
83 int i;
84 struct recv_buf *precvbuf;
85 struct _adapter *padapter = precvpriv->adapter;
86
87 precvbuf = (struct recv_buf *)precvpriv->precv_buf;
88 for (i = 0; i < NR_RECVBUFF; i++) {
89 r8712_os_recvbuf_resource_free(padapter, precvbuf);
90 precvbuf++;
91 }
92 kfree(precvpriv->pallocated_recv_buf);
93 skb_queue_purge(&precvpriv->rx_skb_queue);
94 if (skb_queue_len(&precvpriv->rx_skb_queue))
95 netdev_warn(padapter->pnetdev, "r8712u: rx_skb_queue not empty\n");
96 skb_queue_purge(&precvpriv->free_recv_skb_queue);
97 if (skb_queue_len(&precvpriv->free_recv_skb_queue))
98 netdev_warn(padapter->pnetdev, "r8712u: free_recv_skb_queue not empty %d\n",
99 skb_queue_len(&precvpriv->free_recv_skb_queue));
100 }
101
void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
103 {
104 precvbuf->transfer_len = 0;
105 precvbuf->len = 0;
106 precvbuf->ref_cnt = 0;
107 if (precvbuf->pbuf) {
108 precvbuf->pdata = precvbuf->pbuf;
109 precvbuf->phead = precvbuf->pbuf;
110 precvbuf->ptail = precvbuf->pbuf;
111 precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ;
112 }
113 }
114
void r8712_free_recvframe(union recv_frame *precvframe,
			  struct __queue *pfree_recv_queue)
117 {
118 unsigned long irqL;
119 struct _adapter *padapter = precvframe->u.hdr.adapter;
120 struct recv_priv *precvpriv = &padapter->recvpriv;
121
122 if (precvframe->u.hdr.pkt) {
123 dev_kfree_skb_any(precvframe->u.hdr.pkt);/*free skb by driver*/
124 precvframe->u.hdr.pkt = NULL;
125 }
126 spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
127 list_del_init(&precvframe->u.hdr.list);
128 list_add_tail(&precvframe->u.hdr.list, &pfree_recv_queue->queue);
129 if (padapter) {
130 if (pfree_recv_queue == &precvpriv->free_recv_queue)
131 precvpriv->free_recvframe_cnt++;
132 }
133 spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
134 }
135
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
						   struct recv_stat *prxstat)
138 {
139 /*TODO:
140 * Offset 0
141 */
142 pattrib->bdecrypted = (le32_to_cpu(prxstat->rxdw0) & BIT(27)) == 0;
143 pattrib->crc_err = (le32_to_cpu(prxstat->rxdw0) & BIT(14)) != 0;
144 /*Offset 4*/
145 /*Offset 8*/
146 /*Offset 12*/
147 if (le32_to_cpu(prxstat->rxdw3) & BIT(13)) {
148 pattrib->tcpchk_valid = 1; /* valid */
149 if (le32_to_cpu(prxstat->rxdw3) & BIT(11))
150 pattrib->tcp_chkrpt = 1; /* correct */
151 else
152 pattrib->tcp_chkrpt = 0; /* incorrect */
153 if (le32_to_cpu(prxstat->rxdw3) & BIT(12))
154 pattrib->ip_chkrpt = 1; /* correct */
155 else
156 pattrib->ip_chkrpt = 0; /* incorrect */
157 } else {
158 pattrib->tcpchk_valid = 0; /* invalid */
159 }
160 pattrib->mcs_rate = (u8)((le32_to_cpu(prxstat->rxdw3)) & 0x3f);
161 pattrib->htc = (u8)((le32_to_cpu(prxstat->rxdw3) >> 14) & 0x1);
162 /*Offset 16*/
163 /*Offset 20*/
164 /*phy_info*/
165 }
166
/* Perform defragmentation: merge all fragments queued on defrag_q into the
 * first fragment and return it; on a fragment-sequence error, free the queue
 * and return NULL.
 */
static union recv_frame *recvframe_defrag(struct _adapter *adapter,
					   struct __queue *defrag_q)
170 {
171 struct list_head *plist, *phead;
172 u8 wlanhdr_offset;
173 u8 curfragnum;
174 struct recv_frame_hdr *pfhdr, *pnfhdr;
175 union recv_frame *prframe, *pnextrframe;
176 struct __queue *pfree_recv_queue;
177
178 pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
179 phead = &defrag_q->queue;
180 plist = phead->next;
181 prframe = container_of(plist, union recv_frame, u.list);
182 list_del_init(&prframe->u.list);
183 pfhdr = &prframe->u.hdr;
184 curfragnum = 0;
185 if (curfragnum != pfhdr->attrib.frag_num) {
186 /*the first fragment number must be 0
187 *free the whole queue
188 */
189 r8712_free_recvframe(prframe, pfree_recv_queue);
190 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
191 return NULL;
192 }
193 curfragnum++;
194 plist = &defrag_q->queue;
195 plist = plist->next;
196 while (!end_of_queue_search(phead, plist)) {
197 pnextrframe = container_of(plist, union recv_frame, u.list);
198 pnfhdr = &pnextrframe->u.hdr;
199 /*check the fragment sequence (2nd ~n fragment frame) */
200 if (curfragnum != pnfhdr->attrib.frag_num) {
201 /* the fragment number must increase (after decache)
202 * release the defrag_q & prframe
203 */
204 r8712_free_recvframe(prframe, pfree_recv_queue);
205 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
206 return NULL;
207 }
208 curfragnum++;
209 /* copy the 2nd~n fragment frame's payload to the first fragment
210 * get the 2nd~last fragment frame's payload
211 */
212 wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
213 recvframe_pull(pnextrframe, wlanhdr_offset);
214 /* append to first fragment frame's tail (if privacy frame,
215 * pull the ICV)
216 */
217 recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
218 memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
219 recvframe_put(prframe, pnfhdr->len);
220 pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
221 plist = plist->next;
222 }
223 /* free the defrag_q queue and return the prframe */
224 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
225 return prframe;
226 }
227
/* Check whether defragmentation is needed; if so, queue the frame on defrag_q. */
union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
					     union recv_frame *precv_frame)
231 {
232 u8 ismfrag;
233 u8 fragnum;
234 u8 *psta_addr;
235 struct recv_frame_hdr *pfhdr;
236 struct sta_info *psta;
237 struct sta_priv *pstapriv;
238 struct list_head *phead;
239 union recv_frame *prtnframe = NULL;
240 struct __queue *pfree_recv_queue, *pdefrag_q;
241
242 pstapriv = &padapter->stapriv;
243 pfhdr = &precv_frame->u.hdr;
244 pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
245 /* need to define struct of wlan header frame ctrl */
246 ismfrag = pfhdr->attrib.mfrag;
247 fragnum = pfhdr->attrib.frag_num;
248 psta_addr = pfhdr->attrib.ta;
249 psta = r8712_get_stainfo(pstapriv, psta_addr);
250 if (!psta)
251 pdefrag_q = NULL;
252 else
253 pdefrag_q = &psta->sta_recvpriv.defrag_q;
254
255 if ((ismfrag == 0) && (fragnum == 0))
256 prtnframe = precv_frame;/*isn't a fragment frame*/
257 if (ismfrag == 1) {
258 /* 0~(n-1) fragment frame
		 * enqueue to defrag_q
260 */
261 if (pdefrag_q) {
262 if (fragnum == 0) {
263 /*the first fragment*/
264 if (!list_empty(&pdefrag_q->queue)) {
265 /*free current defrag_q */
266 r8712_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
267 }
268 }
269 /* Then enqueue the 0~(n-1) fragment to the defrag_q */
270 phead = &pdefrag_q->queue;
271 list_add_tail(&pfhdr->list, phead);
272 prtnframe = NULL;
273 } else {
274 /* can't find this ta's defrag_queue, so free this
275 * recv_frame
276 */
277 r8712_free_recvframe(precv_frame, pfree_recv_queue);
278 prtnframe = NULL;
279 }
280 }
281 if ((ismfrag == 0) && (fragnum != 0)) {
282 /* the last fragment frame
283 * enqueue the last fragment
284 */
285 if (pdefrag_q) {
286 phead = &pdefrag_q->queue;
287 list_add_tail(&pfhdr->list, phead);
288 /*call recvframe_defrag to defrag*/
289 precv_frame = recvframe_defrag(padapter, pdefrag_q);
290 prtnframe = precv_frame;
291 } else {
292 /* can't find this ta's defrag_queue, so free this
293 * recv_frame
294 */
295 r8712_free_recvframe(precv_frame, pfree_recv_queue);
296 prtnframe = NULL;
297 }
298 }
299 if (prtnframe && (prtnframe->u.hdr.attrib.privacy)) {
300 /* after defrag we must check tkip mic code */
301 if (r8712_recvframe_chkmic(padapter, prtnframe) == _FAIL) {
302 r8712_free_recvframe(prtnframe, pfree_recv_queue);
303 prtnframe = NULL;
304 }
305 }
306 return prtnframe;
307 }
308
static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
310 {
311 int a_len, padding_len;
312 u16 eth_type, nSubframe_Length;
313 u8 nr_subframes, i;
314 unsigned char *pdata;
315 struct rx_pkt_attrib *pattrib;
316 _pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
317 struct recv_priv *precvpriv = &padapter->recvpriv;
318 struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
319
320 nr_subframes = 0;
321 pattrib = &prframe->u.hdr.attrib;
322 recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
323 if (prframe->u.hdr.attrib.iv_len > 0)
324 recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
325 a_len = prframe->u.hdr.len;
326 pdata = prframe->u.hdr.rx_data;
327 while (a_len > ETH_HLEN) {
		/* Offset 12: the first 12 bytes are the 2 MAC addresses */
329 nSubframe_Length = *((u16 *)(pdata + 12));
		/* the subframe length field is big-endian; swap the bytes */
331 nSubframe_Length = (nSubframe_Length >> 8) +
332 (nSubframe_Length << 8);
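		/* On little-endian hosts this open-coded swap is equivalent to
		 * nSubframe_Length = get_unaligned_be16(pdata + 12);
		 * (a possible cleanup, not applied here).
		 */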
333 if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
334 netdev_warn(padapter->pnetdev, "r8712u: nRemain_Length is %d and nSubframe_Length is: %d\n",
335 a_len, nSubframe_Length);
336 goto exit;
337 }
		/* move the data pointer to the data content */
339 pdata += ETH_HLEN;
340 a_len -= ETH_HLEN;
341 /* Allocate new skb for releasing to upper layer */
342 sub_skb = dev_alloc_skb(nSubframe_Length + 12);
343 if (!sub_skb)
344 break;
345 skb_reserve(sub_skb, 12);
346 skb_put_data(sub_skb, pdata, nSubframe_Length);
347 subframes[nr_subframes++] = sub_skb;
348 if (nr_subframes >= MAX_SUBFRAME_COUNT) {
349 netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n");
350 break;
351 }
352 pdata += nSubframe_Length;
353 a_len -= nSubframe_Length;
354 if (a_len != 0) {
355 padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & 3);
356 if (padding_len == 4)
357 padding_len = 0;
358 if (a_len < padding_len)
359 goto exit;
360 pdata += padding_len;
361 a_len -= padding_len;
362 }
363 }
364 for (i = 0; i < nr_subframes; i++) {
365 sub_skb = subframes[i];
366 /* convert hdr + possible LLC headers into Ethernet header */
367 eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
368 if (sub_skb->len >= 8 &&
369 ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
370 eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
371 !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
372 /* remove RFC1042 or Bridge-Tunnel encapsulation and
373 * replace EtherType
374 */
375 skb_pull(sub_skb, SNAP_SIZE);
376 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
377 ETH_ALEN);
378 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
379 ETH_ALEN);
380 } else {
381 __be16 len;
382 /* Leave Ethernet header part of hdr and full payload */
383 len = htons(sub_skb->len);
384 memcpy(skb_push(sub_skb, 2), &len, 2);
385 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
386 ETH_ALEN);
387 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
388 ETH_ALEN);
389 }
390 /* Indicate the packets to upper layer */
391 if (sub_skb) {
392 sub_skb->protocol =
393 eth_type_trans(sub_skb, padapter->pnetdev);
394 sub_skb->dev = padapter->pnetdev;
395 if ((pattrib->tcpchk_valid == 1) &&
396 (pattrib->tcp_chkrpt == 1)) {
397 sub_skb->ip_summed = CHECKSUM_UNNECESSARY;
398 } else {
399 sub_skb->ip_summed = CHECKSUM_NONE;
400 }
401 netif_rx(sub_skb);
402 }
403 }
404 exit:
405 prframe->u.hdr.len = 0;
406 r8712_free_recvframe(prframe, pfree_recv_queue);
407 }
408
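/* Parse a receive buffer carrying firmware command/event packets: skip the
 * rx descriptor and driver info, then pass each event to r8712_event_handle(),
 * advancing by cmd_len + 8 bytes per event while bit 31 of the event header
 * word is set.
 */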
void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
410 {
411 __le32 voffset;
412 u8 *poffset;
413 u16 cmd_len, drvinfo_sz;
414 struct recv_stat *prxstat;
415
416 poffset = prxcmdbuf;
417 voffset = *(__le32 *)poffset;
418 prxstat = prxcmdbuf;
419 drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
420 drvinfo_sz <<= 3;
421 poffset += RXDESC_SIZE + drvinfo_sz;
422 do {
423 voffset = *(__le32 *)poffset;
424 cmd_len = (u16)(le32_to_cpu(voffset) & 0xffff);
425 r8712_event_handle(padapter, (__le32 *)poffset);
426 poffset += (cmd_len + 8);/*8 bytes alignment*/
427 } while (le32_to_cpu(voffset) & BIT(31));
428 }
429
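/* Decide whether seq_num falls inside the Rx reorder window and advance
 * indicate_seq (WinStart) as needed.  Returns false if the packet is older
 * than the window and should be dropped.
 */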
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
			      u16 seq_num)
432 {
433 u8 wsize = preorder_ctrl->wsize_b;
434 u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) % 4096;
435
436 /* Rx Reorder initialize condition.*/
437 if (preorder_ctrl->indicate_seq == 0xffff)
438 preorder_ctrl->indicate_seq = seq_num;
439 /* Drop out the packet which SeqNum is smaller than WinStart */
440 if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
441 return false;
442 /*
	 * Sliding window manipulation. Conditions include:
444 * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
445 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
446 */
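	/* Illustrative example (values assumed): with wsize = 64 and
	 * indicate_seq = 100, wend = 163.  An incoming seq_num of 200 lies
	 * beyond wend, so the window slides and indicate_seq becomes
	 * 200 + 1 - 64 = 137.
	 */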
447 if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq))
448 preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq +
449 1) % 4096;
450 else if (SN_LESS(wend, seq_num)) {
451 if (seq_num >= (wsize - 1))
452 preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
453 else
454 preorder_ctrl->indicate_seq = 4095 - (wsize -
455 (seq_num + 1)) + 1;
456 }
457 return true;
458 }
459
static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
				     union recv_frame *prframe)
462 {
463 struct list_head *phead, *plist;
464 union recv_frame *pnextrframe;
465 struct rx_pkt_attrib *pnextattrib;
466 struct __queue *ppending_recvframe_queue =
467 &preorder_ctrl->pending_recvframe_queue;
468 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
469
470 phead = &ppending_recvframe_queue->queue;
471 plist = phead->next;
472 while (!end_of_queue_search(phead, plist)) {
473 pnextrframe = container_of(plist, union recv_frame, u.list);
474 pnextattrib = &pnextrframe->u.hdr.attrib;
475
476 if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
477 return false;
478
479 if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
480 plist = plist->next;
481 else
482 break;
483 }
484 list_del_init(&prframe->u.hdr.list);
485 list_add_tail(&prframe->u.hdr.list, plist);
486 return true;
487 }
488
int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
				     struct recv_reorder_ctrl *preorder_ctrl,
				     int bforced)
492 {
493 struct list_head *phead, *plist;
494 union recv_frame *prframe;
495 struct rx_pkt_attrib *pattrib;
496 int bPktInBuf = false;
497 struct __queue *ppending_recvframe_queue =
498 &preorder_ctrl->pending_recvframe_queue;
499
500 phead = &ppending_recvframe_queue->queue;
501 plist = phead->next;
	/* Handle the forced indicate case. */
503 if (bforced) {
504 if (list_empty(phead))
505 return true;
506
507 prframe = container_of(plist, union recv_frame, u.list);
508 pattrib = &prframe->u.hdr.attrib;
509 preorder_ctrl->indicate_seq = pattrib->seq_num;
510 }
	/* Prepare the indication list and indicate.
	 * Check if there is any packet that needs to be indicated.
	 */
514 while (!list_empty(phead)) {
515 prframe = container_of(plist, union recv_frame, u.list);
516 pattrib = &prframe->u.hdr.attrib;
517 if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
518 plist = plist->next;
519 list_del_init(&prframe->u.hdr.list);
520 if (SN_EQUAL(preorder_ctrl->indicate_seq,
521 pattrib->seq_num))
522 preorder_ctrl->indicate_seq =
523 (preorder_ctrl->indicate_seq + 1) % 4096;
524 /*indicate this recv_frame*/
525 if (!pattrib->amsdu) {
526 if (!padapter->driver_stopped &&
527 !padapter->surprise_removed) {
528 /* indicate this recv_frame */
529 r8712_recv_indicatepkt(padapter,
530 prframe);
531 }
532 } else if (pattrib->amsdu == 1) {
533 amsdu_to_msdu(padapter, prframe);
534 }
535 /* Update local variables. */
536 bPktInBuf = false;
537 } else {
538 bPktInBuf = true;
539 break;
540 }
541 }
542 return bPktInBuf;
543 }
544
static int recv_indicatepkt_reorder(struct _adapter *padapter,
				    union recv_frame *prframe)
547 {
548 unsigned long irql;
549 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
550 struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
551 struct __queue *ppending_recvframe_queue =
552 &preorder_ctrl->pending_recvframe_queue;
553
554 if (!pattrib->amsdu) {
555 /* s1. */
556 r8712_wlanhdr_to_ethhdr(prframe);
557 if (pattrib->qos != 1) {
558 if (!padapter->driver_stopped &&
559 !padapter->surprise_removed) {
560 r8712_recv_indicatepkt(padapter, prframe);
561 return 0;
562 } else {
563 return -EINVAL;
564 }
565 }
566 }
567 spin_lock_irqsave(&ppending_recvframe_queue->lock, irql);
568 /*s2. check if winstart_b(indicate_seq) needs to be updated*/
569 if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num))
570 goto _err_exit;
571 /*s3. Insert all packet into Reorder Queue to maintain its ordering.*/
572 if (!enqueue_reorder_recvframe(preorder_ctrl, prframe))
573 goto _err_exit;
574 /*s4.
575 * Indication process.
576 * After Packet dropping and Sliding Window shifting as above, we can
577 * now just indicate the packets with the SeqNum smaller than latest
578 * WinStart and buffer other packets.
579 *
580 * For Rx Reorder condition:
581 * 1. All packets with SeqNum smaller than WinStart => Indicate
582 * 2. All packets with SeqNum larger than or equal to
583 * WinStart => Buffer it.
584 */
585 if (r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
586 mod_timer(&preorder_ctrl->reordering_ctrl_timer,
587 jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
588 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
589 } else {
590 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
591 del_timer(&preorder_ctrl->reordering_ctrl_timer);
592 }
593 return 0;
594 _err_exit:
595 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
596 return -ENOMEM;
597 }
598
void r8712_reordering_ctrl_timeout_handler(void *pcontext)
600 {
601 unsigned long irql;
602 struct recv_reorder_ctrl *preorder_ctrl = pcontext;
603 struct _adapter *padapter = preorder_ctrl->padapter;
604 struct __queue *ppending_recvframe_queue =
605 &preorder_ctrl->pending_recvframe_queue;
606
607 if (padapter->driver_stopped || padapter->surprise_removed)
608 return;
609 spin_lock_irqsave(&ppending_recvframe_queue->lock, irql);
610 r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, true);
611 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
612 }
613
static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
					   union recv_frame *prframe)
616 {
617 int retval = _SUCCESS;
618 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
619 struct ht_priv *phtpriv = &pmlmepriv->htpriv;
620
621 if (phtpriv->ht_option == 1) { /*B/G/N Mode*/
622 if (recv_indicatepkt_reorder(padapter, prframe)) {
623 /* including perform A-MPDU Rx Ordering Buffer Control*/
624 if (!padapter->driver_stopped &&
625 !padapter->surprise_removed)
626 return _FAIL;
627 }
628 } else { /*B/G mode*/
629 retval = r8712_wlanhdr_to_ethhdr(prframe);
630 if (retval)
631 return _FAIL;
632 if (!padapter->driver_stopped && !padapter->surprise_removed) {
633 /* indicate this recv_frame */
634 r8712_recv_indicatepkt(padapter, prframe);
635 } else {
636 return _FAIL;
637 }
638 }
639 return retval;
640 }
641
static u8 query_rx_pwr_percentage(s8 antpower)
643 {
644 if ((antpower <= -100) || (antpower >= 20))
645 return 0;
646 else if (antpower >= 0)
647 return 100;
648 else
649 return 100 + antpower;
650 }
651
static u8 evm_db2percentage(s8 value)
653 {
654 /*
655 * -33dB~0dB to 0%~99%
656 */
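	/* e.g. value = -20 dB maps to 60%; -33 dB or below maps to 100% */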
657 s8 ret_val = clamp(-value, 0, 33) * 3;
658
659 if (ret_val == 99)
660 ret_val = 100;
661
662 return ret_val;
663 }
664
s32 r8712_signal_scale_mapping(s32 cur_sig)
666 {
667 s32 ret_sig;
668
669 if (cur_sig >= 51 && cur_sig <= 100)
670 ret_sig = 100;
671 else if (cur_sig >= 41 && cur_sig <= 50)
672 ret_sig = 80 + ((cur_sig - 40) * 2);
673 else if (cur_sig >= 31 && cur_sig <= 40)
674 ret_sig = 66 + (cur_sig - 30);
675 else if (cur_sig >= 21 && cur_sig <= 30)
676 ret_sig = 54 + (cur_sig - 20);
677 else if (cur_sig >= 10 && cur_sig <= 20)
678 ret_sig = 42 + (((cur_sig - 10) * 2) / 3);
679 else if (cur_sig >= 5 && cur_sig <= 9)
680 ret_sig = 22 + (((cur_sig - 5) * 3) / 2);
681 else if (cur_sig >= 1 && cur_sig <= 4)
682 ret_sig = 6 + (((cur_sig - 1) * 3) / 2);
683 else
684 ret_sig = cur_sig;
685 return ret_sig;
686 }
687
static s32 translate2dbm(struct _adapter *padapter, u8 signal_strength_idx)
689 {
690 s32 signal_power; /* in dBm.*/
691 /* Translate to dBm (x=0.5y-95).*/
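	/* e.g. a signal_strength_idx of 60 gives (60 + 1) / 2 - 95 = -65 dBm */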
692 signal_power = (s32)((signal_strength_idx + 1) >> 1);
693 signal_power -= 95;
694 return signal_power;
695 }
696
static void query_rx_phy_status(struct _adapter *padapter,
				union recv_frame *prframe)
699 {
700 u8 i, max_spatial_stream, evm;
701 struct recv_stat *prxstat = (struct recv_stat *)prframe->u.hdr.rx_head;
702 struct phy_stat *pphy_stat = (struct phy_stat *)(prxstat + 1);
703 u8 *pphy_head = (u8 *)(prxstat + 1);
704 s8 rx_pwr[4], rx_pwr_all;
705 u8 pwdb_all;
706 u32 rssi, total_rssi = 0;
707 u8 bcck_rate = 0, rf_rx_num = 0, cck_highpwr = 0;
708 struct phy_cck_rx_status *pcck_buf;
709 u8 sq;
710
711 /* Record it for next packet processing*/
712 bcck_rate = (prframe->u.hdr.attrib.mcs_rate <= 3 ? 1 : 0);
713 if (bcck_rate) {
714 u8 report;
715
716 /* CCK Driver info Structure is not the same as OFDM packet.*/
717 pcck_buf = (struct phy_cck_rx_status *)pphy_stat;
718 /* (1)Hardware does not provide RSSI for CCK
719 * (2)PWDB, Average PWDB calculated by hardware
720 * (for rate adaptive)
721 */
722 if (!cck_highpwr) {
723 report = pcck_buf->cck_agc_rpt & 0xc0;
724 report >>= 6;
725 switch (report) {
			/* Modify the RF LNA gain value to -40, -20,
			 * -2, 14 per Jenyu's suggestion.
			 * Note: different RF chips have different
			 * LNA gains.
			 */
731 case 0x3:
732 rx_pwr_all = -40 - (pcck_buf->cck_agc_rpt &
733 0x3e);
734 break;
735 case 0x2:
736 rx_pwr_all = -20 - (pcck_buf->cck_agc_rpt &
737 0x3e);
738 break;
739 case 0x1:
740 rx_pwr_all = -2 - (pcck_buf->cck_agc_rpt &
741 0x3e);
742 break;
743 case 0x0:
744 rx_pwr_all = 14 - (pcck_buf->cck_agc_rpt &
745 0x3e);
746 break;
747 }
748 } else {
749 report = ((u8)(le32_to_cpu(pphy_stat->phydw1) >> 8)) &
750 0x60;
751 report >>= 5;
752 switch (report) {
753 case 0x3:
754 rx_pwr_all = -40 - ((pcck_buf->cck_agc_rpt &
755 0x1f) << 1);
756 break;
757 case 0x2:
758 rx_pwr_all = -20 - ((pcck_buf->cck_agc_rpt &
759 0x1f) << 1);
760 break;
761 case 0x1:
762 rx_pwr_all = -2 - ((pcck_buf->cck_agc_rpt &
763 0x1f) << 1);
764 break;
765 case 0x0:
766 rx_pwr_all = 14 - ((pcck_buf->cck_agc_rpt &
767 0x1f) << 1);
768 break;
769 }
770 }
771 pwdb_all = query_rx_pwr_percentage(rx_pwr_all);
		/* CCK gain is smaller than OFDM/MCS gain, so add an
		 * empirically determined gain difference of 6.
		 */
774 pwdb_all += 6;
775 if (pwdb_all > 100)
776 pwdb_all = 100;
		/* modify the offset to match the gain index of OFDM */
778 if (pwdb_all > 34 && pwdb_all <= 42)
779 pwdb_all -= 2;
780 else if (pwdb_all > 26 && pwdb_all <= 34)
781 pwdb_all -= 6;
782 else if (pwdb_all > 14 && pwdb_all <= 26)
783 pwdb_all -= 8;
784 else if (pwdb_all > 4 && pwdb_all <= 14)
785 pwdb_all -= 4;
786 /*
787 * (3) Get Signal Quality (EVM)
788 */
789 if (pwdb_all > 40) {
790 sq = 100;
791 } else {
792 sq = pcck_buf->sq_rpt;
793 if (pcck_buf->sq_rpt > 64)
794 sq = 0;
795 else if (pcck_buf->sq_rpt < 20)
796 sq = 100;
797 else
798 sq = ((64 - sq) * 100) / 44;
799 }
800 prframe->u.hdr.attrib.signal_qual = sq;
801 prframe->u.hdr.attrib.rx_mimo_signal_qual[0] = sq;
802 prframe->u.hdr.attrib.rx_mimo_signal_qual[1] = -1;
803 } else {
804 /* (1)Get RSSI for HT rate */
805 for (i = 0; i < ((padapter->registrypriv.rf_config) &
806 0x0f); i++) {
807 rf_rx_num++;
808 rx_pwr[i] = ((pphy_head[PHY_STAT_GAIN_TRSW_SHT + i]
809 & 0x3F) * 2) - 110;
810 /* Translate DBM to percentage. */
811 rssi = query_rx_pwr_percentage(rx_pwr[i]);
812 total_rssi += rssi;
813 }
814 /* (2)PWDB, Average PWDB calculated by hardware (for
815 * rate adaptive)
816 */
817 rx_pwr_all = (((pphy_head[PHY_STAT_PWDB_ALL_SHT]) >> 1) & 0x7f)
818 - 106;
819 pwdb_all = query_rx_pwr_percentage(rx_pwr_all);
820
821 {
822 /* (3)EVM of HT rate */
823 if (prframe->u.hdr.attrib.htc &&
824 prframe->u.hdr.attrib.mcs_rate >= 20 &&
825 prframe->u.hdr.attrib.mcs_rate <= 27) {
				/* both spatial streams make sense */
827 max_spatial_stream = 2;
828 } else {
829 /* only spatial stream 1 makes sense */
830 max_spatial_stream = 1;
831 }
832 for (i = 0; i < max_spatial_stream; i++) {
833 evm = evm_db2percentage((pphy_head
834 [PHY_STAT_RXEVM_SHT + i]));/*dbm*/
835 prframe->u.hdr.attrib.signal_qual =
836 (u8)(evm & 0xff);
837 prframe->u.hdr.attrib.rx_mimo_signal_qual[i] =
838 (u8)(evm & 0xff);
839 }
840 }
841 }
	/* UI BSS list signal strength (in percentage): scale it to 0~100 for
	 * display. It is assigned to the BSS list in
	 * GetValueFromBeaconOrProbeRsp().
	 */
846 if (bcck_rate) {
847 prframe->u.hdr.attrib.signal_strength =
848 (u8)r8712_signal_scale_mapping(pwdb_all);
849 } else {
850 if (rf_rx_num != 0)
851 prframe->u.hdr.attrib.signal_strength =
852 (u8)(r8712_signal_scale_mapping(total_rssi /=
853 rf_rx_num));
854 }
855 }
856
static void process_link_qual(struct _adapter *padapter,
			      union recv_frame *prframe)
859 {
860 u32 last_evm = 0, avg_val;
861 struct rx_pkt_attrib *pattrib;
862 struct smooth_rssi_data *sqd = &padapter->recvpriv.signal_qual_data;
863
864 if (!prframe || !padapter)
865 return;
866 pattrib = &prframe->u.hdr.attrib;
867 if (pattrib->signal_qual != 0) {
868 /*
869 * 1. Record the general EVM to the sliding window.
870 */
871 if (sqd->total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) {
872 sqd->total_num = PHY_LINKQUALITY_SLID_WIN_MAX;
873 last_evm = sqd->elements[sqd->index];
874 sqd->total_val -= last_evm;
875 }
876 sqd->total_val += pattrib->signal_qual;
877 sqd->elements[sqd->index++] = pattrib->signal_qual;
878 if (sqd->index >= PHY_LINKQUALITY_SLID_WIN_MAX)
879 sqd->index = 0;
880
881 /* <1> Showed on UI for user, in percentage. */
882 avg_val = sqd->total_val / sqd->total_num;
883 padapter->recvpriv.signal = (u8)avg_val;
884 }
885 }
886
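/* Smooth the per-packet signal strength over a sliding window of
 * PHY_RSSI_SLID_WIN_MAX samples and store the average, converted to dBm,
 * in recvpriv.rssi.
 */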
static void process_rssi(struct _adapter *padapter, union recv_frame *prframe)
888 {
889 u32 last_rssi, tmp_val;
890 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
891 struct smooth_rssi_data *ssd = &padapter->recvpriv.signal_strength_data;
892
893 if (ssd->total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
894 ssd->total_num = PHY_RSSI_SLID_WIN_MAX;
895 last_rssi = ssd->elements[ssd->index];
896 ssd->total_val -= last_rssi;
897 }
898 ssd->total_val += pattrib->signal_strength;
899 ssd->elements[ssd->index++] = pattrib->signal_strength;
900 if (ssd->index >= PHY_RSSI_SLID_WIN_MAX)
901 ssd->index = 0;
902 tmp_val = ssd->total_val / ssd->total_num;
903 padapter->recvpriv.rssi = (s8)translate2dbm(padapter, (u8)tmp_val);
904 }
905
static void process_phy_info(struct _adapter *padapter,
			     union recv_frame *prframe)
908 {
909 query_rx_phy_status(padapter, prframe);
910 process_rssi(padapter, prframe);
911 process_link_qual(padapter, prframe);
912 }
913
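/* Core receive path for one frame: validate the frame, collect PHY status,
 * decrypt, defragment if needed, run port control, and finally indicate the
 * frame (or free it on failure).
 */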
int recv_func(struct _adapter *padapter, void *pcontext)
915 {
916 struct rx_pkt_attrib *pattrib;
917 union recv_frame *prframe, *orig_prframe;
918 int retval = _SUCCESS;
919 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
920 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
921
922 prframe = pcontext;
923 orig_prframe = prframe;
924 pattrib = &prframe->u.hdr.attrib;
925 if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
926 if (pattrib->crc_err == 1)
927 padapter->mppriv.rx_crcerrpktcount++;
928 else
929 padapter->mppriv.rx_pktcount++;
930 if (!check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE)) {
931 /* free this recv_frame */
932 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
933 goto _exit_recv_func;
934 }
935 }
	/* check the frame ctrl field and decache */
937 retval = r8712_validate_recv_frame(padapter, prframe);
938 if (retval != _SUCCESS) {
939 /* free this recv_frame */
940 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
941 goto _exit_recv_func;
942 }
943 process_phy_info(padapter, prframe);
944 prframe = r8712_decryptor(padapter, prframe);
945 if (!prframe) {
946 retval = _FAIL;
947 goto _exit_recv_func;
948 }
949 prframe = r8712_recvframe_chk_defrag(padapter, prframe);
950 if (!prframe)
951 goto _exit_recv_func;
952 prframe = r8712_portctrl(padapter, prframe);
953 if (!prframe) {
954 retval = _FAIL;
955 goto _exit_recv_func;
956 }
957 retval = r8712_process_recv_indicatepkts(padapter, prframe);
958 if (retval != _SUCCESS) {
959 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
960 goto _exit_recv_func;
961 }
962 _exit_recv_func:
963 return retval;
964 }
965
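/* Split one USB receive skb into individual frames: each frame consists of
 * an rx descriptor, driver info and payload, rounded up to a 128-byte
 * boundary.  Every frame is copied into a freshly allocated skb and fed to
 * r8712_recv_entry().
 */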
static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
967 {
968 u8 *pbuf, shift_sz = 0;
969 u8 frag, mf;
970 uint pkt_len;
971 u32 transfer_len;
972 struct recv_stat *prxstat;
973 u16 pkt_cnt, drvinfo_sz, pkt_offset, tmp_len, alloc_sz;
974 struct __queue *pfree_recv_queue;
975 _pkt *pkt_copy = NULL;
976 union recv_frame *precvframe = NULL;
977 struct recv_priv *precvpriv = &padapter->recvpriv;
978
979 pfree_recv_queue = &precvpriv->free_recv_queue;
980 pbuf = pskb->data;
981 prxstat = (struct recv_stat *)pbuf;
982 pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
983 pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
984 transfer_len = pskb->len;
	/* Tested throughput with a Netgear 3700 (no security) and Chariot
	 * 3T3R pairs. The packet count can be a big number, so the contained
	 * packets affect the Rx reordering.
	 */
989 if (transfer_len < pkt_len) {
990 /* In this case, it means the MAX_RECVBUF_SZ is too small to
991 * get the data from 8712u.
992 */
993 return;
994 }
995 do {
996 prxstat = (struct recv_stat *)pbuf;
997 pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
998 /* more fragment bit */
999 mf = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1;
		/* fragmentation number */
1001 frag = (le32_to_cpu(prxstat->rxdw2) >> 12) & 0xf;
		/* unit: 2^3 = 8 bytes */
1003 drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
1004 drvinfo_sz <<= 3;
1005 if (pkt_len <= 0)
1006 return;
		/* QoS data: the wireless LAN header length is 26 */
1008 if ((le32_to_cpu(prxstat->rxdw0) >> 23) & 0x01)
1009 shift_sz = 2;
1010 precvframe = r8712_alloc_recvframe(pfree_recv_queue);
1011 if (!precvframe)
1012 return;
1013 INIT_LIST_HEAD(&precvframe->u.hdr.list);
1014 precvframe->u.hdr.precvbuf = NULL; /*can't access the precvbuf*/
1015 precvframe->u.hdr.len = 0;
1016 tmp_len = pkt_len + drvinfo_sz + RXDESC_SIZE;
1017 pkt_offset = (u16)round_up(tmp_len, 128);
		/* For the first fragment, the driver needs to allocate
		 * 1536 + drvinfo_sz + RXDESC_SIZE bytes to defragment the
		 * packet.
		 */
1021 if ((mf == 1) && (frag == 0))
1022 /*1658+6=1664, 1664 is 128 alignment.*/
1023 alloc_sz = max_t(u16, tmp_len, 1658);
1024 else
1025 alloc_sz = tmp_len;
		/* 2 bytes are for IP header 4-byte alignment in the QoS
		 * packet case; 4 bytes are for skb->data 4-byte alignment.
		 */
1029 alloc_sz += 6;
1030 pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
1031 if (!pkt_copy)
1032 return;
1033
1034 precvframe->u.hdr.pkt = pkt_copy;
1035 skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data) % 4));
1036 skb_reserve(pkt_copy, shift_sz);
1037 memcpy(pkt_copy->data, pbuf, tmp_len);
1038 precvframe->u.hdr.rx_head = pkt_copy->data;
1039 precvframe->u.hdr.rx_data = pkt_copy->data;
1040 precvframe->u.hdr.rx_tail = pkt_copy->data;
1041 precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
1042
1043 recvframe_put(precvframe, tmp_len);
1044 recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE);
		/* Because of endianness issues, the driver avoids referencing
		 * rxstat after calling update_recvframe_attrib_from_recvstat().
		 */
1048 update_recvframe_attrib_from_recvstat(&precvframe->u.hdr.attrib,
1049 prxstat);
1050 r8712_recv_entry(precvframe);
1051 transfer_len -= pkt_offset;
1052 pbuf += pkt_offset;
1053 pkt_cnt--;
1054 precvframe = NULL;
1055 pkt_copy = NULL;
1056 } while ((transfer_len > 0) && pkt_cnt > 0);
1057 }
1058
static void recv_tasklet(struct tasklet_struct *t)
1060 {
1061 struct sk_buff *pskb;
1062 struct _adapter *padapter = from_tasklet(padapter, t,
1063 recvpriv.recv_tasklet);
1064 struct recv_priv *precvpriv = &padapter->recvpriv;
1065
1066 while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
1067 recvbuf2recvframe(padapter, pskb);
1068 skb_reset_tail_pointer(pskb);
1069 pskb->len = 0;
1070 if (!skb_cloned(pskb))
1071 skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
1072 else
1073 consume_skb(pskb);
1074 }
1075 }
1076