xref: /freebsd/sys/contrib/dev/broadcom/brcm80211/brcmfmac/msgbuf.c (revision 902136e0fe112383ec64d2ef43a446063b5e6417)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2014 Broadcom Corporation
4  */
5 
6 /*******************************************************************************
7  * Communicates with the dongle by using dcmd codes.
8  * For certain dcmd codes, the dongle interprets string data from the host.
9  ******************************************************************************/
10 
11 #include <linux/types.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #if defined(__FreeBSD__)
15 #include <linux/delay.h>
16 #ifdef DEBUG
17 #include <linux/seq_file.h>
18 #endif
19 #endif
20 
21 #include <brcmu_utils.h>
22 #include <brcmu_wifi.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 #include "proto.h"
27 #include "msgbuf.h"
28 #include "commonring.h"
29 #include "flowring.h"
30 #include "bus.h"
31 #include "tracepoint.h"
32 
33 
/* How long to wait for the firmware to answer an ioctl request */
#define MSGBUF_IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)

/* Message type ids carried in msgbuf_common_hdr::msgtype */
#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

/* Sizes of the tx and rx packet-id tables */
#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

/* Fixed request id placed in ioctl requests (see brcmf_msgbuf_tx_ioctl()) */
#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

/* Buffer sizes and post limits for the rx/event/ioctl-response paths */
#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE           8192
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

/* tx descriptor flags: frame type bits and priority field position */
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11	0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK	0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

/* tx ring doorbell batching thresholds (see brcmf_msgbuf_txflow()) */
#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

/* worker scheduling thresholds */
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48

#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES		10
81 
/* Common header prepended to every message exchanged over the rings */
struct msgbuf_common_hdr {
	u8				msgtype;	/* MSGBUF_TYPE_* */
	u8				ifidx;		/* interface index */
	u8				flags;
	u8				rsvd0;
	__le32				request_id;	/* host-chosen id, echoed in completions */
};
89 
/* Host -> dongle ioctl request (MSGBUF_TYPE_IOCTLPTR_REQ).  The payload
 * is not inline: it lives in a separate DMA buffer referenced by
 * req_buf_addr (see brcmf_msgbuf_tx_ioctl()).
 */
struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;		/* ioctl command code */
	__le16				trans_id;	/* transaction id (msgbuf->reqid) */
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;	/* DMA address of payload buffer */
	__le32				rsvd1[2];
};
100 
/* Host -> dongle tx descriptor (MSGBUF_TYPE_TX_POST).  The ethernet
 * header is carried inline in txhdr; the remainder of the frame sits in
 * the DMA buffer referenced by data_buf_addr.
 */
struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];	/* inline ethernet header */
	u8				flags;		/* BRCMF_MSGBUF_PKT_FLAGS_* + priority */
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;	/* frame length minus ETH_HLEN */
	__le32				rsvd0;
};
112 
/* Host -> dongle rx buffer post (MSGBUF_TYPE_RXBUF_POST); see
 * brcmf_msgbuf_rxbuf_data_post().
 */
struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};
121 
/* Host -> dongle buffer post used for both ioctl responses and events
 * (MSGBUF_TYPE_IOCTLRESP_BUF_POST / MSGBUF_TYPE_EVENT_BUF_POST).
 */
struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;	/* DMA address of the posted buffer */
	__le32				rsvd1[4];
};
129 
/* Completion header shared by all dongle -> host completion messages */
struct msgbuf_completion_hdr {
	__le16				status;		/* firmware status code */
	__le16				flow_ring_id;
};
134 
/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
struct msgbuf_gen_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le32				rsvd0[3];
};

/* Data struct for the MSGBUF_TYPE_RING_STATUS */
struct msgbuf_ring_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le16				rsvd0[5];
};

/* Dongle -> host event completion */
struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;	/* length of the event payload */
	__le16				seqnum;
	__le16				rsvd0[4];
};
158 
/* Dongle -> host ioctl completion; consumed by
 * brcmf_msgbuf_process_ioctl_complete().
 */
struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;	/* length of the response payload */
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};
167 
/* Dongle -> host tx completion; consumed by
 * brcmf_msgbuf_process_txstatus().
 */
struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};
174 
/* Dongle -> host rx completion for a previously posted rx buffer */
struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};
186 
/* Host -> dongle flowring create request (MSGBUF_TYPE_FLOW_RING_CREATE);
 * built by brcmf_msgbuf_flowring_create_worker().
 */
struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];	/* destination MAC of the flow */
	u8				sa[ETH_ALEN];	/* source MAC of the flow */
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;	/* offset by BRCMF_H2D_MSGRING_FLOWRING_IDSTART */
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;	/* ring depth */
	__le16				len_item;	/* ring item size */
	struct msgbuf_buf_addr		flow_ring_addr;	/* DMA address of the ring memory */
};
201 
/* Host -> dongle flowring delete request (MSGBUF_TYPE_FLOW_RING_DELETE) */
struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};
208 
/* Dongle -> host completions for the flowring create/delete/flush
 * requests.  All three share the same layout; only the msgtype differs.
 */
struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};
226 
/* Deferred flowring-creation request, queued on brcmf_msgbuf::work_queue
 * and handled in process context by brcmf_msgbuf_flowring_worker().
 */
struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];	/* source MAC of the flow */
	u8 da[ETH_ALEN];	/* destination MAC of the flow */
};
234 
/* struct brcmf_msgbuf - per-device state of the msgbuf protocol layer */
struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	/* rings shared with the dongle */
	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;	/* one DMA handle per flowring */

	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	/* rx buffer posting accounting (see brcmf_msgbuf_rxbuf_data_fill()) */
	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	/* preallocated DMA buffer holding ioctl request payloads */
	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	/* result of the last ioctl, filled by the completion handler */
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;		/* transaction id for ioctl requests */
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;	/* set by brcmf_msgbuf_ioctl_resp_wake() */

	/* skb <-> packet-id tables backing the tx and rx DMA mappings */
	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	/* tx flow worker; flow_map flags flowrings with queued traffic */
	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	/* deferred flowring creation (see brcmf_msgbuf_flowring_worker()) */
	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;	/* protects work_queue */
	struct list_head work_queue;
};
283 
/* One slot of the skb <-> packet-id table.  @allocated is claimed with
 * atomic_cmpxchg() in brcmf_msgbuf_alloc_pktid().
 */
struct brcmf_msgbuf_pktid {
	atomic_t  allocated;	/* 0 = free, 1 = in use */
	u16 data_offset;	/* offset skipped when the skb was mapped */
	struct sk_buff *skb;
	dma_addr_t physaddr;	/* DMA address of skb->data + data_offset */
};

/* Packet-id table; allocation scans circularly from last_allocated_idx */
struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;	/* used when (un)mapping entries */
	struct brcmf_msgbuf_pktid *array;
};
297 
298 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
299 
300 
301 static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,enum dma_data_direction direction)302 brcmf_msgbuf_init_pktids(u32 nr_array_entries,
303 			 enum dma_data_direction direction)
304 {
305 	struct brcmf_msgbuf_pktid *array;
306 	struct brcmf_msgbuf_pktids *pktids;
307 
308 	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
309 	if (!array)
310 		return NULL;
311 
312 	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
313 	if (!pktids) {
314 		kfree(array);
315 		return NULL;
316 	}
317 	pktids->array = array;
318 	pktids->array_size = nr_array_entries;
319 
320 	return pktids;
321 }
322 
323 
/* brcmf_msgbuf_alloc_pktid() - DMA-map an skb and assign it a free
 * packet id.
 *
 * Maps @skb (starting @data_offset bytes in) for DMA and claims a free
 * slot in @pktids, scanning circularly from the last allocated index.
 * Slots are claimed with atomic_cmpxchg() so concurrent allocators
 * cannot grab the same id.  On success *@physaddr and *@idx are filled
 * in and the slot records the skb/mapping for later release.
 *
 * Return: 0 on success, -ENOMEM if the DMA mapping failed or no free
 * id was found (mapping is undone in the latter case).
 */
static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		/* cheap unlocked peek first, then claim atomically */
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size) {
		/* table full: release the mapping made above */
		dma_unmap_single(dev, *physaddr, skb->len - data_offset,
				 pktids->direction);
		return -ENOMEM;
	}

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}
370 
371 
/* brcmf_msgbuf_get_pktid() - look up a packet id and release its slot.
 *
 * Validates @idx, undoes the DMA mapping made by
 * brcmf_msgbuf_alloc_pktid() and marks the slot free.
 *
 * Return: the skb associated with @idx, or NULL if the id is out of
 * range or not currently in use.
 */
static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		/* mark slot free for reuse */
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}
398 
399 
/* brcmf_msgbuf_release_array() - tear down a packet-id table.
 *
 * Unmaps and frees every skb still held in @pktids, then frees the
 * slot array and the table itself.  NOTE(review): the do/while touches
 * entry 0 unconditionally, so array_size is assumed non-zero.
 */
static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}
424 
425 
brcmf_msgbuf_release_pktids(struct brcmf_msgbuf * msgbuf)426 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
427 {
428 	if (msgbuf->rx_pktids)
429 		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
430 					   msgbuf->rx_pktids);
431 	if (msgbuf->tx_pktids)
432 		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
433 					   msgbuf->tx_pktids);
434 }
435 
436 
/* brcmf_msgbuf_tx_ioctl() - queue an ioctl request on the control
 * submission ring.
 *
 * Copies up to BRCMF_TX_IOCTL_MAX_MSG_SIZE bytes of @buf into the
 * preallocated ioctl DMA buffer (zeroing it when @buf is NULL) and
 * writes an MSGBUF_TYPE_IOCTLPTR_REQ descriptor referencing it.  The
 * response is matched via the fixed BRCMF_IOCTL_REQ_PKTID request id
 * and the incrementing trans_id.
 *
 * Return: 0 on success or a negative errno (e.g. -ENOMEM when no ring
 * space is available).
 */
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
#if defined(__linux__)
				 uint cmd, void *buf, uint len)
#elif defined(__FreeBSD__)
				 uint cmd, const void *buf, uint len)
#endif
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	/* payload is truncated to the shared ioctl buffer's limit */
	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}
485 
486 
/* Block until the ioctl completion handler sets ctl_completed, or the
 * timeout expires.  Returns 0 if MSGBUF_IOCTL_RESP_TIMEOUT elapsed
 * without completion, non-zero otherwise (wait_event_timeout()
 * semantics).
 */
static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  MSGBUF_IOCTL_RESP_TIMEOUT);
}
493 
494 
/* Flag the pending ioctl as complete and wake the thread blocked in
 * brcmf_msgbuf_ioctl_resp_wait().  The flag must be set before the
 * wake-up so the waiter's condition re-check succeeds.
 */
static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	wake_up(&msgbuf->ioctl_resp_wait);
}
500 
501 
/* brcmf_msgbuf_query_dcmd() - issue a firmware ioctl and wait for the
 * completion.
 *
 * Sends the request via brcmf_msgbuf_tx_ioctl() and blocks (up to
 * MSGBUF_IOCTL_RESP_TIMEOUT) until the completion path calls
 * brcmf_msgbuf_ioctl_resp_wake().  On success the response payload is
 * copied back into @buf (truncated to @len) and the firmware status
 * code is returned through @fwerr.
 *
 * Return: 0 on success, -EIO on timeout, -EBADF when the response
 * packet id cannot be resolved, or the brcmf_msgbuf_tx_ioctl() error.
 */
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len, int *fwerr)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	*fwerr = 0;
	/* must be cleared before the request is submitted */
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		bphy_err(drvr, "Timeout on response for query command\n");
		return -EIO;
	}

	/* reclaim the response buffer that was posted for this ioctl */
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		/* copy at most @len bytes of the response back to the caller */
		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	*fwerr = msgbuf->ioctl_resp_status;
	return 0;
}
538 
539 
/* Set commands travel over the same request/response path as queries,
 * so this simply forwards to brcmf_msgbuf_query_dcmd().
 */
static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len, int *fwerr)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
545 
546 
/* msgbuf carries no protocol header on received frames, so header
 * pulling is not supported; always fails with -ENODEV.
 */
static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				struct sk_buff *skb, struct brcmf_if **ifp)
{
	return -ENODEV;
}
552 
/* Intentionally empty: no rx reordering is done by the msgbuf layer. */
static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}
556 
557 static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf * msgbuf,u16 flowid)558 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
559 {
560 	u32 dma_sz;
561 	void *dma_buf;
562 
563 	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
564 
565 	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
566 	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
567 	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
568 			  msgbuf->flowring_dma_handle[flowid]);
569 
570 	brcmf_flowring_delete(msgbuf->flow, flowid);
571 }
572 
573 
574 static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf * msgbuf)575 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
576 {
577 	struct brcmf_msgbuf_work_item *work = NULL;
578 	ulong flags;
579 
580 	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
581 	if (!list_empty(&msgbuf->work_queue)) {
582 		work = list_first_entry(&msgbuf->work_queue,
583 					struct brcmf_msgbuf_work_item, queue);
584 		list_del(&work->queue);
585 	}
586 	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
587 
588 	return work;
589 }
590 
591 
/* brcmf_msgbuf_flowring_create_worker() - allocate a tx flowring and
 * announce it to the firmware.
 *
 * Allocates the ring's DMA memory, configures the commonring and
 * submits an MSGBUF_TYPE_FLOW_RING_CREATE request on the control
 * submission ring.  On any failure the flowring is torn down again.
 *
 * Return: the flowring id, or BRCMF_FLOWRING_INVALID_ID on failure.
 */
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		bphy_err(drvr, "dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	/* firmware-visible ring ids start at FLOWRING_IDSTART */
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}
659 
660 
brcmf_msgbuf_flowring_worker(struct work_struct * work)661 static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
662 {
663 	struct brcmf_msgbuf *msgbuf;
664 	struct brcmf_msgbuf_work_item *create;
665 
666 	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
667 
668 	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
669 		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
670 		kfree(create);
671 	}
672 }
673 
674 
brcmf_msgbuf_flowring_create(struct brcmf_msgbuf * msgbuf,int ifidx,struct sk_buff * skb)675 static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
676 					struct sk_buff *skb)
677 {
678 	struct brcmf_msgbuf_work_item *create;
679 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
680 	u32 flowid;
681 	ulong flags;
682 
683 	create = kzalloc(sizeof(*create), GFP_ATOMIC);
684 	if (create == NULL)
685 		return BRCMF_FLOWRING_INVALID_ID;
686 
687 	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
688 				       skb->priority, ifidx);
689 	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
690 		kfree(create);
691 		return flowid;
692 	}
693 
694 	create->flowid = flowid;
695 	create->ifidx = ifidx;
696 	memcpy(create->sa, eh->h_source, ETH_ALEN);
697 	memcpy(create->da, eh->h_dest, ETH_ALEN);
698 
699 	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
700 	list_add_tail(&create->queue, &msgbuf->work_queue);
701 	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
702 	schedule_work(&msgbuf->flowring_work);
703 
704 	return flowid;
705 }
706 
707 
/* brcmf_msgbuf_txflow() - drain one flowring's queued skbs into its tx
 * ring.
 *
 * For each queued skb: DMA-map it and claim a packet id, reserve a
 * ring slot and fill in an MSGBUF_TYPE_TX_POST descriptor.  Ring
 * doorbells (write_complete) are batched: @count starts at
 * CNT2 - CNT1 so the first flush happens after BRCMF_MSGBUF_TX_FLUSH_CNT1
 * descriptors, then every BRCMF_MSGBUF_TX_FLUSH_CNT2.  On packet-id or
 * ring-slot exhaustion the skb is reinserted into the flow queue and
 * the loop stops; a final flush covers any remaining descriptors.
 */
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			bphy_err(drvr, "No SKB, but qlen %d\n",
				 brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			bphy_err(drvr, "No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			/* undo the pktid allocation/mapping and retry later */
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		/* +1 so that request_id 0 is never used; undone in
		 * brcmf_msgbuf_process_txstatus()
		 */
		tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}
780 
781 
brcmf_msgbuf_txflow_worker(struct work_struct * worker)782 static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
783 {
784 	struct brcmf_msgbuf *msgbuf;
785 	u32 flowid;
786 
787 	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
788 	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
789 		clear_bit(flowid, msgbuf->flow_map);
790 		brcmf_msgbuf_txflow(msgbuf, flowid);
791 	}
792 }
793 
794 
/* Flag @flowid as having queued traffic and kick the txflow worker.
 * The kick is skipped (unless @force) while too many transmissions are
 * still outstanding on that ring.  Always returns 0.
 */
static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *ring;

	set_bit(flowid, msgbuf->flow_map);
	ring = msgbuf->flowrings[flowid];
	if (force || atomic_read(&ring->outstanding_tx) <
		     BRCMF_MSGBUF_DELAY_TXWORKER_THRS)
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}
808 
809 
brcmf_msgbuf_tx_queue_data(struct brcmf_pub * drvr,int ifidx,struct sk_buff * skb)810 static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
811 				      struct sk_buff *skb)
812 {
813 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
814 	struct brcmf_flowring *flow = msgbuf->flow;
815 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
816 	u32 flowid;
817 	u32 queue_count;
818 	bool force;
819 
820 	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
821 	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
822 		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
823 		if (flowid == BRCMF_FLOWRING_INVALID_ID) {
824 			return -ENOMEM;
825 		} else {
826 			brcmf_flowring_enqueue(flow, flowid, skb);
827 			return 0;
828 		}
829 	}
830 	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
831 	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
832 	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
833 
834 	return 0;
835 }
836 
837 
/* Forward the per-interface address mode setting to the flowring
 * module.
 */
static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}
846 
847 
/* Forward peer deletion to the flowring module so rings towards that
 * peer can be torn down.
 */
static void
#if defined(__linux__)
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
#elif defined(__FreeBSD__)
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, const u8 peer[ETH_ALEN])
#endif
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}
859 
860 
/* Forward TDLS peer registration to the flowring module. */
static void
#if defined(__linux__)
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
#elif defined(__FreeBSD__)
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, const u8 peer[ETH_ALEN])
#endif
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}
872 
873 
/* brcmf_msgbuf_process_ioctl_complete() - handle an ioctl completion
 * message from the dongle.
 *
 * Records the firmware status, response length and packet id, wakes
 * the thread blocked in brcmf_msgbuf_query_dcmd(), and posts a
 * replacement ioctl-response buffer.
 */
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	/* status is a signed firmware code carried in an __le16 */
	msgbuf->ioctl_resp_status =
			(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	/* one posted response buffer has been consumed; replenish */
	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}
892 
893 
/* brcmf_msgbuf_process_txstatus() - handle a tx completion message.
 *
 * The tx path stored pktid + 1 in request_id, hence the -1 here.
 * Reclaims the skb and its DMA mapping, marks the flowring as having
 * completed tx work, and finalizes the transmit towards the stack.
 */
static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id) - 1;
	/* firmware reports ring ids offset by FLOWRING_IDSTART */
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
			 skb, true);
}
919 
920 
/* brcmf_msgbuf_rxbuf_data_post() - post up to @count rx buffers on the
 * rx-post submission ring.
 *
 * Reserves ring slots, then for each slot allocates an skb of
 * BRCMF_MSGBUF_MAX_PKT_SIZE, DMA-maps it via the rx packet-id table
 * and fills an MSGBUF_TYPE_RXBUF_POST descriptor.  When rx metadata is
 * enabled, the first rx_metadata_offset bytes of each buffer are set
 * aside for it.  Unused reserved slots are cancelled on allocation
 * failure.
 *
 * Return: the number of buffers actually posted (0 when no ring space
 * was available).
 */
static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
#if defined(__linux__)
	void *ret_ptr;
#elif defined(__FreeBSD__)
	u8 *ret_ptr;
#endif
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			/* give back the slots we will not fill */
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			/* metadata occupies the head of the mapped buffer;
			 * the data region starts right after it
			 */
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}
1001 
1002 
1003 static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf * msgbuf)1004 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
1005 {
1006 	u32 fillbufs;
1007 	u32 retcount;
1008 
1009 	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
1010 
1011 	while (fillbufs) {
1012 		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
1013 		if (!retcount)
1014 			break;
1015 		msgbuf->rxbufpost += retcount;
1016 		fillbufs -= retcount;
1017 	}
1018 }
1019 
1020 
1021 static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf * msgbuf,u16 rxcnt)1022 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
1023 {
1024 	msgbuf->rxbufpost -= rxcnt;
1025 	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
1026 				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
1027 		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
1028 }
1029 
1030 
/*
 * brcmf_msgbuf_rxbuf_ctrl_post() - post RX buffers on the control submit ring.
 * @msgbuf: msgbuf protocol instance.
 * @event_buf: post event buffers when true, ioctl response buffers otherwise.
 * @count: number of buffers requested.
 *
 * Reserves up to @count slots in the H2D control submit ring and fills each
 * slot with a freshly allocated skb whose DMA address and packet id are
 * handed to the firmware. Ring access is serialized with the commonring lock.
 *
 * Return: the number of buffers actually posted, which may be less than
 * @count (0 when no ring space or no resources were available).
 */
static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
#if defined(__linux__)
	void *ret_ptr;
#elif defined(__FreeBSD__)
	/* u8 * keeps the pointer arithmetic below well-defined on FreeBSD */
	u8 *ret_ptr;
#endif
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			/* give back the ring slots we will not fill */
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		/* the firmware echoes this id back in the completion message */
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	/* only publish the write pointer when at least one item was filled */
	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}
1107 
1108 
brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf * msgbuf)1109 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
1110 {
1111 	u32 count;
1112 
1113 	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
1114 	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
1115 	msgbuf->cur_ioctlrespbuf += count;
1116 }
1117 
1118 
brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf * msgbuf)1119 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
1120 {
1121 	u32 count;
1122 
1123 	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
1124 	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
1125 	msgbuf->cur_eventbuf += count;
1126 }
1127 
1128 
/*
 * brcmf_msgbuf_process_event() - handle a MSGBUF_TYPE_WL_EVENT message.
 * @msgbuf: msgbuf protocol instance.
 * @buf: message payload (struct msgbuf_rx_event).
 *
 * Looks up the skb previously posted for this event via the echoed packet
 * id, replenishes the event buffer pool, trims the skb to the reported
 * event length and forwards it to the firmware event handler. The skb is
 * freed here on all paths after lookup.
 */
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;
	struct brcmf_if *ifp;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	/* one event buffer consumed; post a replacement right away */
	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 event->msg.ifidx);
		goto exit;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);

exit:
	brcmu_pkt_buf_free_skb(skb);
}
1170 
1171 
/*
 * brcmf_msgbuf_process_rx_complete() - handle a MSGBUF_TYPE_RX_CMPLT message.
 * @msgbuf: msgbuf protocol instance.
 * @buf: message payload (struct msgbuf_rx_complete).
 *
 * Retrieves the skb belonging to the completed RX buffer, adjusts it to the
 * data offset/length reported by the firmware and hands it either to the
 * monitor interface (802.11-framed packets) or to the regular netif receive
 * path. On delivery the skb is consumed by the called routine; on error
 * paths it is freed here.
 */
static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u16 flags;
	u32 idx;
	struct brcmf_if *ifp;

	/* one RX buffer consumed; refill the post ring if below threshold */
	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);
	flags = le16_to_cpu(rx_complete->flags);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	/* per-packet offset takes precedence over the global data offset */
	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
	    BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
		/* raw 802.11 frame: deliver via the monitor interface */
		ifp = msgbuf->drvr->mon_if;

		if (!ifp) {
			bphy_err(drvr, "Received unexpected monitor pkt\n");
			brcmu_pkt_buf_free_skb(skb);
			return;
		}

		brcmf_netif_mon_rx(ifp, skb);
		return;
	}

	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 rx_complete->msg.ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);
	brcmf_netif_rx(ifp, skb);
}
1229 
brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf * msgbuf,void * buf)1230 static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
1231 					    void *buf)
1232 {
1233 	struct msgbuf_gen_status *gen_status = buf;
1234 	struct brcmf_pub *drvr = msgbuf->drvr;
1235 	int err;
1236 
1237 	err = le16_to_cpu(gen_status->compl_hdr.status);
1238 	if (err)
1239 		bphy_err(drvr, "Firmware reported general error: %d\n", err);
1240 }
1241 
brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf * msgbuf,void * buf)1242 static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
1243 					     void *buf)
1244 {
1245 	struct msgbuf_ring_status *ring_status = buf;
1246 	struct brcmf_pub *drvr = msgbuf->drvr;
1247 	int err;
1248 
1249 	err = le16_to_cpu(ring_status->compl_hdr.status);
1250 	if (err) {
1251 		int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);
1252 
1253 		bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
1254 			 err);
1255 	}
1256 }
1257 
1258 static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf * msgbuf,void * buf)1259 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1260 					       void *buf)
1261 {
1262 	struct brcmf_pub *drvr = msgbuf->drvr;
1263 	struct msgbuf_flowring_create_resp *flowring_create_resp;
1264 	u16 status;
1265 	u16 flowid;
1266 
1267 	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
1268 
1269 	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
1270 	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1271 	status =  le16_to_cpu(flowring_create_resp->compl_hdr.status);
1272 
1273 	if (status) {
1274 		bphy_err(drvr, "Flowring creation failed, code %d\n", status);
1275 		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1276 		return;
1277 	}
1278 	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
1279 		  status);
1280 
1281 	brcmf_flowring_open(msgbuf->flow, flowid);
1282 
1283 	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1284 }
1285 
1286 
1287 static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf * msgbuf,void * buf)1288 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
1289 					       void *buf)
1290 {
1291 	struct brcmf_pub *drvr = msgbuf->drvr;
1292 	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
1293 	u16 status;
1294 	u16 flowid;
1295 
1296 	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
1297 
1298 	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
1299 	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1300 	status =  le16_to_cpu(flowring_delete_resp->compl_hdr.status);
1301 
1302 	if (status) {
1303 		bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
1304 		brcmf_flowring_delete(msgbuf->flow, flowid);
1305 		return;
1306 	}
1307 	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
1308 		  status);
1309 
1310 	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1311 }
1312 
1313 
/*
 * brcmf_msgbuf_process_msgtype() - dispatch one D2H message by its type.
 * @msgbuf: msgbuf protocol instance.
 * @buf: start of the message; the common header's msgtype field selects the
 *	 handler.
 *
 * Unknown message types are logged and otherwise ignored.
 */
static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_GEN_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
		brcmf_msgbuf_process_gen_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RING_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
		brcmf_msgbuf_process_ring_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		/* ack only; completion is reported via MSGBUF_TYPE_IOCTL_CMPLT */
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}
1361 
1362 
/*
 * brcmf_msgbuf_process_rx() - drain all pending items from a D2H ring.
 * @msgbuf: msgbuf protocol instance.
 * @commonring: the device-to-host ring to drain.
 *
 * Processes every readable item, publishing the read pointer back every
 * BRCMF_MSGBUF_UPDATE_RX_PTR_THRS items so the firmware can reuse ring
 * slots while a large batch is still in flight. When the read pointer ends
 * up back at slot 0 another pass is made to pick up any items written in
 * the meantime.
 */
static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
#if defined(__linux__)
	void *buf;
#elif defined(__FreeBSD__)
	/* u8 * keeps the pointer arithmetic below well-defined on FreeBSD */
	u8 *buf;
#endif
	u16 count;
	u16 processed;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	processed = 0;
	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		processed++;
		/* release ring slots in batches to the firmware */
		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
			brcmf_commonring_read_complete(commonring, processed);
			processed = 0;
		}
		count--;
	}
	if (processed)
		brcmf_commonring_read_complete(commonring, processed);

	if (commonring->r_ptr == 0)
		goto again;
}
1397 
1398 
/*
 * brcmf_proto_msgbuf_rx_trigger() - bus entry point for D2H ring servicing.
 * @dev: bus device owning the msgbuf protocol instance.
 *
 * Drains the RX-, TX- and control-complete rings, then reschedules the
 * txflow worker for every flowring that completed tx status and either has
 * a lot of traffic queued or has room for more outstanding transmits.
 *
 * Return: always 0.
 */
int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	/* txstatus_done_map bits were set while draining TX_COMPLETE above */
	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}
1429 
1430 
/*
 * brcmf_msgbuf_delete_flowring() - tear down a tx flowring.
 * @drvr: driver instance.
 * @flowid: host flowring index (without the firmware id offset).
 *
 * Marks the ring closing, waits (bounded) for outstanding tx status to
 * drain, then submits a FLOW_RING_DELETE request to the firmware. If the
 * bus is down or the request cannot be submitted, the host-side flowring
 * state is removed immediately; otherwise removal happens when the delete
 * response arrives.
 */
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
	struct brcmf_flowring *flow = msgbuf->flow;
	void *ret_ptr;
	u8 ifidx;
	int err;
	int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;

	/* make sure it is not in txflow */
	brcmf_commonring_lock(commonring_del);
	flow->rings[flowid]->status = RING_CLOSING;
	brcmf_commonring_unlock(commonring_del);

	/* wait for commonring txflow finished */
	while (retry && atomic_read(&commonring_del->outstanding_tx)) {
		usleep_range(5000, 10000);
		retry--;
	}
	if (!retry) {
		/* proceed anyway; the counter is force-cleared */
		brcmf_err("timed out waiting for txstatus\n");
		atomic_set(&commonring_del->outstanding_tx, 0);
	}

	/* no need to submit if firmware can not be reached */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	/* firmware ring ids start at BRCMF_H2D_MSGRING_FLOWRING_IDSTART */
	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}
1497 
#ifdef DEBUG
/*
 * brcmf_msgbuf_stats_read() - debugfs dump of msgbuf ring state.
 * @seq: seq_file to print into; seq->private holds the bus device.
 * @data: unused.
 *
 * Prints read/write pointers and depth for every commonring and a summary
 * line for each open flowring. Return: always 0.
 */
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	u16 i;
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);

	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	seq_puts(seq, "Active flowrings:\n");
	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
		/* only report rings that exist and are fully open */
		if (!msgbuf->flow->rings[i])
			continue;
		ring = msgbuf->flow->rings[i];
		if (ring->status != RING_OPEN)
			continue;
		commonring = msgbuf->flowrings[i];
		hash = &msgbuf->flow->hash[ring->hash_id];
		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
				"        ifidx %u, fifo %u, da %pM\n",
				i, commonring->r_ptr, commonring->w_ptr,
				skb_queue_len(&ring->skblist), ring->blocked,
				hash->ifidx, hash->fifo, hash->mac);
	}

	return 0;
}
#else
/* Non-DEBUG builds expose the debugfs entry but print nothing. */
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif
1551 
/* Register the "msgbuf_stats" debugfs entry for this device. */
static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}
1556 
/*
 * brcmf_proto_msgbuf_attach() - set up the msgbuf protocol layer.
 * @drvr: driver instance.
 *
 * Allocates all host state (txflow workqueue, flow bitmaps, packet id
 * pools, the shared ioctl DMA buffer, the flowring manager), wires the
 * protocol callbacks into @drvr->proto and pre-posts RX data, event and
 * ioctl response buffers to the firmware.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (all partially
 * allocated state is released on the fail path).
 */
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;

	/* clamp to what the flowring hash can address */
	if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
		bphy_err(drvr, "driver not configured for this many flowrings %d\n",
			 if_msgbuf->max_flowrings);
		if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
	}

	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		bphy_err(drvr, "workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	/* one bit per flowring, rounded up to whole longs */
	count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	/* split the DMA address for the 32-bit high/low message fields */
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
	drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->max_flowrings = if_msgbuf->max_flowrings;
	msgbuf->flowring_dma_handle =
		kcalloc(msgbuf->max_flowrings,
			sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->max_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	/* retry posting RX buffers for up to ten 10 ms intervals */
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
#if defined(__linux__)
			msleep(10);
#elif defined(__FreeBSD__)
			linux_msleep(10);
#endif
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	/* msgbuf may be NULL when its own allocation failed */
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);
		kfree(msgbuf);
	}
	return -ENOMEM;
}
1689 
1690 
/*
 * brcmf_proto_msgbuf_detach() - tear down the msgbuf protocol layer.
 * @drvr: driver instance.
 *
 * Cancels pending flowring work, drains the work queue, and releases all
 * resources acquired by brcmf_proto_msgbuf_attach(). Safe to call when the
 * protocol layer was never attached (@drvr->proto->pd is NULL).
 */
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		/* free any work items that never got processed */
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}
1722