/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high priority and low priority) for TX and 2 channels
 * for RX. Data packets are transferred through the low-priority channels and
 * management packets through the high-priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

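/*
 * Return the CPU address of the buffer descriptor (BD) attached to the
 * current head control block of the selected TX channel (low priority for
 * data, high priority for management).
 */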
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->mmio + addr);
}

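/*
 * A few DXE registers sit at different offsets on the WCN3680 than on the
 * WCN3660; pick the matching register variant based on the chip version.
 */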
#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)		 \
do {									 \
	if (wcn->chip_version == WCN36XX_CHIP_3680)			 \
		wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
	else								 \
		wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
} while (0)

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->mmio + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

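/*
 * Free the control block list of one channel. Iteration is bounded by
 * desc_num so the loop terminates even though the list is circular.
 */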
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

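/*
 * Allocate desc_num control blocks and link them into a circular list: the
 * last block's next pointer wraps around to the head of the channel.
 */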
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

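/*
 * Set up the static parameters (channel type, ring size, work queue and
 * control words) for all four DXE channels, allocate their control block
 * rings and report via SMSM that the TX rings are empty.
 */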
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

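/*
 * Allocate the coherent DMA memory that holds the hardware descriptor ring
 * for one channel, attach each descriptor to its control block and chain the
 * descriptors' physical next pointers into a ring (the last descriptor
 * points back to the first one).
 */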
static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

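/*
 * Hand out BD chunks from the preallocated memory pool to a TX channel.
 * Descriptors are used in pairs: the even ones carry the buffer descriptor
 * (BD), the odd ones carry the actual skb data, so only every second control
 * block gets a BD address.
 */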
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the others will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

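/*
 * Unmask the interrupt for the given channel by setting its bit in the DXE
 * interrupt mask register.
 */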
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

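/*
 * Allocate a fresh receive skb and map it for DMA, storing the bus address
 * in the descriptor so the hardware can fill it with an incoming frame.
 */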
static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(NULL,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

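/*
 * Called when the firmware reports the TX status of a frame for which
 * mac80211 requested status reporting. Detach the pending skb under the
 * dxe_lock, mark it as ACKed if the status indicates success and hand it
 * back to mac80211.
 */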
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

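/*
 * Reclaim completed TX descriptors, starting from the ring tail: unmap the
 * DMA buffer, release skbs that do not need a TX status report and restart
 * the mac80211 queues if they were stopped because the ring was full.
 */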
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Run the loop at least once: when the ring is completely full, head
	 * and tail point to the same element, so a plain while loop would
	 * not execute at all.
	 */
	do {
		if (ctl->skb) {
			dma_unmap_single(NULL, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed now; otherwise it is kept
				 * until the TX status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock_irqsave(&ctl->skb_lock, flags);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock_irqrestore(&ctl->skb_lock, flags);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
}

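/*
 * TX interrupt handler: determine which TX channel(s) raised the interrupt,
 * acknowledge them in the DXE interrupt clear registers and reap the
 * completed descriptors of the corresponding ring.
 */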
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

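/*
 * Walk the RX ring from the current head: for every descriptor the hardware
 * has completed (VALID bit cleared), replace the buffer with a freshly
 * mapped skb, re-arm the descriptor for the channel and pass the received
 * frame up to wcn36xx_rx_skb().
 */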
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		wcn36xx_dxe_fill_skb(ctl);

		switch (ch->ch_type) {
		case WCN36XX_DXE_CH_RX_L:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH1_MASK);
			break;
		case WCN36XX_DXE_CH_RX_H:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH3_MASK);
			break;
		default:
			wcn36xx_warn("Unknown channel\n");
		}

		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, skb);
		ctl = ctl->next;
		dxe = ctl->desc;
	}

	ch->head_blk_ctl = ctl;

	return 0;
}

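/*
 * Called from the RX interrupt handler: check the raw interrupt source
 * register, acknowledge the low- and/or high-priority RX channel interrupts
 * and process the pending packets on each ring.
 */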
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

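/*
 * Allocate the coherent memory pools that hold the buffer descriptor (BD)
 * headers prepended to outgoing frames: one pool for management frames
 * (TX high) and one for data frames (TX low), sized to one chunk per
 * descriptor pair.
 */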
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* The origin of this chunk size calculation is unclear (ask QC) */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* The origin of this chunk size calculation is unclear (ask QC) */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

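/*
 * Queue one frame on a TX channel. Two consecutive descriptors are used:
 * the first carries the preallocated buffer descriptor (BD), the second the
 * DMA-mapped skb payload. Returns -EBUSY and stops the mac80211 queues when
 * the ring is full.
 */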
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	ctl = ch->head_blk_ctl;

	spin_lock_irqsave(&ctl->next->skb_lock, flags);

	/*
	 * If the skb is not NULL we have reached the tail of the ring, i.e.
	 * the ring is full. Stop the queues to let mac80211 back off until
	 * the ring has an empty slot again.
	 */
	if (NULL != ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* Set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When the interface is in BMPS power-save mode the chip may be
	 * asleep while we try to send a data frame, and writing to the DXE
	 * register will not wake it up. Notify the chip about the new frame
	 * through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
				  0,
				  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	return 0;
}

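/*
 * Bring up the DXE engine: reset it, route its interrupts, initialize the
 * descriptor rings for the two TX and two RX channels, preallocate RX
 * buffers, program the per-channel registers and finally request the TX and
 * RX interrupt lines.
 */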
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Setting interrupt path */
	reg_data = WCN36XX_DXE_CCU_INT;
	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

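/*
 * Tear down the DXE engine: release the interrupt lines, complete any TX
 * status report that is still pending and free the preallocated RX buffers.
 */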
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}
814