xref: /linux/drivers/net/wireless/ath/ath10k/snoc.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
1 /*
2  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/regulator/consumer.h>
24 
25 #include "ce.h"
26 #include "debug.h"
27 #include "hif.h"
28 #include "htc.h"
29 #include "snoc.h"
30 
/* Retry interval for RX buffer replenishing. Named in milliseconds but
 * added directly to jiffies in ath10k_snoc_rx_post_pipe() —
 * NOTE(review): looks like a missing msecs_to_jiffies() conversion;
 * confirm intended delay (the same pattern exists in the PCI bus code).
 */
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
/* CE4 (host->target HTT, interrupts disabled) is serviced by polling
 * whenever an HTT RX pipe completes; see ath10k_snoc_htt_htc_rx_cb().
 */
#define CE_POLL_PIPE 4
33 
/* Human-readable names for the per-copy-engine interrupt lines,
 * indexed by CE id; passed to request_irq() in ath10k_snoc_request_irq().
 */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
48 
/* Voltage regulators required by the WCN3990 module. Initializers are
 * positional; presumably {regulator handle, DT supply name, min uV,
 * max uV, load uA, settle delay, required} — TODO confirm field order
 * against struct ath10k_vreg_info in snoc.h.
 */
static struct ath10k_vreg_info vreg_cfg[] = {
	{NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
	{NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
	{NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
	{NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
};
55 
/* External clock(s) consumed by the WCN3990. Positional initializer;
 * presumably {clk handle, DT clock name, rate, required} — TODO confirm
 * against struct ath10k_clk_info in snoc.h.
 */
static struct ath10k_clk_info clk_cfg[] = {
	{NULL, "cxo_ref_clk_pin", 0, false},
};
59 
/* Forward declarations of the CE send/receive completion callbacks that
 * are wired into host_ce_config_wlan[] below.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
66 
/* Device match data for the WCN3990: hardware revision, 37-bit DMA
 * addressing, and a 1 MiB (0x100000) MSA region size.
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(37),
	.msa_size = 0x100000,
};
72 
/* Shadow-register offsets of the source/destination ring write index,
 * used to build target_shadow_reg_cfg_map[] below.
 */
#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40
75 
/* Per-CE shadow register configuration handed to firmware via QMI in
 * ath10k_snoc_wlan_enable(): {CE id, write-index register offset}.
 * Source-ring entries come first, then destination-ring entries.
 * NOTE(review): CE 7 appears in both lists — presumably because it has
 * both a source and a destination ring (diag pipe); confirm.
 */
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
		{
			.ce_id = __cpu_to_le16(0),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(3),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(4),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(5),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(1),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(2),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(8),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(9),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(10),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(11),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},
};
137 
/* Host-side attributes for the 12 copy engines: ring sizes, maximum
 * buffer sizes and the send/recv completion callbacks. Indexed by CE
 * id; pipes with .dest_nentries == 0 are TX-only, with
 * .src_nentries == 0 RX-only. CE4 has interrupts disabled
 * (CE_ATTR_DIS_INTR) and is serviced by polling via CE_POLL_PIPE.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 256,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
244 
/* Target-side CE pipe configuration, serialized little-endian and sent
 * to firmware through ath10k_snoc_wlan_enable(). Must stay in sync with
 * host_ce_config_wlan[] above.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		/* NOTE(review): literal 4 is not one of the PIPEDIR_*
		 * values used elsewhere in this table — presumably a
		 * host-only direction marker; confirm against hw.h.
		 */
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
366 
/* HTC service -> CE pipe routing table. Entries are positional triples
 * {service_id, pipedir, pipenum} (field order established by the reads
 * in ath10k_snoc_hif_map_service_to_pipe()). Also sent to firmware in
 * ath10k_snoc_wlan_enable(); the all-zero terminator must stay last.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
476 
477 static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
478 {
479 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
480 
481 	iowrite32(value, ar_snoc->mem + offset);
482 }
483 
484 static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
485 {
486 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
487 	u32 val;
488 
489 	val = ioread32(ar_snoc->mem + offset);
490 
491 	return val;
492 }
493 
/* Allocate one RX skb, DMA-map it and post it to @pipe's CE destination
 * ring (under ce_lock). On failure the skb is unmapped/freed here.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on DMA
 * mapping failure, or the CE post error (e.g. -ENOSPC when the ring
 * is full).
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* CE DMA expects 4-byte aligned buffers. */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the mapping so the RX completion path can unmap it. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
532 
/* Top up @pipe's CE destination ring with RX buffers, one per free
 * slot. -ENOSPC just means the ring filled up; any other post failure
 * arms the rx_post_retry timer so replenishing is retried later.
 */
static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	/* TX-only or unconfigured pipes have nothing to replenish. */
	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			/* NOTE(review): the MS constant is added to
			 * jiffies without msecs_to_jiffies() — confirm
			 * the intended retry delay.
			 */
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}
562 
563 static void ath10k_snoc_rx_post(struct ath10k *ar)
564 {
565 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
566 	int i;
567 
568 	for (i = 0; i < CE_COUNT; i++)
569 		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
570 }
571 
/* Common RX completion path: drain all completed receives from
 * @ce_state, unmap and length-check each skb, then deliver the batch
 * through @callback and replenish the pipe's RX buffers.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	/* Phase 1: collect completed buffers before delivering any, so
	 * delivery work doesn't interleave with ring processing.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Drop frames the target claims are larger than the
		 * buffer we posted.
		 */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	/* Phase 2: hand the batch to the upper layer. */
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}
612 
/* CE recv callback for HTC control pipes: deliver straight to HTC. */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
617 
/* CE recv callback for pipes carrying HTT+HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
627 
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 * Pktlog frames are delivered through the normal HTC RX handler.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
635 
/* Strip the HTC header and pass the remaining HTT message to the
 * target-to-host HTT handler.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
641 
/* CE recv callback for the HTT-only RX pipe (CE5); also polls CE4,
 * which runs with interrupts disabled.
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
647 
/* rx_post_retry timer callback: retry the RX buffer replenish that
 * previously failed in ath10k_snoc_rx_post_pipe().
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
655 
656 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
657 {
658 	struct ath10k *ar = ce_state->ar;
659 	struct sk_buff_head list;
660 	struct sk_buff *skb;
661 
662 	__skb_queue_head_init(&list);
663 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
664 		if (!skb)
665 			continue;
666 
667 		__skb_queue_tail(&list, skb);
668 	}
669 
670 	while ((skb = __skb_dequeue(&list)))
671 		ath10k_htc_tx_completion_handler(ar, skb);
672 }
673 
674 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
675 {
676 	struct ath10k *ar = ce_state->ar;
677 	struct sk_buff *skb;
678 
679 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
680 		if (!skb)
681 			continue;
682 
683 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
684 				 skb->len, DMA_TO_DEVICE);
685 		ath10k_htt_hif_tx_complete(ar, skb);
686 	}
687 }
688 
/* HIF scatter-gather send: queue @n_items fragments on @pipe_id's CE
 * under a single ce_lock hold. All but the last fragment carry
 * CE_SEND_FLAG_GATHER so the CE treats them as one transfer; the last
 * fragment (flags 0) completes it. On failure, already-queued
 * descriptors are reverted before unlocking.
 *
 * Returns 0 on success or the ath10k_ce_send_nolock() error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	/* All items except the last are queued with the GATHER flag. */
	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Last item: no GATHER flag, terminating the transfer. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Undo the i descriptors queued before the failure (one revert
	 * call per queued item, matching the PCI bus implementation).
	 */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
741 
/* HIF op: report the target version/type. WCN3990 has no BMI phase, so
 * both fields are hard-coded to the WCN3990 hw identifier.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
750 
/* HIF op: number of free source-ring entries on @pipe's copy engine. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
759 
/* HIF op: reap TX completions on @pipe. Unless @force is set, skip the
 * (expensive) CE service while more than half of the pipe's source
 * entries are still free.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
775 
/* HIF op: resolve @service_id to its uplink/downlink CE pipe numbers by
 * scanning target_service_to_ce_map_wlan[].
 *
 * Returns 0 with *ul_pipe/*dl_pipe set, or -ENOENT when the service has
 * no entry for one of the directions. WARNs if a direction is mapped
 * more than once for the same service.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	/* Both directions must have been resolved. */
	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
821 
/* HIF op: default (control) pipes are those mapped to the reserved
 * control service; the lookup cannot fail for RSVD_CTRL, so the return
 * value is deliberately ignored.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
831 
/* Mask all copy engine interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
836 
/* Unmask all copy engine interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
841 
/* Free all RX buffers still posted on @snoc_pipe's destination ring:
 * clear each transfer context, unmap its DMA mapping and free the skb.
 * Used on shutdown; pipes without a dest ring or buffer size are
 * skipped.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
873 
874 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
875 {
876 	struct ath10k_ce_pipe *ce_pipe;
877 	struct ath10k_ce_ring *ce_ring;
878 	struct ath10k_snoc *ar_snoc;
879 	struct sk_buff *skb;
880 	struct ath10k *ar;
881 	int i;
882 
883 	ar = snoc_pipe->hif_ce_state;
884 	ar_snoc = ath10k_snoc_priv(ar);
885 	ce_pipe = snoc_pipe->ce_hdl;
886 	ce_ring = ce_pipe->src_ring;
887 
888 	if (!ce_ring)
889 		return;
890 
891 	if (!snoc_pipe->buf_sz)
892 		return;
893 
894 	for (i = 0; i < ce_ring->nentries; i++) {
895 		skb = ce_ring->per_transfer_context[i];
896 		if (!skb)
897 			continue;
898 
899 		ce_ring->per_transfer_context[i] = NULL;
900 
901 		ath10k_htc_tx_completion_handler(ar, skb);
902 	}
903 }
904 
/* Stop the RX replenish timer and release every pending RX/TX buffer
 * on all copy engine pipes.
 */
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	/* Make sure no replenish can race with the cleanup below. */
	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}
918 
/* HIF op: stop the interface — mask CE interrupts (unless a crash flush
 * is in progress), quiesce and disable NAPI, then free all pending
 * buffers.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
929 
/* HIF op: start the interface — enable NAPI and CE interrupts, prime
 * the RX rings and clear the recovery flag. Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
944 
/* Initialize every copy engine pipe from host_ce_config_wlan[].
 *
 * Returns 0 on success or the first ath10k_ce_init_pipe() error.
 */
static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}
960 
961 static int ath10k_snoc_wlan_enable(struct ath10k *ar)
962 {
963 	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
964 	struct ath10k_qmi_wlan_enable_cfg cfg;
965 	enum wlfw_driver_mode_enum_v01 mode;
966 	int pipe_num;
967 
968 	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
969 		tgt_cfg[pipe_num].pipe_num =
970 				target_ce_config_wlan[pipe_num].pipenum;
971 		tgt_cfg[pipe_num].pipe_dir =
972 				target_ce_config_wlan[pipe_num].pipedir;
973 		tgt_cfg[pipe_num].nentries =
974 				target_ce_config_wlan[pipe_num].nentries;
975 		tgt_cfg[pipe_num].nbytes_max =
976 				target_ce_config_wlan[pipe_num].nbytes_max;
977 		tgt_cfg[pipe_num].flags =
978 				target_ce_config_wlan[pipe_num].flags;
979 		tgt_cfg[pipe_num].reserved = 0;
980 	}
981 
982 	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
983 				sizeof(struct ath10k_tgt_pipe_cfg);
984 	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
985 		&tgt_cfg;
986 	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
987 				  sizeof(struct ath10k_svc_pipe_cfg);
988 	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
989 		&target_service_to_ce_map_wlan;
990 	cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
991 					sizeof(struct ath10k_shadow_reg_cfg);
992 	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
993 		&target_shadow_reg_cfg_map;
994 
995 	mode = QMI_WLFW_MISSION_V01;
996 
997 	return ath10k_qmi_wlan_enable(ar, &cfg, mode,
998 				       NULL);
999 }
1000 
/* Send the QMI WLAN-disable request, unless firmware has crashed (a
 * crash flush means firmware won't answer the QMI exchange).
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_qmi_wlan_disable(ar);
}
1006 
/* HIF op: power down — disable WLAN via QMI and free the CE RRI
 * (ring-index) area allocated in power_up.
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}
1014 
/* HIF op: power up — enable WLAN via QMI, allocate the CE RRI area and
 * initialize all CE pipes. On pipe-init failure the QMI enable is
 * rolled back.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}
1043 
/* HIF operations exported by the SNOC bus layer to ath10k core. */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
};
1058 
/* Low-level register accessors used by the shared CE code. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};
1063 
1064 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1065 {
1066 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1067 	int i;
1068 
1069 	for (i = 0; i < CE_COUNT_MAX; i++) {
1070 		if (ar_snoc->ce_irqs[i].irq_line == irq)
1071 			return i;
1072 	}
1073 	ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1074 
1075 	return -EINVAL;
1076 }
1077 
/* Per-CE interrupt handler: validate the irq->CE mapping, then mask CE
 * interrupts and defer all processing to NAPI (re-enabled in the poll
 * function once the budget is not exhausted).
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
1095 
/* NAPI poll: service all copy engines and process HTT TX/RX completions
 * within @budget. If the budget was not exhausted, complete NAPI and
 * re-enable CE interrupts. Bails out immediately during a crash flush.
 *
 * Returns the amount of budget consumed.
 */
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);
	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}
1116 
/* Register the NAPI poll handler on the driver's dummy netdev. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
1122 
1123 static int ath10k_snoc_request_irq(struct ath10k *ar)
1124 {
1125 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1126 	int irqflags = IRQF_TRIGGER_RISING;
1127 	int ret, id;
1128 
1129 	for (id = 0; id < CE_COUNT_MAX; id++) {
1130 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1131 				  ath10k_snoc_per_engine_handler,
1132 				  irqflags, ce_name[id], ar);
1133 		if (ret) {
1134 			ath10k_err(ar,
1135 				   "failed to register IRQ handler for CE %d: %d",
1136 				   id, ret);
1137 			goto err_irq;
1138 		}
1139 	}
1140 
1141 	return 0;
1142 
1143 err_irq:
1144 	for (id -= 1; id >= 0; id--)
1145 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1146 
1147 	return ret;
1148 }
1149 
1150 static void ath10k_snoc_free_irq(struct ath10k *ar)
1151 {
1152 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1153 	int id;
1154 
1155 	for (id = 0; id < CE_COUNT_MAX; id++)
1156 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1157 }
1158 
1159 static int ath10k_snoc_resource_init(struct ath10k *ar)
1160 {
1161 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1162 	struct platform_device *pdev;
1163 	struct resource *res;
1164 	int i, ret = 0;
1165 
1166 	pdev = ar_snoc->dev;
1167 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1168 	if (!res) {
1169 		ath10k_err(ar, "Memory base not found in DT\n");
1170 		return -EINVAL;
1171 	}
1172 
1173 	ar_snoc->mem_pa = res->start;
1174 	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1175 				    resource_size(res));
1176 	if (!ar_snoc->mem) {
1177 		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1178 			   &ar_snoc->mem_pa);
1179 		return -EINVAL;
1180 	}
1181 
1182 	for (i = 0; i < CE_COUNT; i++) {
1183 		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1184 		if (!res) {
1185 			ath10k_err(ar, "failed to get IRQ%d\n", i);
1186 			ret = -ENODEV;
1187 			goto out;
1188 		}
1189 		ar_snoc->ce_irqs[i].irq_line = res->start;
1190 	}
1191 
1192 out:
1193 	return ret;
1194 }
1195 
/* Handle a firmware state indication delivered by the QMI layer.
 *
 * FW_READY: on the first indication, register the ath10k core; on any
 * later one (firmware restarted), kick the restart worker instead.
 * FW_DOWN: flag a recovery in progress and flush pending work.
 * Returns 0 on success or a negative errno.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params;
	int ret;

	/* Driver teardown has started (see ath10k_snoc_remove); ignore. */
	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		/* Already registered: this FW_READY follows a firmware
		 * restart, so schedule core recovery.
		 */
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			queue_work(ar->workqueue, &ar->restart_work);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
1233 
1234 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1235 {
1236 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1237 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1238 	struct ath10k_snoc_pipe *pipe;
1239 	int i, ret;
1240 
1241 	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1242 	spin_lock_init(&ce->ce_lock);
1243 	for (i = 0; i < CE_COUNT; i++) {
1244 		pipe = &ar_snoc->pipe_info[i];
1245 		pipe->ce_hdl = &ce->ce_states[i];
1246 		pipe->pipe_num = i;
1247 		pipe->hif_ce_state = ar;
1248 
1249 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1250 		if (ret) {
1251 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1252 				   i, ret);
1253 			return ret;
1254 		}
1255 
1256 		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1257 	}
1258 	ath10k_snoc_init_napi(ar);
1259 
1260 	return 0;
1261 }
1262 
1263 static void ath10k_snoc_release_resource(struct ath10k *ar)
1264 {
1265 	int i;
1266 
1267 	netif_napi_del(&ar->napi);
1268 	for (i = 0; i < CE_COUNT; i++)
1269 		ath10k_ce_free_pipe(ar, i);
1270 }
1271 
1272 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
1273 				struct ath10k_vreg_info *vreg_info)
1274 {
1275 	struct regulator *reg;
1276 	int ret = 0;
1277 
1278 	reg = devm_regulator_get_optional(dev, vreg_info->name);
1279 
1280 	if (IS_ERR(reg)) {
1281 		ret = PTR_ERR(reg);
1282 
1283 		if (ret  == -EPROBE_DEFER) {
1284 			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1285 				   vreg_info->name);
1286 			return ret;
1287 		}
1288 		if (vreg_info->required) {
1289 			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1290 				   vreg_info->name, ret);
1291 			return ret;
1292 		}
1293 		ath10k_dbg(ar, ATH10K_DBG_SNOC,
1294 			   "Optional regulator %s doesn't exist: %d\n",
1295 			   vreg_info->name, ret);
1296 		goto done;
1297 	}
1298 
1299 	vreg_info->reg = reg;
1300 
1301 done:
1302 	ath10k_dbg(ar, ATH10K_DBG_SNOC,
1303 		   "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1304 		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1305 		   vreg_info->load_ua, vreg_info->settle_delay);
1306 
1307 	return 0;
1308 }
1309 
1310 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1311 			       struct ath10k_clk_info *clk_info)
1312 {
1313 	struct clk *handle;
1314 	int ret = 0;
1315 
1316 	handle = devm_clk_get(dev, clk_info->name);
1317 	if (IS_ERR(handle)) {
1318 		ret = PTR_ERR(handle);
1319 		if (clk_info->required) {
1320 			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1321 				   clk_info->name, ret);
1322 			return ret;
1323 		}
1324 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1325 			   clk_info->name,
1326 			   ret);
1327 		return 0;
1328 	}
1329 
1330 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1331 		   clk_info->name, clk_info->freq);
1332 
1333 	clk_info->handle = handle;
1334 
1335 	return ret;
1336 }
1337 
/* Bring a single regulator up: program its voltage window, apply the
 * configured load (if any), enable it, and wait out its settle delay.
 *
 * On failure, unwinds in reverse order (load first, then voltage);
 * the err_* labels deliberately fall through to undo earlier steps.
 * Returns 0 on success or the failing regulator API's error code.
 */
static int __ath10k_snoc_vreg_on(struct ath10k *ar,
				 struct ath10k_vreg_info *vreg_info)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
		   vreg_info->name);

	ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
				    vreg_info->max_v);
	if (ret) {
		ath10k_err(ar,
			   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
			   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
		return ret;
	}

	/* load_ua == 0 means no load request is configured for this rail. */
	if (vreg_info->load_ua) {
		ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
		if (ret < 0) {
			ath10k_err(ar, "failed to set regulator %s load: %d\n",
				   vreg_info->name, vreg_info->load_ua);
			goto err_set_load;
		}
	}

	ret = regulator_enable(vreg_info->reg);
	if (ret) {
		ath10k_err(ar, "failed to enable regulator %s\n",
			   vreg_info->name);
		goto err_enable;
	}

	/* Some rails need time to stabilize before the target is touched. */
	if (vreg_info->settle_delay)
		udelay(vreg_info->settle_delay);

	return 0;

err_enable:
	regulator_set_load(vreg_info->reg, 0);
err_set_load:
	regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);

	return ret;
}
1383 
1384 static int __ath10k_snoc_vreg_off(struct ath10k *ar,
1385 				  struct ath10k_vreg_info *vreg_info)
1386 {
1387 	int ret;
1388 
1389 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
1390 		   vreg_info->name);
1391 
1392 	ret = regulator_disable(vreg_info->reg);
1393 	if (ret)
1394 		ath10k_err(ar, "failed to disable regulator %s\n",
1395 			   vreg_info->name);
1396 
1397 	ret = regulator_set_load(vreg_info->reg, 0);
1398 	if (ret < 0)
1399 		ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
1400 
1401 	ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
1402 	if (ret)
1403 		ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
1404 
1405 	return ret;
1406 }
1407 
1408 static int ath10k_snoc_vreg_on(struct ath10k *ar)
1409 {
1410 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1411 	struct ath10k_vreg_info *vreg_info;
1412 	int ret = 0;
1413 	int i;
1414 
1415 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1416 		vreg_info = &ar_snoc->vreg[i];
1417 
1418 		if (!vreg_info->reg)
1419 			continue;
1420 
1421 		ret = __ath10k_snoc_vreg_on(ar, vreg_info);
1422 		if (ret)
1423 			goto err_reg_config;
1424 	}
1425 
1426 	return 0;
1427 
1428 err_reg_config:
1429 	for (i = i - 1; i >= 0; i--) {
1430 		vreg_info = &ar_snoc->vreg[i];
1431 
1432 		if (!vreg_info->reg)
1433 			continue;
1434 
1435 		__ath10k_snoc_vreg_off(ar, vreg_info);
1436 	}
1437 
1438 	return ret;
1439 }
1440 
1441 static int ath10k_snoc_vreg_off(struct ath10k *ar)
1442 {
1443 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1444 	struct ath10k_vreg_info *vreg_info;
1445 	int ret = 0;
1446 	int i;
1447 
1448 	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1449 		vreg_info = &ar_snoc->vreg[i];
1450 
1451 		if (!vreg_info->reg)
1452 			continue;
1453 
1454 		ret = __ath10k_snoc_vreg_off(ar, vreg_info);
1455 	}
1456 
1457 	return ret;
1458 }
1459 
1460 static int ath10k_snoc_clk_init(struct ath10k *ar)
1461 {
1462 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1463 	struct ath10k_clk_info *clk_info;
1464 	int ret = 0;
1465 	int i;
1466 
1467 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1468 		clk_info = &ar_snoc->clk[i];
1469 
1470 		if (!clk_info->handle)
1471 			continue;
1472 
1473 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1474 			   clk_info->name);
1475 
1476 		if (clk_info->freq) {
1477 			ret = clk_set_rate(clk_info->handle, clk_info->freq);
1478 
1479 			if (ret) {
1480 				ath10k_err(ar, "failed to set clock %s freq %u\n",
1481 					   clk_info->name, clk_info->freq);
1482 				goto err_clock_config;
1483 			}
1484 		}
1485 
1486 		ret = clk_prepare_enable(clk_info->handle);
1487 		if (ret) {
1488 			ath10k_err(ar, "failed to enable clock %s\n",
1489 				   clk_info->name);
1490 			goto err_clock_config;
1491 		}
1492 	}
1493 
1494 	return 0;
1495 
1496 err_clock_config:
1497 	for (i = i - 1; i >= 0; i--) {
1498 		clk_info = &ar_snoc->clk[i];
1499 
1500 		if (!clk_info->handle)
1501 			continue;
1502 
1503 		clk_disable_unprepare(clk_info->handle);
1504 	}
1505 
1506 	return ret;
1507 }
1508 
1509 static int ath10k_snoc_clk_deinit(struct ath10k *ar)
1510 {
1511 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1512 	struct ath10k_clk_info *clk_info;
1513 	int i;
1514 
1515 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1516 		clk_info = &ar_snoc->clk[i];
1517 
1518 		if (!clk_info->handle)
1519 			continue;
1520 
1521 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1522 			   clk_info->name);
1523 
1524 		clk_disable_unprepare(clk_info->handle);
1525 	}
1526 
1527 	return 0;
1528 }
1529 
1530 static int ath10k_hw_power_on(struct ath10k *ar)
1531 {
1532 	int ret;
1533 
1534 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1535 
1536 	ret = ath10k_snoc_vreg_on(ar);
1537 	if (ret)
1538 		return ret;
1539 
1540 	ret = ath10k_snoc_clk_init(ar);
1541 	if (ret)
1542 		goto vreg_off;
1543 
1544 	return ret;
1545 
1546 vreg_off:
1547 	ath10k_snoc_vreg_off(ar);
1548 	return ret;
1549 }
1550 
1551 static int ath10k_hw_power_off(struct ath10k *ar)
1552 {
1553 	int ret;
1554 
1555 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1556 
1557 	ath10k_snoc_clk_deinit(ar);
1558 
1559 	ret = ath10k_snoc_vreg_off(ar);
1560 
1561 	return ret;
1562 }
1563 
/* Device-tree match table; .data carries the per-chip driver config
 * (hw_rev, CE layout, dma mask, msa_size) consumed in probe.
 */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1571 
1572 static int ath10k_snoc_probe(struct platform_device *pdev)
1573 {
1574 	const struct ath10k_snoc_drv_priv *drv_data;
1575 	const struct of_device_id *of_id;
1576 	struct ath10k_snoc *ar_snoc;
1577 	struct device *dev;
1578 	struct ath10k *ar;
1579 	u32 msa_size;
1580 	int ret;
1581 	u32 i;
1582 
1583 	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1584 	if (!of_id) {
1585 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1586 		return -EINVAL;
1587 	}
1588 
1589 	drv_data = of_id->data;
1590 	dev = &pdev->dev;
1591 
1592 	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1593 	if (ret) {
1594 		dev_err(dev, "failed to set dma mask: %d", ret);
1595 		return ret;
1596 	}
1597 
1598 	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1599 				drv_data->hw_rev, &ath10k_snoc_hif_ops);
1600 	if (!ar) {
1601 		dev_err(dev, "failed to allocate core\n");
1602 		return -ENOMEM;
1603 	}
1604 
1605 	ar_snoc = ath10k_snoc_priv(ar);
1606 	ar_snoc->dev = pdev;
1607 	platform_set_drvdata(pdev, ar);
1608 	ar_snoc->ar = ar;
1609 	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1610 	ar->ce_priv = &ar_snoc->ce;
1611 	msa_size = drv_data->msa_size;
1612 
1613 	ret = ath10k_snoc_resource_init(ar);
1614 	if (ret) {
1615 		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1616 		goto err_core_destroy;
1617 	}
1618 
1619 	ret = ath10k_snoc_setup_resource(ar);
1620 	if (ret) {
1621 		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1622 		goto err_core_destroy;
1623 	}
1624 	ret = ath10k_snoc_request_irq(ar);
1625 	if (ret) {
1626 		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1627 		goto err_release_resource;
1628 	}
1629 
1630 	ar_snoc->vreg = vreg_cfg;
1631 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1632 		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1633 		if (ret)
1634 			goto err_free_irq;
1635 	}
1636 
1637 	ar_snoc->clk = clk_cfg;
1638 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1639 		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1640 		if (ret)
1641 			goto err_free_irq;
1642 	}
1643 
1644 	ret = ath10k_hw_power_on(ar);
1645 	if (ret) {
1646 		ath10k_err(ar, "failed to power on device: %d\n", ret);
1647 		goto err_free_irq;
1648 	}
1649 
1650 	ret = ath10k_qmi_init(ar, msa_size);
1651 	if (ret) {
1652 		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1653 		goto err_core_destroy;
1654 	}
1655 
1656 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1657 
1658 	return 0;
1659 
1660 err_free_irq:
1661 	ath10k_snoc_free_irq(ar);
1662 
1663 err_release_resource:
1664 	ath10k_snoc_release_resource(ar);
1665 
1666 err_core_destroy:
1667 	ath10k_core_destroy(ar);
1668 
1669 	return ret;
1670 }
1671 
/* Platform remove: wait out any in-flight firmware recovery, block
 * further QMI indications, then unwind everything probe set up in
 * reverse order.
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	/* Give an ongoing recovery up to 3s to complete before teardown. */
	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	/* ath10k_snoc_fw_indication() checks this and bails out early. */
	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1695 
/* Platform driver glue; module init/exit are generated by
 * module_platform_driver().
 */
static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name   = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);
1705 
1706 MODULE_AUTHOR("Qualcomm");
1707 MODULE_LICENSE("Dual BSD/GPL");
1708 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
1709