xref: /linux/drivers/net/wireless/ath/ath10k/snoc.c (revision bab2c80e5a6c855657482eac9e97f5f3eedb509a)
1 /*
2  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include "debug.h"
20 #include "hif.h"
21 #include "htc.h"
22 #include "ce.h"
23 #include "snoc.h"
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/platform_device.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/clk.h>
29 #define WCN3990_CE_ATTR_FLAGS 0
30 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
31 #define CE_POLL_PIPE 4
32 
33 static char *const ce_name[] = {
34 	"WLAN_CE_0",
35 	"WLAN_CE_1",
36 	"WLAN_CE_2",
37 	"WLAN_CE_3",
38 	"WLAN_CE_4",
39 	"WLAN_CE_5",
40 	"WLAN_CE_6",
41 	"WLAN_CE_7",
42 	"WLAN_CE_8",
43 	"WLAN_CE_9",
44 	"WLAN_CE_10",
45 	"WLAN_CE_11",
46 };
47 
48 static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
49 	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
50 	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
51 	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
52 	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
53 };
54 
55 static struct ath10k_wcn3990_clk_info clk_cfg[] = {
56 	{NULL, "cxo_ref_clk_pin", 0, false},
57 };
58 
59 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
60 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
61 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
62 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
63 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
64 
65 static const struct ath10k_snoc_drv_priv drv_priv = {
66 	.hw_rev = ATH10K_HW_WCN3990,
67 	.dma_mask = DMA_BIT_MASK(37),
68 };
69 
70 static struct ce_attr host_ce_config_wlan[] = {
71 	/* CE0: host->target HTC control streams */
72 	{
73 		.flags = CE_ATTR_FLAGS,
74 		.src_nentries = 16,
75 		.src_sz_max = 2048,
76 		.dest_nentries = 0,
77 		.send_cb = ath10k_snoc_htc_tx_cb,
78 	},
79 
80 	/* CE1: target->host HTT + HTC control */
81 	{
82 		.flags = CE_ATTR_FLAGS,
83 		.src_nentries = 0,
84 		.src_sz_max = 2048,
85 		.dest_nentries = 512,
86 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
87 	},
88 
89 	/* CE2: target->host WMI */
90 	{
91 		.flags = CE_ATTR_FLAGS,
92 		.src_nentries = 0,
93 		.src_sz_max = 2048,
94 		.dest_nentries = 64,
95 		.recv_cb = ath10k_snoc_htc_rx_cb,
96 	},
97 
98 	/* CE3: host->target WMI */
99 	{
100 		.flags = CE_ATTR_FLAGS,
101 		.src_nentries = 32,
102 		.src_sz_max = 2048,
103 		.dest_nentries = 0,
104 		.send_cb = ath10k_snoc_htc_tx_cb,
105 	},
106 
107 	/* CE4: host->target HTT */
108 	{
109 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
110 		.src_nentries = 256,
111 		.src_sz_max = 256,
112 		.dest_nentries = 0,
113 		.send_cb = ath10k_snoc_htt_tx_cb,
114 	},
115 
116 	/* CE5: target->host HTT (ipa_uc->target) */
117 	{
118 		.flags = CE_ATTR_FLAGS,
119 		.src_nentries = 0,
120 		.src_sz_max = 512,
121 		.dest_nentries = 512,
122 		.recv_cb = ath10k_snoc_htt_rx_cb,
123 	},
124 
125 	/* CE6: target autonomous hif_memcpy */
126 	{
127 		.flags = CE_ATTR_FLAGS,
128 		.src_nentries = 0,
129 		.src_sz_max = 0,
130 		.dest_nentries = 0,
131 	},
132 
133 	/* CE7: ce_diag, the Diagnostic Window */
134 	{
135 		.flags = CE_ATTR_FLAGS,
136 		.src_nentries = 2,
137 		.src_sz_max = 2048,
138 		.dest_nentries = 2,
139 	},
140 
141 	/* CE8: Target to uMC */
142 	{
143 		.flags = CE_ATTR_FLAGS,
144 		.src_nentries = 0,
145 		.src_sz_max = 2048,
146 		.dest_nentries = 128,
147 	},
148 
149 	/* CE9: target->host HTT */
150 	{
151 		.flags = CE_ATTR_FLAGS,
152 		.src_nentries = 0,
153 		.src_sz_max = 2048,
154 		.dest_nentries = 512,
155 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
156 	},
157 
158 	/* CE10: target->host HTT */
159 	{
160 		.flags = CE_ATTR_FLAGS,
161 		.src_nentries = 0,
162 		.src_sz_max = 2048,
163 		.dest_nentries = 512,
164 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
165 	},
166 
167 	/* CE11: target -> host PKTLOG */
168 	{
169 		.flags = CE_ATTR_FLAGS,
170 		.src_nentries = 0,
171 		.src_sz_max = 2048,
172 		.dest_nentries = 512,
173 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
174 	},
175 };
176 
177 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
178 	{
179 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
180 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
181 		__cpu_to_le32(3),
182 	},
183 	{
184 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
185 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
186 		__cpu_to_le32(2),
187 	},
188 	{
189 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
190 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
191 		__cpu_to_le32(3),
192 	},
193 	{
194 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
195 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
196 		__cpu_to_le32(2),
197 	},
198 	{
199 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
200 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
201 		__cpu_to_le32(3),
202 	},
203 	{
204 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
205 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
206 		__cpu_to_le32(2),
207 	},
208 	{
209 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
210 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
211 		__cpu_to_le32(3),
212 	},
213 	{
214 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
215 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
216 		__cpu_to_le32(2),
217 	},
218 	{
219 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
220 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
221 		__cpu_to_le32(3),
222 	},
223 	{
224 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
225 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
226 		__cpu_to_le32(2),
227 	},
228 	{
229 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
230 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
231 		__cpu_to_le32(0),
232 	},
233 	{
234 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
235 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
236 		__cpu_to_le32(2),
237 	},
238 	{ /* not used */
239 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
240 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
241 		__cpu_to_le32(0),
242 	},
243 	{ /* not used */
244 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
245 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
246 		__cpu_to_le32(2),
247 	},
248 	{
249 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
250 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
251 		__cpu_to_le32(4),
252 	},
253 	{
254 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
255 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
256 		__cpu_to_le32(1),
257 	},
258 	{ /* not used */
259 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
260 		__cpu_to_le32(PIPEDIR_OUT),
261 		__cpu_to_le32(5),
262 	},
263 	{ /* in = DL = target -> host */
264 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
265 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
266 		__cpu_to_le32(9),
267 	},
268 	{ /* in = DL = target -> host */
269 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
270 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
271 		__cpu_to_le32(10),
272 	},
273 	{ /* in = DL = target -> host pktlog */
274 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
275 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
276 		__cpu_to_le32(11),
277 	},
278 	/* (Additions here) */
279 
280 	{ /* must be last */
281 		__cpu_to_le32(0),
282 		__cpu_to_le32(0),
283 		__cpu_to_le32(0),
284 	},
285 };
286 
287 void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
288 {
289 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
290 
291 	iowrite32(value, ar_snoc->mem + offset);
292 }
293 
294 u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
295 {
296 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
297 	u32 val;
298 
299 	val = ioread32(ar_snoc->mem + offset);
300 
301 	return val;
302 }
303 
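/* Allocate one RX skb, DMA-map it and post it to the copy engine
 * destination ring under ce_lock; the skb is unwound again if mapping or
 * posting fails.
 */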
304 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
305 {
306 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
307 	struct ath10k *ar = pipe->hif_ce_state;
308 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
309 	struct sk_buff *skb;
310 	dma_addr_t paddr;
311 	int ret;
312 
313 	skb = dev_alloc_skb(pipe->buf_sz);
314 	if (!skb)
315 		return -ENOMEM;
316 
317 	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
318 
319 	paddr = dma_map_single(ar->dev, skb->data,
320 			       skb->len + skb_tailroom(skb),
321 			       DMA_FROM_DEVICE);
322 	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
323 		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
324 		dev_kfree_skb_any(skb);
325 		return -EIO;
326 	}
327 
328 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
329 
330 	spin_lock_bh(&ce->ce_lock);
331 	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
332 	spin_unlock_bh(&ce->ce_lock);
333 	if (ret) {
334 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
335 				 DMA_FROM_DEVICE);
336 		dev_kfree_skb_any(skb);
337 		return ret;
338 	}
339 
340 	return 0;
341 }
342 
343 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
344 {
345 	struct ath10k *ar = pipe->hif_ce_state;
346 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
347 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
348 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
349 	int ret, num;
350 
351 	if (pipe->buf_sz == 0)
352 		return;
353 
354 	if (!ce_pipe->dest_ring)
355 		return;
356 
357 	spin_lock_bh(&ce->ce_lock);
358 	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
359 	spin_unlock_bh(&ce->ce_lock);
360 	while (num--) {
361 		ret = __ath10k_snoc_rx_post_buf(pipe);
362 		if (ret) {
363 			if (ret == -ENOSPC)
364 				break;
365 			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
366 			mod_timer(&ar_snoc->rx_post_retry, jiffies +
367 				  ATH10K_SNOC_RX_POST_RETRY_MS);
368 			break;
369 		}
370 	}
371 }
372 
373 static void ath10k_snoc_rx_post(struct ath10k *ar)
374 {
375 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
376 	int i;
377 
378 	for (i = 0; i < CE_COUNT; i++)
379 		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
380 }
381 
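/* Common RX completion path: drain completed buffers from the copy engine,
 * unmap them, drop anything longer than the posted buffer, deliver the rest
 * through the supplied callback and finally replenish the RX ring.
 */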
382 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
383 				      void (*callback)(struct ath10k *ar,
384 						       struct sk_buff *skb))
385 {
386 	struct ath10k *ar = ce_state->ar;
387 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
388 	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
389 	struct sk_buff *skb;
390 	struct sk_buff_head list;
391 	void *transfer_context;
392 	unsigned int nbytes, max_nbytes;
393 
394 	__skb_queue_head_init(&list);
395 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
396 					     &nbytes) == 0) {
397 		skb = transfer_context;
398 		max_nbytes = skb->len + skb_tailroom(skb);
399 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
400 				 max_nbytes, DMA_FROM_DEVICE);
401 
402 		if (unlikely(max_nbytes < nbytes)) {
403 			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
404 				    nbytes, max_nbytes);
405 			dev_kfree_skb_any(skb);
406 			continue;
407 		}
408 
409 		skb_put(skb, nbytes);
410 		__skb_queue_tail(&list, skb);
411 	}
412 
413 	while ((skb = __skb_dequeue(&list))) {
414 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
415 			   ce_state->id, skb->len);
416 
417 		callback(ar, skb);
418 	}
419 
420 	ath10k_snoc_rx_post_pipe(pipe_info);
421 }
422 
423 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
424 {
425 	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
426 }
427 
428 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
429 {
430 	/* CE4 polling needs to be done whenever CE pipe which transports
431 	 * HTT Rx (target->host) is processed.
432 	 */
433 	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
434 
435 	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
436 }
437 
438 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
439 {
440 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
441 	ath10k_htt_t2h_msg_handler(ar, skb);
442 }
443 
444 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
445 {
446 	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
447 	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
448 }
449 
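/* Timer callback armed when posting RX buffers failed (e.g. out of memory);
 * it simply retries the post for all pipes.
 */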
450 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
451 {
452 	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
453 	struct ath10k *ar = ar_snoc->ar;
454 
455 	ath10k_snoc_rx_post(ar);
456 }
457 
458 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
459 {
460 	struct ath10k *ar = ce_state->ar;
461 	struct sk_buff_head list;
462 	struct sk_buff *skb;
463 
464 	__skb_queue_head_init(&list);
465 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
466 		if (!skb)
467 			continue;
468 
469 		__skb_queue_tail(&list, skb);
470 	}
471 
472 	while ((skb = __skb_dequeue(&list)))
473 		ath10k_htc_tx_completion_handler(ar, skb);
474 }
475 
476 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
477 {
478 	struct ath10k *ar = ce_state->ar;
479 	struct sk_buff *skb;
480 
481 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
482 		if (!skb)
483 			continue;
484 
485 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
486 				 skb->len, DMA_TO_DEVICE);
487 		ath10k_htt_hif_tx_complete(ar, skb);
488 	}
489 }
490 
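/* Send a scatter-gather list on the given pipe: every item except the last
 * is queued with CE_SEND_FLAG_GATHER, and any descriptors already queued in
 * this call are reverted if a send fails.
 */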
491 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
492 				 struct ath10k_hif_sg_item *items, int n_items)
493 {
494 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
495 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
496 	struct ath10k_snoc_pipe *snoc_pipe;
497 	struct ath10k_ce_pipe *ce_pipe;
498 	int err, i = 0;
499 
500 	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
501 	ce_pipe = snoc_pipe->ce_hdl;
502 	spin_lock_bh(&ce->ce_lock);
503 
504 	for (i = 0; i < n_items - 1; i++) {
505 		ath10k_dbg(ar, ATH10K_DBG_SNOC,
506 			   "snoc tx item %d paddr %pad len %d n_items %d\n",
507 			   i, &items[i].paddr, items[i].len, n_items);
508 
509 		err = ath10k_ce_send_nolock(ce_pipe,
510 					    items[i].transfer_context,
511 					    items[i].paddr,
512 					    items[i].len,
513 					    items[i].transfer_id,
514 					    CE_SEND_FLAG_GATHER);
515 		if (err)
516 			goto err;
517 	}
518 
519 	ath10k_dbg(ar, ATH10K_DBG_SNOC,
520 		   "snoc tx item %d paddr %pad len %d n_items %d\n",
521 		   i, &items[i].paddr, items[i].len, n_items);
522 
523 	err = ath10k_ce_send_nolock(ce_pipe,
524 				    items[i].transfer_context,
525 				    items[i].paddr,
526 				    items[i].len,
527 				    items[i].transfer_id,
528 				    0);
529 	if (err)
530 		goto err;
531 
532 	spin_unlock_bh(&ce->ce_lock);
533 
534 	return 0;
535 
536 err:
537 	for (; i > 0; i--)
538 		__ath10k_ce_send_revert(ce_pipe);
539 
540 	spin_unlock_bh(&ce->ce_lock);
541 	return err;
542 }
543 
544 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
545 					   struct bmi_target_info *target_info)
546 {
547 	target_info->version = ATH10K_HW_WCN3990;
548 	target_info->type = ATH10K_HW_WCN3990;
549 
550 	return 0;
551 }
552 
553 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
554 {
555 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
556 
557 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
558 
559 	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
560 }
561 
562 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
563 						int force)
564 {
565 	int resources;
566 
567 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
568 
569 	if (!force) {
570 		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
571 
572 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
573 			return;
574 	}
575 	ath10k_ce_per_engine_service(ar, pipe);
576 }
577 
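/* Resolve the uplink/downlink copy engine pipes for an HTC service id from
 * target_service_to_ce_map_wlan.
 */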
578 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
579 					       u16 service_id,
580 					       u8 *ul_pipe, u8 *dl_pipe)
581 {
582 	const struct service_to_pipe *entry;
583 	bool ul_set = false, dl_set = false;
584 	int i;
585 
586 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
587 
588 	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
589 		entry = &target_service_to_ce_map_wlan[i];
590 
591 		if (__le32_to_cpu(entry->service_id) != service_id)
592 			continue;
593 
594 		switch (__le32_to_cpu(entry->pipedir)) {
595 		case PIPEDIR_NONE:
596 			break;
597 		case PIPEDIR_IN:
598 			WARN_ON(dl_set);
599 			*dl_pipe = __le32_to_cpu(entry->pipenum);
600 			dl_set = true;
601 			break;
602 		case PIPEDIR_OUT:
603 			WARN_ON(ul_set);
604 			*ul_pipe = __le32_to_cpu(entry->pipenum);
605 			ul_set = true;
606 			break;
607 		case PIPEDIR_INOUT:
608 			WARN_ON(dl_set);
609 			WARN_ON(ul_set);
610 			*dl_pipe = __le32_to_cpu(entry->pipenum);
611 			*ul_pipe = __le32_to_cpu(entry->pipenum);
612 			dl_set = true;
613 			ul_set = true;
614 			break;
615 		}
616 	}
617 
618 	if (WARN_ON(!ul_set || !dl_set))
619 		return -ENOENT;
620 
621 	return 0;
622 }
623 
624 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
625 					     u8 *ul_pipe, u8 *dl_pipe)
626 {
627 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
628 
629 	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
630 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
631 						 ul_pipe, dl_pipe);
632 }
633 
634 static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
635 {
636 	ath10k_ce_disable_interrupts(ar);
637 }
638 
639 static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
640 {
641 	ath10k_ce_enable_interrupts(ar);
642 }
643 
644 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
645 {
646 	struct ath10k_ce_pipe *ce_pipe;
647 	struct ath10k_ce_ring *ce_ring;
648 	struct sk_buff *skb;
649 	struct ath10k *ar;
650 	int i;
651 
652 	ar = snoc_pipe->hif_ce_state;
653 	ce_pipe = snoc_pipe->ce_hdl;
654 	ce_ring = ce_pipe->dest_ring;
655 
656 	if (!ce_ring)
657 		return;
658 
659 	if (!snoc_pipe->buf_sz)
660 		return;
661 
662 	for (i = 0; i < ce_ring->nentries; i++) {
663 		skb = ce_ring->per_transfer_context[i];
664 		if (!skb)
665 			continue;
666 
667 		ce_ring->per_transfer_context[i] = NULL;
668 
669 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
670 				 skb->len + skb_tailroom(skb),
671 				 DMA_FROM_DEVICE);
672 		dev_kfree_skb_any(skb);
673 	}
674 }
675 
676 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
677 {
678 	struct ath10k_ce_pipe *ce_pipe;
679 	struct ath10k_ce_ring *ce_ring;
680 	struct ath10k_snoc *ar_snoc;
681 	struct sk_buff *skb;
682 	struct ath10k *ar;
683 	int i;
684 
685 	ar = snoc_pipe->hif_ce_state;
686 	ar_snoc = ath10k_snoc_priv(ar);
687 	ce_pipe = snoc_pipe->ce_hdl;
688 	ce_ring = ce_pipe->src_ring;
689 
690 	if (!ce_ring)
691 		return;
692 
693 	if (!snoc_pipe->buf_sz)
694 		return;
695 
696 	for (i = 0; i < ce_ring->nentries; i++) {
697 		skb = ce_ring->per_transfer_context[i];
698 		if (!skb)
699 			continue;
700 
701 		ce_ring->per_transfer_context[i] = NULL;
702 
703 		ath10k_htc_tx_completion_handler(ar, skb);
704 	}
705 }
706 
707 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
708 {
709 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
710 	struct ath10k_snoc_pipe *pipe_info;
711 	int pipe_num;
712 
713 	del_timer_sync(&ar_snoc->rx_post_retry);
714 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
715 		pipe_info = &ar_snoc->pipe_info[pipe_num];
716 		ath10k_snoc_rx_pipe_cleanup(pipe_info);
717 		ath10k_snoc_tx_pipe_cleanup(pipe_info);
718 	}
719 }
720 
721 static void ath10k_snoc_hif_stop(struct ath10k *ar)
722 {
723 	ath10k_snoc_irq_disable(ar);
724 	napi_synchronize(&ar->napi);
725 	napi_disable(&ar->napi);
726 	ath10k_snoc_buffer_cleanup(ar);
727 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
728 }
729 
730 static int ath10k_snoc_hif_start(struct ath10k *ar)
731 {
732 	ath10k_snoc_irq_enable(ar);
733 	ath10k_snoc_rx_post(ar);
734 
735 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
736 
737 	return 0;
738 }
739 
740 static int ath10k_snoc_init_pipes(struct ath10k *ar)
741 {
742 	int i, ret;
743 
744 	for (i = 0; i < CE_COUNT; i++) {
745 		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
746 		if (ret) {
747 			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
748 				   i, ret);
749 			return ret;
750 		}
751 	}
752 
753 	return 0;
754 }
755 
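/* WLAN enable/disable are empty stubs for now; presumably they will signal
 * the WCN3990 firmware once that part of the SNOC support is implemented.
 */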
756 static int ath10k_snoc_wlan_enable(struct ath10k *ar)
757 {
758 	return 0;
759 }
760 
761 static void ath10k_snoc_wlan_disable(struct ath10k *ar)
762 {
763 }
764 
765 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
766 {
767 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
768 
769 	ath10k_snoc_wlan_disable(ar);
770 	ath10k_ce_free_rri(ar);
771 }
772 
773 static int ath10k_snoc_hif_power_up(struct ath10k *ar)
774 {
775 	int ret;
776 
777 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s: WCN3990 driver state = %d\n",
778 		   __func__, ar->state);
779 
780 	ret = ath10k_snoc_wlan_enable(ar);
781 	if (ret) {
782 		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
783 		return ret;
784 	}
785 
786 	ath10k_ce_alloc_rri(ar);
787 
788 	ret = ath10k_snoc_init_pipes(ar);
789 	if (ret) {
790 		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
791 		goto err_wlan_enable;
792 	}
793 
794 	napi_enable(&ar->napi);
795 	return 0;
796 
797 err_wlan_enable:
798 	ath10k_snoc_wlan_disable(ar);
799 
800 	return ret;
801 }
802 
803 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
804 	.read32		= ath10k_snoc_read32,
805 	.write32	= ath10k_snoc_write32,
806 	.start		= ath10k_snoc_hif_start,
807 	.stop		= ath10k_snoc_hif_stop,
808 	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
809 	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
810 	.power_up		= ath10k_snoc_hif_power_up,
811 	.power_down		= ath10k_snoc_hif_power_down,
812 	.tx_sg			= ath10k_snoc_hif_tx_sg,
813 	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
814 	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
815 	.get_target_info	= ath10k_snoc_hif_get_target_info,
816 };
817 
818 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
819 	.read32		= ath10k_snoc_read32,
820 	.write32	= ath10k_snoc_write32,
821 };
822 
823 int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
824 {
825 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
826 	int i;
827 
828 	for (i = 0; i < CE_COUNT_MAX; i++) {
829 		if (ar_snoc->ce_irqs[i].irq_line == irq)
830 			return i;
831 	}
832 	ath10k_err(ar, "No matching CE id for irq %d\n", irq);
833 
834 	return -EINVAL;
835 }
836 
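/* Per-copy-engine interrupt handler: mask CE interrupts and hand the actual
 * completion processing over to NAPI.
 */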
837 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
838 {
839 	struct ath10k *ar = arg;
840 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
841 	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
842 
843 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
844 		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
845 			    ce_id);
846 		return IRQ_HANDLED;
847 	}
848 
849 	ath10k_snoc_irq_disable(ar);
850 	napi_schedule(&ar->napi);
851 
852 	return IRQ_HANDLED;
853 }
854 
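/* NAPI poll: service the copy engines and HTT TX/RX completions; when the
 * work done stays below the budget, complete NAPI and re-enable CE
 * interrupts.
 */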
855 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
856 {
857 	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
858 	int done = 0;
859 
860 	ath10k_ce_per_engine_service_any(ar);
861 	done = ath10k_htt_txrx_compl_task(ar, budget);
862 
863 	if (done < budget) {
864 		napi_complete(ctx);
865 		ath10k_snoc_irq_enable(ar);
866 	}
867 
868 	return done;
869 }
870 
871 void ath10k_snoc_init_napi(struct ath10k *ar)
872 {
873 	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
874 		       ATH10K_NAPI_BUDGET);
875 }
876 
877 static int ath10k_snoc_request_irq(struct ath10k *ar)
878 {
879 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
880 	int irqflags = IRQF_TRIGGER_RISING;
881 	int ret, id;
882 
883 	for (id = 0; id < CE_COUNT_MAX; id++) {
884 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
885 				  ath10k_snoc_per_engine_handler,
886 				  irqflags, ce_name[id], ar);
887 		if (ret) {
888 			ath10k_err(ar,
889 				   "failed to register IRQ handler for CE %d: %d\n",
890 				   id, ret);
891 			goto err_irq;
892 		}
893 	}
894 
895 	return 0;
896 
897 err_irq:
898 	for (id -= 1; id >= 0; id--)
899 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
900 
901 	return ret;
902 }
903 
904 static void ath10k_snoc_free_irq(struct ath10k *ar)
905 {
906 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
907 	int id;
908 
909 	for (id = 0; id < CE_COUNT_MAX; id++)
910 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
911 }
912 
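/* Map the "membase" MMIO region and pick up one interrupt line per copy
 * engine from the platform device resources (populated from the device
 * tree).
 */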
913 static int ath10k_snoc_resource_init(struct ath10k *ar)
914 {
915 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
916 	struct platform_device *pdev;
917 	struct resource *res;
918 	int i, ret = 0;
919 
920 	pdev = ar_snoc->dev;
921 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
922 	if (!res) {
923 		ath10k_err(ar, "Memory base not found in DT\n");
924 		return -EINVAL;
925 	}
926 
927 	ar_snoc->mem_pa = res->start;
928 	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
929 				    resource_size(res));
930 	if (!ar_snoc->mem) {
931 		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
932 			   &ar_snoc->mem_pa);
933 		return -EINVAL;
934 	}
935 
936 	for (i = 0; i < CE_COUNT; i++) {
937 		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
938 		if (!res) {
939 			ath10k_err(ar, "failed to get IRQ%d\n", i);
940 			ret = -ENODEV;
941 			goto out;
942 		}
943 		ar_snoc->ce_irqs[i].irq_line = res->start;
944 	}
945 
946 out:
947 	return ret;
948 }
949 
950 static int ath10k_snoc_setup_resource(struct ath10k *ar)
951 {
952 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
953 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
954 	struct ath10k_snoc_pipe *pipe;
955 	int i, ret;
956 
957 	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
958 	spin_lock_init(&ce->ce_lock);
959 	for (i = 0; i < CE_COUNT; i++) {
960 		pipe = &ar_snoc->pipe_info[i];
961 		pipe->ce_hdl = &ce->ce_states[i];
962 		pipe->pipe_num = i;
963 		pipe->hif_ce_state = ar;
964 
965 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
966 		if (ret) {
967 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
968 				   i, ret);
969 			return ret;
970 		}
971 
972 		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
973 	}
974 	ath10k_snoc_init_napi(ar);
975 
976 	return 0;
977 }
978 
979 static void ath10k_snoc_release_resource(struct ath10k *ar)
980 {
981 	int i;
982 
983 	netif_napi_del(&ar->napi);
984 	for (i = 0; i < CE_COUNT; i++)
985 		ath10k_ce_free_pipe(ar, i);
986 }
987 
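/* Look up a single regulator from vreg_cfg.  A missing supply is fatal only
 * if it is marked required or the lookup returned -EPROBE_DEFER.
 */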
988 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
989 				struct ath10k_wcn3990_vreg_info *vreg_info)
990 {
991 	struct regulator *reg;
992 	int ret = 0;
993 
994 	reg = devm_regulator_get_optional(dev, vreg_info->name);
995 
996 	if (IS_ERR(reg)) {
997 		ret = PTR_ERR(reg);
998 
999 		if (ret  == -EPROBE_DEFER) {
1000 			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1001 				   vreg_info->name);
1002 			return ret;
1003 		}
1004 		if (vreg_info->required) {
1005 			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1006 				   vreg_info->name, ret);
1007 			return ret;
1008 		}
1009 		ath10k_dbg(ar, ATH10K_DBG_SNOC,
1010 			   "Optional regulator %s doesn't exist: %d\n",
1011 			   vreg_info->name, ret);
1012 		goto done;
1013 	}
1014 
1015 	vreg_info->reg = reg;
1016 
1017 done:
1018 	ath10k_dbg(ar, ATH10K_DBG_SNOC,
1019 		   "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1020 		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1021 		   vreg_info->load_ua, vreg_info->settle_delay);
1022 
1023 	return 0;
1024 }
1025 
1026 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1027 			       struct ath10k_wcn3990_clk_info *clk_info)
1028 {
1029 	struct clk *handle;
1030 	int ret = 0;
1031 
1032 	handle = devm_clk_get(dev, clk_info->name);
1033 	if (IS_ERR(handle)) {
1034 		ret = PTR_ERR(handle);
1035 		if (clk_info->required) {
1036 			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1037 				   clk_info->name, ret);
1038 			return ret;
1039 		}
1040 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1041 			   clk_info->name,
1042 			   ret);
1043 		return 0;
1044 	}
1045 
1046 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1047 		   clk_info->name, clk_info->freq);
1048 
1049 	clk_info->handle = handle;
1050 
1051 	return ret;
1052 }
1053 
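/* Set voltage/load and enable the WCN3990 supplies in vreg_cfg order,
 * rolling the regulators back if any step fails.
 */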
1054 static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
1055 {
1056 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1057 	struct ath10k_wcn3990_vreg_info *vreg_info;
1058 	int ret = 0;
1059 	int i;
1060 
1061 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1062 		vreg_info = &ar_snoc->vreg[i];
1063 
1064 		if (!vreg_info->reg)
1065 			continue;
1066 
1067 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
1068 			   vreg_info->name);
1069 
1070 		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
1071 					    vreg_info->max_v);
1072 		if (ret) {
1073 			ath10k_err(ar,
1074 				   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
1075 				   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
1076 			goto err_reg_config;
1077 		}
1078 
1079 		if (vreg_info->load_ua) {
1080 			ret = regulator_set_load(vreg_info->reg,
1081 						 vreg_info->load_ua);
1082 			if (ret < 0) {
1083 				ath10k_err(ar,
1084 					   "failed to set regulator %s load: %d\n",
1085 					   vreg_info->name,
1086 					   vreg_info->load_ua);
1087 				goto err_reg_config;
1088 			}
1089 		}
1090 
1091 		ret = regulator_enable(vreg_info->reg);
1092 		if (ret) {
1093 			ath10k_err(ar, "failed to enable regulator %s\n",
1094 				   vreg_info->name);
1095 			goto err_reg_config;
1096 		}
1097 
1098 		if (vreg_info->settle_delay)
1099 			udelay(vreg_info->settle_delay);
1100 	}
1101 
1102 	return 0;
1103 
1104 err_reg_config:
1105 	for (; i >= 0; i--) {
1106 		vreg_info = &ar_snoc->vreg[i];
1107 
1108 		if (!vreg_info->reg)
1109 			continue;
1110 
1111 		regulator_disable(vreg_info->reg);
1112 		regulator_set_load(vreg_info->reg, 0);
1113 		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
1114 	}
1115 
1116 	return ret;
1117 }
1118 
1119 static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
1120 {
1121 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1122 	struct ath10k_wcn3990_vreg_info *vreg_info;
1123 	int ret = 0;
1124 	int i;
1125 
1126 	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1127 		vreg_info = &ar_snoc->vreg[i];
1128 
1129 		if (!vreg_info->reg)
1130 			continue;
1131 
1132 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
1133 			   vreg_info->name);
1134 
1135 		ret = regulator_disable(vreg_info->reg);
1136 		if (ret)
1137 			ath10k_err(ar, "failed to disable regulator %s\n",
1138 				   vreg_info->name);
1139 
1140 		ret = regulator_set_load(vreg_info->reg, 0);
1141 		if (ret < 0)
1142 			ath10k_err(ar, "failed to set load %s\n",
1143 				   vreg_info->name);
1144 
1145 		ret = regulator_set_voltage(vreg_info->reg, 0,
1146 					    vreg_info->max_v);
1147 		if (ret)
1148 			ath10k_err(ar, "failed to set voltage %s\n",
1149 				   vreg_info->name);
1150 	}
1151 
1152 	return ret;
1153 }
1154 
1155 static int ath10k_wcn3990_clk_init(struct ath10k *ar)
1156 {
1157 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1158 	struct ath10k_wcn3990_clk_info *clk_info;
1159 	int ret = 0;
1160 	int i;
1161 
1162 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1163 		clk_info = &ar_snoc->clk[i];
1164 
1165 		if (!clk_info->handle)
1166 			continue;
1167 
1168 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1169 			   clk_info->name);
1170 
1171 		if (clk_info->freq) {
1172 			ret = clk_set_rate(clk_info->handle, clk_info->freq);
1173 
1174 			if (ret) {
1175 				ath10k_err(ar, "failed to set clock %s freq %u\n",
1176 					   clk_info->name, clk_info->freq);
1177 				goto err_clock_config;
1178 			}
1179 		}
1180 
1181 		ret = clk_prepare_enable(clk_info->handle);
1182 		if (ret) {
1183 			ath10k_err(ar, "failed to enable clock %s\n",
1184 				   clk_info->name);
1185 			goto err_clock_config;
1186 		}
1187 	}
1188 
1189 	return 0;
1190 
1191 err_clock_config:
1192 	for (; i >= 0; i--) {
1193 		clk_info = &ar_snoc->clk[i];
1194 
1195 		if (!clk_info->handle)
1196 			continue;
1197 
1198 		clk_disable_unprepare(clk_info->handle);
1199 	}
1200 
1201 	return ret;
1202 }
1203 
1204 static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
1205 {
1206 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1207 	struct ath10k_wcn3990_clk_info *clk_info;
1208 	int i;
1209 
1210 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1211 		clk_info = &ar_snoc->clk[i];
1212 
1213 		if (!clk_info->handle)
1214 			continue;
1215 
1216 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1217 			   clk_info->name);
1218 
1219 		clk_disable_unprepare(clk_info->handle);
1220 	}
1221 
1222 	return 0;
1223 }
1224 
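/* Chip power-up sequence: bring up the regulators first, then the reference
 * clock; the regulators are switched off again if the clocks fail.
 */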
1225 static int ath10k_hw_power_on(struct ath10k *ar)
1226 {
1227 	int ret;
1228 
1229 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1230 
1231 	ret = ath10k_wcn3990_vreg_on(ar);
1232 	if (ret)
1233 		return ret;
1234 
1235 	ret = ath10k_wcn3990_clk_init(ar);
1236 	if (ret)
1237 		goto vreg_off;
1238 
1239 	return ret;
1240 
1241 vreg_off:
1242 	ath10k_wcn3990_vreg_off(ar);
1243 	return ret;
1244 }
1245 
1246 static int ath10k_hw_power_off(struct ath10k *ar)
1247 {
1248 	int ret;
1249 
1250 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1251 
1252 	ath10k_wcn3990_clk_deinit(ar);
1253 
1254 	ret = ath10k_wcn3990_vreg_off(ar);
1255 
1256 	return ret;
1257 }
1258 
1259 static const struct of_device_id ath10k_snoc_dt_match[] = {
1260 	{ .compatible = "qcom,wcn3990-wifi",
1261 	 .data = &drv_priv,
1262 	},
1263 	{ }
1264 };
1265 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1266 
1267 static int ath10k_snoc_probe(struct platform_device *pdev)
1268 {
1269 	const struct ath10k_snoc_drv_priv *drv_data;
1270 	const struct of_device_id *of_id;
1271 	struct ath10k_snoc *ar_snoc;
1272 	struct device *dev;
1273 	struct ath10k *ar;
1274 	int ret;
1275 	u32 i;
1276 
1277 	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1278 	if (!of_id) {
1279 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1280 		return -EINVAL;
1281 	}
1282 
1283 	drv_data = of_id->data;
1284 	dev = &pdev->dev;
1285 
1286 	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1287 	if (ret) {
1288 		dev_err(dev, "failed to set dma mask: %d\n", ret);
1289 		return ret;
1290 	}
1291 
1292 	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1293 				drv_data->hw_rev, &ath10k_snoc_hif_ops);
1294 	if (!ar) {
1295 		dev_err(dev, "failed to allocate core\n");
1296 		return -ENOMEM;
1297 	}
1298 
1299 	ar_snoc = ath10k_snoc_priv(ar);
1300 	ar_snoc->dev = pdev;
1301 	platform_set_drvdata(pdev, ar);
1302 	ar_snoc->ar = ar;
1303 	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1304 	ar->ce_priv = &ar_snoc->ce;
1305 
1306 	ret = ath10k_snoc_resource_init(ar);
1307 	if (ret) {
1308 		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1309 		goto err_core_destroy;
1310 	}
1311 
1312 	ret = ath10k_snoc_setup_resource(ar);
1313 	if (ret) {
1314 		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1315 		goto err_core_destroy;
1316 	}
1317 	ret = ath10k_snoc_request_irq(ar);
1318 	if (ret) {
1319 		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1320 		goto err_release_resource;
1321 	}
1322 
1323 	ar_snoc->vreg = vreg_cfg;
1324 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1325 		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1326 		if (ret)
1327 			goto err_free_irq;
1328 	}
1329 
1330 	ar_snoc->clk = clk_cfg;
1331 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1332 		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1333 		if (ret)
1334 			goto err_free_irq;
1335 	}
1336 
1337 	ret = ath10k_hw_power_on(ar);
1338 	if (ret) {
1339 		ath10k_err(ar, "failed to power on device: %d\n", ret);
1340 		goto err_free_irq;
1341 	}
1342 
1343 	ret = ath10k_core_register(ar, drv_data->hw_rev);
1344 	if (ret) {
1345 		ath10k_err(ar, "failed to register driver core: %d\n", ret);
1346 		goto err_hw_power_off;
1347 	}
1348 
1349 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1350 	ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!\n");
1351 
1352 	return 0;
1353 
1354 err_hw_power_off:
1355 	ath10k_hw_power_off(ar);
1356 
1357 err_free_irq:
1358 	ath10k_snoc_free_irq(ar);
1359 
1360 err_release_resource:
1361 	ath10k_snoc_release_resource(ar);
1362 
1363 err_core_destroy:
1364 	ath10k_core_destroy(ar);
1365 
1366 	return ret;
1367 }
1368 
1369 static int ath10k_snoc_remove(struct platform_device *pdev)
1370 {
1371 	struct ath10k *ar = platform_get_drvdata(pdev);
1372 
1373 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1374 	ath10k_core_unregister(ar);
1375 	ath10k_hw_power_off(ar);
1376 	ath10k_snoc_free_irq(ar);
1377 	ath10k_snoc_release_resource(ar);
1378 	ath10k_core_destroy(ar);
1379 
1380 	return 0;
1381 }
1382 
1383 static struct platform_driver ath10k_snoc_driver = {
1384 		.probe  = ath10k_snoc_probe,
1385 		.remove = ath10k_snoc_remove,
1386 		.driver = {
1387 			.name   = "ath10k_snoc",
1388 			.of_match_table = ath10k_snoc_dt_match,
1389 		},
1390 };
1391 
1392 static int __init ath10k_snoc_init(void)
1393 {
1394 	int ret;
1395 
1396 	ret = platform_driver_register(&ath10k_snoc_driver);
1397 	if (ret)
1398 		pr_err("failed to register ath10k snoc driver: %d\n",
1399 		       ret);
1400 
1401 	return ret;
1402 }
1403 module_init(ath10k_snoc_init);
1404 
1405 static void __exit ath10k_snoc_exit(void)
1406 {
1407 	platform_driver_unregister(&ath10k_snoc_driver);
1408 }
1409 module_exit(ath10k_snoc_exit);
1410 
1411 MODULE_AUTHOR("Qualcomm");
1412 MODULE_LICENSE("Dual BSD/GPL");
1413 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
1414