/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "ce.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "snoc.h"

#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4

static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

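/*
 * Regulator and clock tables for the WCN3990 module. The numeric fields
 * are consumed by ath10k_wcn3990_vreg_on()/ath10k_wcn3990_clk_init()
 * below as min/max voltage (uV), load (uA), settle delay (us) and clock
 * frequency (Hz); the handles are looked up later by
 * ath10k_get_vreg_info()/ath10k_get_clk_info().
 */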
static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
};

static struct ath10k_wcn3990_clk_info clk_cfg[] = {
	{NULL, "cxo_ref_clk_pin", 0, false},
};

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);

static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(37),
};

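/*
 * Host-side copy engine (CE) attributes: per-pipe ring sizes, maximum
 * transfer size and the send/recv completion callbacks.
 */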
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 256,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target->host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},
};

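/*
 * HTC service ID to CE pipe mapping, one entry per direction. Consumed
 * by ath10k_snoc_hif_map_service_to_pipe() and terminated by an
 * all-zero entry.
 */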
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	iowrite32(value, ar_snoc->mem + offset);
}

u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	u32 val;

	val = ioread32(ar_snoc->mem + offset);

	return val;
}

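/*
 * Allocate one rx skb, DMA-map it and hand it to the copy engine
 * destination ring; the CE lock is held around the post itself.
 */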
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_snoc_rx_post(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
}

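/*
 * Common rx completion path: unmap each completed buffer, trim it to
 * the received length, hand it to @callback (HTC or HTT) and then
 * replenish the pipe.
 */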
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}

static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}

static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

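/*
 * Send a scatter-gather list on the given pipe. All but the last
 * fragment are queued with CE_SEND_FLAG_GATHER; on failure the
 * already-queued descriptors are reverted.
 */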
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}

static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}

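/*
 * Opportunistically reap tx completions. Unless @force is set, only
 * bother when at least half of the pipe's source ring is in use.
 */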
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

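/*
 * Resolve an HTC service ID to its uplink and downlink CE pipes using
 * target_service_to_ce_map_wlan[]. Returns -ENOENT if either direction
 * is missing.
 */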
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						  ATH10K_HTC_SVC_ID_RSVD_CTRL,
						  ul_pipe, dl_pipe);
}

static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}

static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}

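/*
 * Ring cleanup helpers used on shutdown: rx buffers are unmapped and
 * freed, pending tx skbs are completed back to HTC.
 */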
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct ath10k_snoc *ar_snoc;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ar_snoc = ath10k_snoc_priv(ar);
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	ath10k_snoc_irq_disable(ar);
	ath10k_snoc_buffer_cleanup(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}

static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}

static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

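/*
 * WLAN enable/disable are still empty stubs here; the actual target
 * bring-up handshake is not wired in yet (see the work-in-progress
 * warning in ath10k_snoc_probe()).
 */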
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
	return 0;
}

static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
}

static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}

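/*
 * Power-up path: enable WLAN, allocate the copy engine RRI (ring
 * index) area and (re)initialize all CE pipes before enabling NAPI.
 */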
static int ath10k_snoc_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	napi_enable(&ar->napi);
	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}

static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
};

static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};

static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (ar_snoc->ce_irqs[i].irq_line == irq)
			return i;
	}
	ath10k_err(ar, "No matching CE id for irq %d\n", irq);

	return -EINVAL;
}

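/*
 * Per-CE interrupt handler: map the irq back to a CE id, mask CE
 * interrupts and let NAPI do the actual completion processing.
 */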
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

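/*
 * NAPI poll: service all copy engines, run the HTT tx/rx completion
 * task and re-enable CE interrupts when less than the full budget was
 * consumed.
 */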
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	ath10k_ce_per_engine_service_any(ar);
	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}

static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

static int ath10k_snoc_request_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int irqflags = IRQF_TRIGGER_RISING;
	int ret, id;

	for (id = 0; id < CE_COUNT_MAX; id++) {
		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
				  ath10k_snoc_per_engine_handler,
				  irqflags, ce_name[id], ar);
		if (ret) {
			ath10k_err(ar,
				   "failed to register IRQ handler for CE %d: %d",
				   id, ret);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	for (id -= 1; id >= 0; id--)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);

	return ret;
}

static void ath10k_snoc_free_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int id;

	for (id = 0; id < CE_COUNT_MAX; id++)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
}

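/*
 * Pull the register space ("membase") and the per-CE interrupt lines
 * out of the platform device described in the device tree.
 */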
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	ar_snoc->mem_pa = res->start;
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	for (i = 0; i < CE_COUNT; i++) {
		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
		if (!res) {
			ath10k_err(ar, "failed to get IRQ%d\n", i);
			ret = -ENODEV;
			goto out;
		}
		ar_snoc->ce_irqs[i].irq_line = res->start;
	}

out:
	return ret;
}

static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *pipe;
	int i, ret;

	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
	spin_lock_init(&ce->ce_lock);
	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_snoc->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
	}
	ath10k_snoc_init_napi(ar);

	return 0;
}

static void ath10k_snoc_release_resource(struct ath10k *ar)
{
	int i;

	netif_napi_del(&ar->napi);
	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

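/*
 * Look up one regulator from the device tree. Missing optional supplies
 * are tolerated; -EPROBE_DEFER and missing required supplies are
 * propagated to the caller.
 */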
static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
				struct ath10k_wcn3990_vreg_info *vreg_info)
{
	struct regulator *reg;
	int ret = 0;

	reg = devm_regulator_get_optional(dev, vreg_info->name);

	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);

		if (ret == -EPROBE_DEFER) {
			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
				   vreg_info->name);
			return ret;
		}
		if (vreg_info->required) {
			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
				   vreg_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "Optional regulator %s doesn't exist: %d\n",
			   vreg_info->name, ret);
		goto done;
	}

	vreg_info->reg = reg;

done:
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
		   vreg_info->load_ua, vreg_info->settle_delay);

	return 0;
}

static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
			       struct ath10k_wcn3990_clk_info *clk_info)
{
	struct clk *handle;
	int ret = 0;

	handle = devm_clk_get(dev, clk_info->name);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		if (clk_info->required) {
			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
				   clk_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
			   clk_info->name,
			   ret);
		return 0;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
		   clk_info->name, clk_info->freq);

	clk_info->handle = handle;

	return ret;
}

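/*
 * Program voltage and load for each regulator and enable it, honouring
 * the optional settle delay. On failure everything configured so far is
 * rolled back.
 */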
static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
			   vreg_info->name);

		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
					    vreg_info->max_v);
		if (ret) {
			ath10k_err(ar,
				   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
				   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
			goto err_reg_config;
		}

		if (vreg_info->load_ua) {
			ret = regulator_set_load(vreg_info->reg,
						 vreg_info->load_ua);
			if (ret < 0) {
				ath10k_err(ar,
					   "failed to set regulator %s load: %d\n",
					   vreg_info->name,
					   vreg_info->load_ua);
				goto err_reg_config;
			}
		}

		ret = regulator_enable(vreg_info->reg);
		if (ret) {
			ath10k_err(ar, "failed to enable regulator %s\n",
				   vreg_info->name);
			goto err_reg_config;
		}

		if (vreg_info->settle_delay)
			udelay(vreg_info->settle_delay);
	}

	return 0;

err_reg_config:
	for (; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		regulator_disable(vreg_info->reg);
		regulator_set_load(vreg_info->reg, 0);
		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
	}

	return ret;
}

static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
			   vreg_info->name);

		ret = regulator_disable(vreg_info->reg);
		if (ret)
			ath10k_err(ar, "failed to disable regulator %s\n",
				   vreg_info->name);

		ret = regulator_set_load(vreg_info->reg, 0);
		if (ret < 0)
			ath10k_err(ar, "failed to set load %s\n",
				   vreg_info->name);

		ret = regulator_set_voltage(vreg_info->reg, 0,
					    vreg_info->max_v);
		if (ret)
			ath10k_err(ar, "failed to set voltage %s\n",
				   vreg_info->name);
	}

	return ret;
}

static int ath10k_wcn3990_clk_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_clk_info *clk_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
			   clk_info->name);

		if (clk_info->freq) {
			ret = clk_set_rate(clk_info->handle, clk_info->freq);

			if (ret) {
				ath10k_err(ar, "failed to set clock %s freq %u\n",
					   clk_info->name, clk_info->freq);
				goto err_clock_config;
			}
		}

		ret = clk_prepare_enable(clk_info->handle);
		if (ret) {
			ath10k_err(ar, "failed to enable clock %s\n",
				   clk_info->name);
			goto err_clock_config;
		}
	}

	return 0;

err_clock_config:
	for (; i >= 0; i--) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		clk_disable_unprepare(clk_info->handle);
	}

	return ret;
}

static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_clk_info *clk_info;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
			   clk_info->name);

		clk_disable_unprepare(clk_info->handle);
	}

	return 0;
}

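/*
 * Chip power sequencing: regulators first, then the reference clock.
 * Power-off reverses the order.
 */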
static int ath10k_hw_power_on(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");

	ret = ath10k_wcn3990_vreg_on(ar);
	if (ret)
		return ret;

	ret = ath10k_wcn3990_clk_init(ar);
	if (ret)
		goto vreg_off;

	return ret;

vreg_off:
	ath10k_wcn3990_vreg_off(ar);
	return ret;
}

static int ath10k_hw_power_off(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");

	ath10k_wcn3990_clk_deinit(ar);

	ret = ath10k_wcn3990_vreg_off(ar);

	return ret;
}

static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	  .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);

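/*
 * Probe: set the DMA mask from the matched hw data, create the ath10k
 * core, map resources and IRQs, look up regulators and clocks, power
 * the chip on and register with the core.
 */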
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	const struct of_device_id *of_id;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	int ret;
	u32 i;

	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	drv_data = of_id->data;
	dev = &pdev->dev;

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	ar_snoc->vreg = vreg_cfg;
	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
		if (ret)
			goto err_free_irq;
	}

	ar_snoc->clk = clk_cfg;
	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
		if (ret)
			goto err_free_irq;
	}

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, drv_data->hw_rev);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_hw_power_off;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
	ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");

	return 0;

err_hw_power_off:
	ath10k_hw_power_off(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_core_destroy(ar);

	return 0;
}

static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name   = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");