// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

/********/
/* Send */
/********/

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

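/**
 * ath10k_htc_notify_tx_completion() - complete a transmitted HTC frame
 * @ep: endpoint the frame was sent on
 * @skb: the transmitted frame, still carrying its HTC header
 *
 * Unmaps the DMA buffer (on non-HL devices), strips the HTC header and
 * hands the skb to the endpoint's ep_tx_complete callback. Bundle marker
 * frames and frames on endpoints without a tx handler are freed here
 * instead of being passed up.
 */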
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	/* In a corner case the copy completion reaches the host while the
	 * copy engine is still processing the buffer; the host then unmaps
	 * the memory and triggers an SMMU fault. As a workaround, delay
	 * unmapping the memory to avoid such faults.
	 */
	if (ar->hw_params.delay_unmap_buffer &&
	    ep->ul_pipe_id == 3)
		mdelay(2);

	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);

static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	memset(hdr, 0, sizeof(struct ath10k_htc_hdr));

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}

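/* Credit accounting: each in-flight message costs
 * DIV_ROUND_UP(len, ep->tx_credit_size) credits. For example, with a
 * (hypothetical) credit size of 1792 bytes, a 100-byte frame costs one
 * credit and a 2000-byte frame costs two. ath10k_htc_consume_credit()
 * checks (and, if @consume is true, takes) the credits; a later credit
 * report from the target, or ath10k_htc_release_credit() on a send
 * failure, gives them back.
 */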
static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
				     unsigned int len,
				     bool consume)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	enum ath10k_htc_ep_id eid = ep->eid;
	int credits, ret = 0;

	if (!ep->tx_credit_flow_enabled)
		return 0;

	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
	spin_lock_bh(&htc->tx_lock);

	if (ep->tx_credits < credits) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc insufficient credits ep %d required %d available %d consume %d\n",
			   eid, credits, ep->tx_credits, consume);
		ret = -EAGAIN;
		goto unlock;
	}

	if (consume) {
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits total %d\n",
			   eid, credits, ep->tx_credits);
	}

unlock:
	spin_unlock_bh(&htc->tx_lock);
	return ret;
}

static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	enum ath10k_htc_ep_id eid = ep->eid;
	int credits;

	if (!ep->tx_credit_flow_enabled)
		return;

	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
	spin_lock_bh(&htc->tx_lock);
	ep->tx_credits += credits;
	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "htc ep %d reverted %d credits back total %d\n",
		   eid, credits, ep->tx_credits);
	spin_unlock_bh(&htc->tx_lock);

	if (ep->ep_ops.ep_tx_credits)
		ep->ep_ops.ep_tx_credits(htc->ar);
}

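/**
 * ath10k_htc_send() - push one HTC frame to the target
 * @htc: HTC state
 * @eid: destination endpoint
 * @skb: payload; room for the HTC header is added here via skb_push()
 *
 * A minimal sketch of the typical caller pattern (assuming the skb came
 * from ath10k_htc_alloc_skb(), which reserves the header room; param_len
 * and eid are placeholders):
 *
 *	skb = ath10k_htc_alloc_skb(ar, param_len);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put(skb, param_len);
 *	... fill skb->data ...
 *	ret = ath10k_htc_send(&ar->htc, eid, skb);
 *	if (ret)
 *		dev_kfree_skb_any(skb);	 // caller still owns skb on error
 *
 * Return: 0 on success. On failure the HTC header is pulled back off and
 * any consumed credits are released, so the skb may be retried or freed.
 */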
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);

/***********/
/* Receive */
/***********/

static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d\n", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}

static int
ath10k_htc_process_lookahead(struct ath10k_htc *htc,
			     const struct ath10k_htc_lookahead_report *report,
			     int len,
			     enum ath10k_htc_ep_id eid,
			     void *next_lookaheads,
			     int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;

	/* Invalid lookahead flags are actually transmitted by the target
	 * in the HTC control message. Since this happens on every boot,
	 * silently ignore the lookahead in this case.
	 */
	if (report->pre_valid != ((~report->post_valid) & 0xFF))
		return 0;

	if (next_lookaheads && next_lookaheads_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
			   report->pre_valid, report->post_valid);

		/* lookahead bytes are valid, copy them over */
		memcpy((u8 *)next_lookaheads, report->lookahead, 4);

		*next_lookaheads_len = 1;
	}

	return 0;
}

static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
				    const struct ath10k_htc_lookahead_bundle *report,
				    int len,
				    enum ath10k_htc_ep_id eid,
				    void *next_lookaheads,
				    int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;
	int bundle_cnt = len / sizeof(*report);

	if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
			    bundle_cnt);
		return -EINVAL;
	}

	if (next_lookaheads && next_lookaheads_len) {
		int i;

		for (i = 0; i < bundle_cnt; i++) {
			memcpy(((u8 *)next_lookaheads) + 4 * i,
			       report->lookahead, 4);
			report++;
		}

		*next_lookaheads_len = bundle_cnt;
	}

	return 0;
}

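/**
 * ath10k_htc_process_trailer() - parse the record trailer of an RX frame
 * @htc: HTC state
 * @buffer: start of the trailer
 * @length: trailer length in bytes
 * @src_eid: endpoint the frame arrived on
 * @next_lookaheads: optional buffer that receives 4-byte lookaheads
 * @next_lookaheads_len: optional count of lookaheads written
 *
 * The trailer is a sequence of records, each a struct
 * ath10k_ath10k_htc_record_hdr followed by hdr.len bytes of payload:
 * credit reports, lookahead reports or lookahead bundles.
 *
 * Return: 0 on success or a negative errno for a malformed trailer.
 */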
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too short\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);

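/**
 * ath10k_htc_rx_completion_handler() - demultiplex a received HTC frame
 * @ar: ath10k device
 * @skb: frame starting with a struct ath10k_htc_hdr
 *
 * On the wire each frame is laid out as:
 *
 *	| HTC header | payload ... | optional trailer |
 *
 * where the trailer, present when ATH10K_HTC_FLAG_TRAILER_PRESENT is set,
 * occupies the last hdr->trailer_len bytes of the payload. After
 * validation the trailer records are processed and trimmed off, and the
 * remaining payload is passed to the endpoint's ep_rx_complete callback,
 * which then owns the skb.
 */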
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];
	if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
		ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
		goto out;
	}

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);

static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
		return "PKTLOG";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;

	/* The WMI control service is the only service with flow control.
	 * Let it have all transmit credits.
	 */
	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
		allocation = htc->total_transmit_credits;

	return allocation;
}

static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
				  struct sk_buff *bundle_skb,
				  struct sk_buff_head *tx_save_head)
{
	struct ath10k_hif_sg_item sg_item;
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int ret, cn = 0;
	unsigned int skb_len;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
	skb_len = bundle_skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);

	if (!ret) {
		sg_item.transfer_id = ep->eid;
		sg_item.transfer_context = bundle_skb;
		sg_item.vaddr = bundle_skb->data;
		sg_item.len = bundle_skb->len;

		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
		if (ret)
			ath10k_htc_release_credit(ep, skb_len);
	}

	if (ret)
		dev_kfree_skb_any(bundle_skb);

	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
		if (ret) {
			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
			skb_queue_head(&ep->tx_req_head, skb);
		} else {
			skb_queue_tail(&ep->tx_complete_head, skb);
		}
	}

	if (!ret)
		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "bundle tx status %d eid %d req count %d count %d len %d\n",
		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
	return ret;
}

static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	int ret;

	ret = ath10k_htc_send(htc, ep->eid, skb);

	if (ret)
		skb_queue_head(&ep->tx_req_head, skb);

	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
}

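/* Drain ep->tx_req_head into bundle buffers of up to
 * ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE credits worth of frames. Each frame is
 * padded to a multiple of the credit size and the pad length is recorded
 * in its HTC header, so the target can split the bundle on credit
 * boundaries. When a bundle buffer fills up it is sent and a new one is
 * allocated; a trailing partial bundle is sent at the end.
 */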
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		if (credit_remainder != 0) {
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			if (skb_queue_len(&ep->tx_req_head) == 0) {
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	if (bundle_buf != bundle_skb->data) {
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}

static void ath10k_htc_bundle_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		if (skb_queue_len(&ep->tx_req_head) >=
		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
			ath10k_htc_send_bundle_skbs(ep);
		} else {
			skb = skb_dequeue(&ep->tx_req_head);

			if (!skb)
				continue;
			ath10k_htc_send_one_skb(ep, skb);
		}
	}
}

static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];
		eid = ep->eid;
		if (ep->bundle_tx && eid == ar->htt.eid) {
			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count %d\n",
				   ep->eid, skb_queue_len(&ep->tx_complete_head));

			while (true) {
				skb = skb_dequeue(&ep->tx_complete_head);
				if (!skb)
					break;
				ath10k_htc_notify_tx_completion(ep, skb);
			}
		}
	}
}

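/**
 * ath10k_htc_send_hl() - send a frame on a high-latency (e.g. SDIO) device
 * @htc: HTC state
 * @eid: destination endpoint
 * @skb: payload without an HTC header
 *
 * On endpoints configured for bundling the frame is queued and the bundle
 * worker is scheduled; otherwise it is sent immediately via
 * ath10k_htc_send(). Frames larger than one credit are rejected.
 */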
int ath10k_htc_send_hl(struct ath10k_htc *htc,
		       enum ath10k_htc_ep_id eid,
		       struct sk_buff *skb)
{
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k *ar = htc->ar;

	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);

	if (ep->bundle_tx) {
		skb_queue_tail(&ep->tx_req_head, skb);
		queue_work(ar->workqueue, &ar->bundle_tx_work);
		return 0;
	} else {
		return ath10k_htc_send(htc, eid, skb);
	}
}

void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
{
	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
	    !ep->bundle_tx) {
		ep->bundle_tx = true;
		skb_queue_head_init(&ep->tx_req_head);
		skb_queue_head_init(&ep->tx_complete_head);
	}
}

void ath10k_htc_stop_hl(struct ath10k *ar)
{
	struct ath10k_htc_ep *ep;
	int i;

	cancel_work_sync(&ar->bundle_tx_work);
	cancel_work_sync(&ar->tx_complete_work);

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		skb_queue_purge(&ep->tx_req_head);
	}
}

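/**
 * ath10k_htc_wait_target() - wait for the target's HTC ready message
 * @htc: HTC state
 *
 * Blocks until the target reports its credit count and credit size on
 * endpoint 0, then records them. An extended ready message additionally
 * carries the alternate data credit size and the maximum number of
 * messages per RX bundle; it is distinguished from the plain ready
 * message only by its length.
 *
 * Return: 0 on success, -ETIMEDOUT or -ECOMM on timeout or a bad message.
 */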
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
			wait_for_completion_timeout(&htc->ctl_resp,
						    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	if (ar->hw_params.use_fw_tx_credits)
		htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	else
		htc->total_transmit_credits = 1;

	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d actual credits:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size,
		   msg->ready.credit_count);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->alt_data_credit_size =
			__le16_to_cpu(msg->ready_ext.reserved) &
			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message RX bundle size %d alt size %d\n",
			   htc->max_msgs_per_htc_bundle,
			   htc->alt_data_credit_size);
	}

	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);

	return 0;
}

void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
				      enum ath10k_htc_ep_id eid,
				      bool enable)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];

	ep->tx_credit_flow_enabled = enable;
}

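/**
 * ath10k_htc_connect_service() - connect an HTC service to an endpoint
 * @htc: HTC state
 * @conn_req: requested service id, callbacks and queue depth
 * @conn_resp: filled with the assigned endpoint and maximum message size
 *
 * A minimal sketch of a caller, the same shape the pktlog connect later
 * in this file uses (my_rx_handler is a hypothetical callback):
 *
 *	struct ath10k_htc_svc_conn_req conn_req = {
 *		.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *		.ep_ops.ep_rx_complete = my_rx_handler,
 *	};
 *	struct ath10k_htc_svc_conn_resp conn_resp = {};
 *
 *	ret = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
 *	if (!ret)
 *		my_eid = conn_resp.eid;
 *
 * Return: 0 on success or a negative errno.
 */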
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x\n", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;

	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
	    htc->alt_data_credit_size != 0)
		ep->tx_credit_size = htc->alt_data_credit_size;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
			   ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

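/**
 * ath10k_htc_alloc_skb() - allocate an skb with room for the HTC header
 * @ar: ath10k device
 * @size: payload size in bytes
 *
 * The returned skb has sizeof(struct ath10k_htc_hdr) bytes of headroom
 * reserved so that ath10k_htc_send() can push the header without a
 * reallocation; callers skb_put() their payload as usual.
 */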
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}

static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}

static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_svc_conn_req conn_req;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = NULL;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
	conn_req.ep_ops.ep_tx_credits = NULL;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
			    status);
		return status;
	}

	return 0;
}

static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
	u8 ul_pipe_id;
	u8 dl_pipe_id;
	int status;

	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
						&ul_pipe_id,
						&dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
			   ATH10K_HTC_SVC_ID_HTT_LOG_MSG);

		return false;
	}

	return true;
}

int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	int status;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	init_completion(&htc->ctl_resp);

	return 0;
}