1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Bluetooth Software UART Qualcomm protocol
4 *
5 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
6 * protocol extension to H4.
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
10 *
11 * Acknowledgements:
12 * This file is based on hci_ll.c, which was...
13 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
14 * which was in turn based on hci_h4.c, which was written
15 * by Maxim Krasnyansky and Marcel Holtmann.
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/clk.h>
20 #include <linux/completion.h>
21 #include <linux/debugfs.h>
22 #include <linux/delay.h>
23 #include <linux/devcoredump.h>
24 #include <linux/device.h>
25 #include <linux/gpio/consumer.h>
26 #include <linux/mod_devicetable.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/acpi.h>
30 #include <linux/platform_device.h>
31 #include <linux/pwrseq/consumer.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/serdev.h>
34 #include <linux/string_choices.h>
35 #include <linux/mutex.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40
41 #include "hci_uart.h"
42 #include "btqca.h"
43
/* HCI_IBS protocol messages (single command byte on the wire) */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Timeouts, all in milliseconds */
#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS	200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS	2000
#define CMD_TRANS_TIMEOUT_MS		100
#define MEMDUMP_TIMEOUT_MS		8000
/* Uses FW_DOWNLOAD_TIMEOUT_MS, which is defined just below; this is
 * fine because macros are expanded at use, not at definition.
 */
#define IBS_DISABLE_SSR_TIMEOUT_MS \
	(MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
#define FW_DOWNLOAD_TIMEOUT_MS	3000

/* susclk rate */
#define SUSCLK_RATE_32KHZ	32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE	0x2EDC

/* max retry count when init fails */
#define MAX_INIT_RETRIES	3

/* Controller dump header */
#define QCA_SSR_DUMP_HANDLE		0x0108
#define QCA_DUMP_PACKET_SIZE		255
#define QCA_LAST_SEQUENCE_NUM		0xFFFF
#define QCA_CRASHBYTE_PACKET_LEN	1096
#define QCA_MEMDUMP_BYTE		0xFB
74
/* Driver state flags; bit numbers used in qca_data::flags */
enum qca_flags {
	QCA_IBS_DISABLED,	/* IBS off, e.g. during fw download/memdump */
	QCA_DROP_VENDOR_EVENT,
	QCA_SUSPENDING,		/* system suspend in progress */
	QCA_MEMDUMP_COLLECTION,	/* controller memdump frames expected */
	QCA_HW_ERROR_EVENT,
	QCA_SSR_TRIGGERED,	/* subsystem restart in progress */
	QCA_BT_OFF,
	QCA_ROM_FW,
	QCA_DEBUGFS_CREATED,	/* guards against double debugfs setup */
};

/* Per-SoC capability bits carried in qca_device_data::capabilities */
enum qca_capabilities {
	QCA_CAP_WIDEBAND_SPEECH = BIT(0),
	QCA_CAP_VALID_LE_STATES = BIT(1),
	QCA_CAP_HFP_HW_OFFLOAD = BIT(2),
};
92
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,	/* queue packets and wake the device first */
	HCI_IBS_TX_WAKING,	/* WAKE_IND sent, waiting for WAKE_ACK */
	HCI_IBS_TX_AWAKE,	/* device awake, transmit directly */
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,	/* rx clock vote released */
	HCI_IBS_RX_AWAKE,	/* device's WAKE_IND acknowledged */
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,	/* bookkeeping only, no vote change */
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

/* Controller memory dump states */
enum qca_memdump_states {
	QCA_MEMDUMP_IDLE,
	QCA_MEMDUMP_COLLECTING,	/* frames being consumed from rx_memdump_q */
	QCA_MEMDUMP_COLLECTED,
	QCA_MEMDUMP_TIMEOUT,	/* collection did not finish in time */
};
122
/* Progress of an in-flight controller memory dump collection */
struct qca_memdump_info {
	u32 current_seq_no;	/* last frame sequence number handled */
	u32 received_dump;	/* bytes of dump received so far */
	u32 ram_dump_size;	/* total size announced in frame 0 */
};

/* Header of a memdump event frame as received from the controller */
struct qca_memdump_event_hdr {
	__u8 evt;
	__u8 plen;
	__u16 opcode;
	__le16 seq_no;		/* QCA_LAST_SEQUENCE_NUM marks the last frame */
	__u8 reserved;
} __packed;


/* Payload of memdump frame 0: the total dump size, little-endian */
struct qca_dump_size {
	__le32 dump_size;
} __packed;
141
/* Per-port protocol state, hung off hci_uart::priv */
struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;		/* in-progress rx packet; freed in qca_close() */
	struct sk_buff_head txq;	/* packets ready for the UART */
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;	/* sends SLEEP_IND on TX idle */
	u32 tx_idle_delay;			/* TX idle timeout, in ms */
	struct timer_list wake_retrans_timer;	/* re-sends unacked WAKE_IND */
	u32 wake_retrans;			/* retransmit interval, in ms */
	struct workqueue_struct *workqueue;	/* ordered wq for the work below */
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	struct work_struct ctrl_memdump_evt;
	struct delayed_work ctrl_memdump_timeout;
	struct qca_memdump_info *qca_memdump;	/* non-NULL while a dump is active */
	unsigned long flags;			/* enum qca_flags bits */
	struct completion drop_ev_comp;
	wait_queue_head_t suspend_wait_q;
	enum qca_memdump_states memdump_state;
	struct mutex hci_memdump_lock;		/* serializes memdump handling */

	u16 fw_version;
	u16 controller_id;
	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;	/* jiffies of last clock vote transition */
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};
190
/* Which of the two configured UART speeds to apply */
enum qca_speed_type {
	QCA_INIT_SPEED = 1,	/* initial speed, used before setup */
	QCA_OPER_SPEED		/* operating speed */
};

/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;	/* regulator supply name */
	unsigned int load_uA;	/* load current to request, in microamps */
};

/* Static per-SoC description (regulators and capability bits) */
struct qca_device_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
	uint32_t capabilities;	/* enum qca_capabilities bits */
};

/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	struct regulator_bulk_data *vreg_bulk;
	int num_vregs;
	bool vregs_on;		/* regulators currently enabled */
	struct pwrseq_desc *pwrseq;
};

/* Per-serdev device state wrapping the generic hci_uart */
struct qca_serdev {
	struct hci_uart serdev_hu;
	struct gpio_desc *bt_en;	/* BT enable GPIO */
	struct gpio_desc *sw_ctrl;
	struct clk *susclk;		/* 32.768 kHz sleep clock */
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
	bool bdaddr_property_broken;
	bool support_hfp_hw_offload;
	const char *firmware_name[2];	/* [0] firmware, [1] rampatch */
};
236
237 static int qca_regulator_enable(struct qca_serdev *qcadev);
238 static void qca_regulator_disable(struct qca_serdev *qcadev);
239 static void qca_power_off(struct hci_uart *hu);
240 static void qca_controller_memdump(struct work_struct *work);
241 static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb);
242
qca_soc_type(struct hci_uart * hu)243 static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
244 {
245 enum qca_btsoc_type soc_type;
246
247 if (hu->serdev) {
248 struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
249
250 soc_type = qsd->btsoc_type;
251 } else {
252 soc_type = QCA_ROME;
253 }
254
255 return soc_type;
256 }
257
qca_get_firmware_name(struct hci_uart * hu)258 static const char *qca_get_firmware_name(struct hci_uart *hu)
259 {
260 if (hu->serdev) {
261 struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
262
263 return qsd->firmware_name[0];
264 } else {
265 return NULL;
266 }
267 }
268
qca_get_rampatch_name(struct hci_uart * hu)269 static const char *qca_get_rampatch_name(struct hci_uart *hu)
270 {
271 if (hu->serdev) {
272 struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
273
274 return qsd->firmware_name[1];
275 } else {
276 return NULL;
277 }
278 }
279
/* Hook called when the first TX/RX clock vote arrives; intentionally a
 * no-op on current platforms.
 */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to enable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock here if needed
	 */
}
287
/* Hook called when the last TX/RX clock vote is dropped; intentionally a
 * no-op on current platforms.
 */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to disable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock off here if needed
	 */
}
295
296 /* serial_clock_vote needs to be called with the ibs lock held */
/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	/* The clock must stay on while either side holds a vote */
	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		/* Bookkeeping only: fold elapsed time since the last
		 * transition into the accumulators, change no vote.
		 * NOTE(review): with old_vote set (clock on) this adds to
		 * vote_off_ms, which looks inverted — verify against the
		 * debugfs consumers before changing; kept as-is here.
		 */
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	new_vote = qca->rx_vote | qca->tx_vote;

	/* Only touch the hardware (and the stats) on an actual on/off
	 * transition of the combined vote.
	 */
	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
		       str_true_false(vote));

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}
363
364 /* Builds and sends an HCI_IBS command packet.
365 * These are very simple packets with only 1 cmd byte.
366 */
/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 *
 * The packet is only queued on txq; the caller is responsible for
 * kicking the transmit path (hci_uart_tx_wakeup()).
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	struct sk_buff *skb;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	/* May run under the ibs spinlock, hence GFP_ATOMIC */
	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	/* Dropped the dead "err" local: it was initialized to 0 and
	 * never assigned, so returning 0 directly is equivalent.
	 */
	return 0;
}
388
/* Workqueue handler: wake the controller so queued TX can proceed.
 * Runs in process context, which allows voting the serial clock on
 * before sending the WAKE_IND under the ibs lock.
 */
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer in case the WAKE_ACK never arrives */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
419
/* Workqueue handler: complete a device-initiated wake-up of the RX path.
 * Votes the rx clock on (process context), marks RX awake and sends the
 * WAKE_ACK back to the controller.
 */
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
447
qca_wq_serial_rx_clock_vote_off(struct work_struct * work)448 static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
449 {
450 struct qca_data *qca = container_of(work, struct qca_data,
451 ws_rx_vote_off);
452 struct hci_uart *hu = qca->hu;
453
454 BT_DBG("hu %p rx clock vote off", hu);
455
456 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
457 }
458
/* Workqueue handler: drop the TX serial clock vote after flushing the
 * transmit path. The tx_wakeup must happen first — see below.
 */
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
475
/* Timer handler: the TX side has been idle for tx_idle_delay ms, so
 * tell the controller we are going to sleep and drop the tx clock vote.
 */
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = timer_container_of(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	/* NOTE(review): nested lock class presumably to keep lockdep happy
	 * when this fires while the lock's class is held elsewhere — confirm
	 * against the locking in the rest of the driver.
	 */
	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		/* Vote the clock off in process context */
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
508
hci_ibs_wake_retrans_timeout(struct timer_list * t)509 static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
510 {
511 struct qca_data *qca = timer_container_of(qca, t, wake_retrans_timer);
512 struct hci_uart *hu = qca->hu;
513 unsigned long flags, retrans_delay;
514 bool retransmit = false;
515
516 BT_DBG("hu %p wake retransmit timeout in %d state",
517 hu, qca->tx_ibs_state);
518
519 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
520 flags, SINGLE_DEPTH_NESTING);
521
522 /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
523 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
524 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
525 return;
526 }
527
528 switch (qca->tx_ibs_state) {
529 case HCI_IBS_TX_WAKING:
530 /* No WAKE_ACK, retransmit WAKE */
531 retransmit = true;
532 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
533 BT_ERR("Failed to acknowledge device wake up");
534 break;
535 }
536 qca->ibs_sent_wakes++;
537 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
538 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
539 break;
540
541 case HCI_IBS_TX_ASLEEP:
542 case HCI_IBS_TX_AWAKE:
543 default:
544 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
545 break;
546 }
547
548 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
549
550 if (retransmit)
551 hci_uart_tx_wakeup(hu);
552 }
553
554
/* Delayed work: fires when the controller stops delivering memdump
 * frames before the dump completed (armed in qca_controller_memdump()).
 * Escalates by injecting a HW error to reset device and driver.
 */
static void qca_controller_memdump_timeout(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_timeout.work);
	struct hci_uart *hu = qca->hu;

	mutex_lock(&qca->hci_memdump_lock);
	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}

	mutex_unlock(&qca->hci_memdump_lock);
}
574
575
576 /* Initialize protocol */
/* Initialize protocol.
 * Allocates and wires up the per-port qca_data: queues, locks, work
 * items and IBS timers. Both IBS sides start in the ASLEEP state.
 * Returns 0 on success, -EOPNOTSUPP without flow control, or -ENOMEM.
 */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	/* HCI_IBS requires hardware flow control on the UART */
	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	qca = kzalloc_obj(*qca);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	skb_queue_head_init(&qca->rx_memdump_q);
	spin_lock_init(&qca->hci_ibs_lock);
	mutex_init(&qca->hci_memdump_lock);
	/* Ordered workqueue: wake/vote/memdump work items execute one
	 * at a time, in queueing order.
	 */
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
			  qca_controller_memdump_timeout);
	init_waitqueue_head(&qca->suspend_wait_q);

	qca->hu = hu;
	init_completion(&qca->drop_ev_comp);

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);

		/* Pick up the serdev-provided initial speed for these SoCs */
		switch (qcadev->btsoc_type) {
		case QCA_WCN3950:
		case QCA_WCN3988:
		case QCA_WCN3990:
		case QCA_WCN3991:
		case QCA_WCN3998:
		case QCA_WCN6750:
			hu->init_speed = qcadev->init_speed;
			break;

		default:
			break;
		}

		if (qcadev->oper_speed)
			hu->oper_speed = qcadev->oper_speed;
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
655
/* Expose the IBS state machine and statistics under
 * <debugfs>/bluetooth/hciX/ibs. Safe to call repeatedly: the
 * QCA_DEBUGFS_CREATED flag makes creation one-shot per device.
 */
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = 0444;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write: allow tuning the IBS timeouts at runtime */
	mode = 0644;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
704
705 /* Flush protocol data */
/* Flush protocol data: discard everything queued for transmission,
 * both packets waiting for the device to wake and packets ready to go.
 */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}
717
718 /* Close protocol */
/* Close protocol: power the controller down if still on, then tear
 * down timers, workqueue, queues and free the per-port state.
 */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	/* BT core skips qca_hci_shutdown() which calls qca_power_off() on rmmod */
	if (!test_bit(QCA_BT_OFF, &qca->flags))
		qca_power_off(hu);

	/* Final stats accounting before everything goes away */
	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);
	/*
	 * Shut the timers down so they can't be rearmed when
	 * destroy_workqueue() drains pending work which in turn might try
	 * to arm a timer. After shutdown rearm attempts are silently
	 * ignored by the timer core code.
	 */
	timer_shutdown_sync(&qca->tx_idle_timer);
	timer_shutdown_sync(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	/* Drop any partially reassembled rx packet */
	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}
753
754 /* Called upon a wake-up-indication from the device.
755 */
/* Called upon a wake-up-indication from the device.
 * If the RX side is asleep, the clock vote and WAKE_ACK are deferred to
 * the ws_awake_rx worker (voting may sleep); if already awake, the ACK
 * is sent directly under the ibs lock.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	/* Don't wake the rx up when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure clock is on - we may have turned clock off since
		 * receiving the wake up indicator awake rx clock.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
805
806 /* Called upon a sleep-indication from the device.
807 */
/* Called upon a sleep-indication from the device.
 * Moves the RX side to ASLEEP and defers the rx clock vote release to
 * the workqueue.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	/* Let any waiter (presumably the suspend path) re-check the
	 * rx state — NOTE(review): confirm against qca_suspend().
	 */
	wake_up_interruptible(&qca->suspend_wait_q);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
841
842 /* Called upon wake-up-acknowledgement from the device
843 */
/* Called upon wake-up-acknowledgement from the device.
 * On WAKING -> AWAKE, moves the packets parked on tx_wait_q to txq,
 * stops the WAKE retransmit timer and starts the TX idle timer.
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	/* Don't react to the wake-up-acknowledgment when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		timer_delete(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
893
894 /* Enqueue frame for transmission (padding, crc, etc) may be called from
895 * two simultaneous tasklets.
896 */
/* Enqueue frame for transmission (padding, crc, etc) may be called from
 * two simultaneous tasklets.
 * Routes the skb to txq (device awake / IBS disabled) or tx_wait_q
 * (device asleep or waking), starting a wake-up when needed.
 * Always returns 0; ownership of skb is taken in all paths.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
		/* As SSR is in progress, ignore the packets */
		bt_dev_dbg(hu->hdev, "SSR is in progress");
		kfree_skb(skb);
		return 0;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Don't go to sleep in middle of patch download or
	 * Out-Of-Band(GPIOs control) sleep is selected.
	 * Don't wake the device up when suspending.
	 */
	if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
	    test_bit(QCA_SUSPENDING, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return 0;
	}

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		/* Real traffic: push the TX idle deadline out */
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		dev_kfree_skb_irq(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}
964
qca_ibs_sleep_ind(struct hci_dev * hdev,struct sk_buff * skb)965 static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
966 {
967 struct hci_uart *hu = hci_get_drvdata(hdev);
968
969 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
970
971 device_want_to_sleep(hu);
972
973 kfree_skb(skb);
974 return 0;
975 }
976
qca_ibs_wake_ind(struct hci_dev * hdev,struct sk_buff * skb)977 static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
978 {
979 struct hci_uart *hu = hci_get_drvdata(hdev);
980
981 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
982
983 device_want_to_wakeup(hu);
984
985 kfree_skb(skb);
986 return 0;
987 }
988
qca_ibs_wake_ack(struct hci_dev * hdev,struct sk_buff * skb)989 static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
990 {
991 struct hci_uart *hu = hci_get_drvdata(hdev);
992
993 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
994
995 device_woke_up(hu);
996
997 kfree_skb(skb);
998 return 0;
999 }
1000
qca_recv_acl_data(struct hci_dev * hdev,struct sk_buff * skb)1001 static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
1002 {
1003 /* We receive debug logs from chip as an ACL packets.
1004 * Instead of sending the data to ACL to decode the
1005 * received data, we are pushing them to the above layers
1006 * as a diagnostic packet.
1007 */
1008 if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
1009 return hci_recv_diag(hdev, skb);
1010
1011 return hci_recv_frame(hdev, skb);
1012 }
1013
/* Append identification text (controller id, firmware version, vendor,
 * driver name) to the devcoredump header skb.
 */
static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	char buf[80];

	snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n",
		 qca->controller_id);
	skb_put_data(skb, buf, strlen(buf));

	snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n",
		 qca->fw_version);
	skb_put_data(skb, buf, strlen(buf));

	snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n");
	skb_put_data(skb, buf, strlen(buf));

	snprintf(buf, sizeof(buf), "Driver: %s\n",
		 hu->serdev->dev.driver->name);
	skb_put_data(skb, buf, strlen(buf));
}
1035
/* Work handler that drains qca->rx_memdump_q and assembles a controller
 * memory dump (frames are queued by qca_controller_memdump_event()).
 *
 * Each frame starts with a qca_memdump_event_hdr carrying a little-endian
 * sequence number; frame 0 additionally carries the total dump size
 * (struct qca_dump_size). Collected data is streamed to the devcoredump
 * core via hci_devcd_*(). All memdump state is mutated under
 * qca->hci_memdump_lock.
 */
static void qca_controller_memdump(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_evt);
	struct hci_uart *hu = qca->hu;
	struct sk_buff *skb;
	struct qca_memdump_event_hdr *cmd_hdr;
	struct qca_memdump_info *qca_memdump = qca->qca_memdump;
	struct qca_dump_size *dump;
	u16 seq_no;
	u32 rx_size;
	int ret = 0;
	enum qca_btsoc_type soc_type = qca_soc_type(hu);

	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {

		mutex_lock(&qca->hci_memdump_lock);
		/* Skip processing the received packets if timeout detected
		 * or memdump collection completed.
		 */
		/* NOTE(review): the skb dequeued just above is not freed on
		 * this early-return path — looks like a leak; confirm against
		 * mainline before relying on it.
		 */
		if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
		    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
			mutex_unlock(&qca->hci_memdump_lock);
			return;
		}

		if (!qca_memdump) {
			/* GFP_ATOMIC because we hold hci_memdump_lock and are
			 * on the memdump fast path.
			 */
			qca_memdump = kzalloc_obj(*qca_memdump, GFP_ATOMIC);
			if (!qca_memdump) {
				mutex_unlock(&qca->hci_memdump_lock);
				return;
			}

			qca->qca_memdump = qca_memdump;
		}

		qca->memdump_state = QCA_MEMDUMP_COLLECTING;
		cmd_hdr = (void *) skb->data;
		seq_no = __le16_to_cpu(cmd_hdr->seq_no);
		skb_pull(skb, sizeof(struct qca_memdump_event_hdr));

		if (!seq_no) {

			/* This is the first frame of memdump packet from
			 * the controller, Disable IBS to receive dump
			 * with out any interruption, ideally time required for
			 * the controller to send the dump is 8 seconds. let us
			 * start timer to handle this asynchronous activity.
			 */
			set_bit(QCA_IBS_DISABLED, &qca->flags);
			set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
			dump = (void *) skb->data;
			qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size);
			if (!(qca_memdump->ram_dump_size)) {
				bt_dev_err(hu->hdev, "Rx invalid memdump size");
				/* NOTE(review): qca->qca_memdump still points
				 * at the struct freed here; a later work run
				 * would reuse a dangling pointer — verify
				 * against mainline.
				 */
				kfree(qca_memdump);
				kfree_skb(skb);
				mutex_unlock(&qca->hci_memdump_lock);
				return;
			}

			queue_delayed_work(qca->workqueue,
					   &qca->ctrl_memdump_timeout,
					   msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
			skb_pull(skb, sizeof(qca_memdump->ram_dump_size));
			qca_memdump->current_seq_no = 0;
			qca_memdump->received_dump = 0;
			ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size);
			bt_dev_info(hu->hdev, "hci_devcd_init Return:%d",
				    ret);
			if (ret < 0) {
				kfree(qca->qca_memdump);
				qca->qca_memdump = NULL;
				qca->memdump_state = QCA_MEMDUMP_COLLECTED;
				cancel_delayed_work(&qca->ctrl_memdump_timeout);
				clear_and_wake_up_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
				clear_bit(QCA_IBS_DISABLED, &qca->flags);
				mutex_unlock(&qca->hci_memdump_lock);
				return;
			}

			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
				    qca_memdump->ram_dump_size);

		}

		/* If sequence no 0 is missed then there is no point in
		 * accepting the other sequences.
		 */
		if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
			bt_dev_err(hu->hdev, "QCA: Discarding other packets");
			/* NOTE(review): same dangling-pointer concern as the
			 * invalid-size path above; qca->qca_memdump is not
			 * reset to NULL here.
			 */
			kfree(qca_memdump);
			kfree_skb(skb);
			mutex_unlock(&qca->hci_memdump_lock);
			return;
		}
		/* There could be chance of missing some packets from
		 * the controller. In such cases let us store the dummy
		 * packets in the buffer.
		 */
		/* For QCA6390, controller does not lost packets but
		 * sequence number field of packet sometimes has error
		 * bits, so skip this checking for missing packet.
		 */
		while ((seq_no > qca_memdump->current_seq_no + 1) &&
		       (soc_type != QCA_QCA6390) &&
		       seq_no != QCA_LAST_SEQUENCE_NUM) {
			bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
				   qca_memdump->current_seq_no);
			rx_size = qca_memdump->received_dump;
			rx_size += QCA_DUMP_PACKET_SIZE;
			if (rx_size > qca_memdump->ram_dump_size) {
				bt_dev_err(hu->hdev,
					   "QCA memdump received %d, no space for missed packet",
					   qca_memdump->received_dump);
				break;
			}
			/* Pad the gap with zeroes so offsets stay aligned */
			hci_devcd_append_pattern(hu->hdev, 0x00,
						 QCA_DUMP_PACKET_SIZE);
			qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
			qca_memdump->current_seq_no++;
		}

		rx_size = qca_memdump->received_dump + skb->len;
		if (rx_size <= qca_memdump->ram_dump_size) {
			if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
			    (seq_no != qca_memdump->current_seq_no)) {
				bt_dev_err(hu->hdev,
					   "QCA memdump unexpected packet %d",
					   seq_no);
			}
			bt_dev_dbg(hu->hdev,
				   "QCA memdump packet %d with length %d",
				   seq_no, skb->len);
			/* hci_devcd_append() consumes the skb */
			hci_devcd_append(hu->hdev, skb);
			qca_memdump->current_seq_no += 1;
			qca_memdump->received_dump = rx_size;
		} else {
			bt_dev_err(hu->hdev,
				   "QCA memdump received no space for packet %d",
				   qca_memdump->current_seq_no);
		}

		if (seq_no == QCA_LAST_SEQUENCE_NUM) {
			bt_dev_info(hu->hdev,
				    "QCA memdump Done, received %d, total %d",
				    qca_memdump->received_dump,
				    qca_memdump->ram_dump_size);
			hci_devcd_complete(hu->hdev);
			cancel_delayed_work(&qca->ctrl_memdump_timeout);
			kfree(qca->qca_memdump);
			qca->qca_memdump = NULL;
			qca->memdump_state = QCA_MEMDUMP_COLLECTED;
			clear_and_wake_up_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		}

		mutex_unlock(&qca->hci_memdump_lock);
	}

}
1196
qca_controller_memdump_event(struct hci_dev * hdev,struct sk_buff * skb)1197 static int qca_controller_memdump_event(struct hci_dev *hdev,
1198 struct sk_buff *skb)
1199 {
1200 struct hci_uart *hu = hci_get_drvdata(hdev);
1201 struct qca_data *qca = hu->priv;
1202
1203 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1204 skb_queue_tail(&qca->rx_memdump_q, skb);
1205 queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
1206
1207 return 0;
1208 }
1209
qca_recv_event(struct hci_dev * hdev,struct sk_buff * skb)1210 static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1211 {
1212 struct hci_uart *hu = hci_get_drvdata(hdev);
1213 struct qca_data *qca = hu->priv;
1214
1215 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
1216 struct hci_event_hdr *hdr = (void *)skb->data;
1217
1218 /* For the WCN3990 the vendor command for a baudrate change
1219 * isn't sent as synchronous HCI command, because the
1220 * controller sends the corresponding vendor event with the
1221 * new baudrate. The event is received and properly decoded
1222 * after changing the baudrate of the host port. It needs to
1223 * be dropped, otherwise it can be misinterpreted as
1224 * response to a later firmware download command (also a
1225 * vendor command).
1226 */
1227
1228 if (hdr->evt == HCI_EV_VENDOR)
1229 complete(&qca->drop_ev_comp);
1230
1231 kfree_skb(skb);
1232
1233 return 0;
1234 }
1235 /* We receive chip memory dump as an event packet, With a dedicated
1236 * handler followed by a hardware error event. When this event is
1237 * received we store dump into a file before closing hci. This
1238 * dump will help in triaging the issues.
1239 */
1240 if ((skb->data[0] == HCI_VENDOR_PKT) &&
1241 (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
1242 return qca_controller_memdump_event(hdev, skb);
1243
1244 return hci_recv_frame(hdev, skb);
1245 }
1246
/* HCI_IBS messages are bare packet types with no header and no length
 * field (hlen/loff/lsize all 0); the descriptors below let h4_recv_buf()
 * recognise them, capped at HCI_MAX_IBS_SIZE bytes.
 */
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

/* Packet dispatch table for the H4 reassembler: standard H4 packet types
 * plus the three single-purpose IBS power-management messages.
 */
static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL, .recv = qca_recv_acl_data },
	{ H4_RECV_SCO, .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = qca_recv_event },
	{ H4_RECV_ISO, .recv = hci_recv_frame },
	{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
	{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
1277
qca_recv(struct hci_uart * hu,const void * data,int count)1278 static int qca_recv(struct hci_uart *hu, const void *data, int count)
1279 {
1280 struct qca_data *qca = hu->priv;
1281
1282 if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
1283 return -EUNATCH;
1284
1285 qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
1286 qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
1287 if (IS_ERR(qca->rx_skb)) {
1288 int err = PTR_ERR(qca->rx_skb);
1289 bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
1290 qca->rx_skb = NULL;
1291 return err;
1292 }
1293
1294 return count;
1295 }
1296
qca_dequeue(struct hci_uart * hu)1297 static struct sk_buff *qca_dequeue(struct hci_uart *hu)
1298 {
1299 struct qca_data *qca = hu->priv;
1300
1301 return skb_dequeue(&qca->txq);
1302 }
1303
qca_get_baudrate_value(int speed)1304 static uint8_t qca_get_baudrate_value(int speed)
1305 {
1306 switch (speed) {
1307 case 9600:
1308 return QCA_BAUDRATE_9600;
1309 case 19200:
1310 return QCA_BAUDRATE_19200;
1311 case 38400:
1312 return QCA_BAUDRATE_38400;
1313 case 57600:
1314 return QCA_BAUDRATE_57600;
1315 case 115200:
1316 return QCA_BAUDRATE_115200;
1317 case 230400:
1318 return QCA_BAUDRATE_230400;
1319 case 460800:
1320 return QCA_BAUDRATE_460800;
1321 case 500000:
1322 return QCA_BAUDRATE_500000;
1323 case 921600:
1324 return QCA_BAUDRATE_921600;
1325 case 1000000:
1326 return QCA_BAUDRATE_1000000;
1327 case 2000000:
1328 return QCA_BAUDRATE_2000000;
1329 case 3000000:
1330 return QCA_BAUDRATE_3000000;
1331 case 3200000:
1332 return QCA_BAUDRATE_3200000;
1333 case 3500000:
1334 return QCA_BAUDRATE_3500000;
1335 default:
1336 return QCA_BAUDRATE_115200;
1337 }
1338 }
1339
/* Ask the controller to switch its UART baudrate via the vendor command
 * EDL opcode 0xFC48, then wait until the request has physically left the
 * host UART.
 *
 * Returns 0 on success, -EINVAL for an out-of-range baudrate code, or
 * -ENOMEM on allocation failure. The caller is expected to change the
 * host-side baudrate afterwards (see qca_set_speed()).
 */
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	/* 0x01 0x48 0xFC: H4 command marker + opcode; cmd[4] = baudrate code */
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3200000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to allocate baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	skb_put_data(skb, cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait for the baudrate change request to be sent */

	/* Busy-poll until the TX queue drains; the write path runs
	 * asynchronously from a workqueue.
	 */
	while (!skb_queue_empty(&qca->txq))
		usleep_range(100, 200);

	if (hu->serdev)
		serdev_device_wait_until_sent(hu->serdev,
		      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

	/* Give the controller time to process the request */
	switch (qca_soc_type(hu)) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		usleep_range(1000, 10000);
		break;

	default:
		/* ROME/QCA6390 need noticeably longer to re-lock the UART */
		msleep(300);
	}

	return 0;
}
1393
host_set_baudrate(struct hci_uart * hu,unsigned int speed)1394 static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
1395 {
1396 if (hu->serdev)
1397 serdev_device_set_baudrate(hu->serdev, speed);
1398 else
1399 hci_uart_set_baudrate(hu, speed);
1400 }
1401
/* Send a wcn399x power on/off pulse byte directly on the serdev TX line.
 *
 * @on: true sends QCA_WCN3990_POWERON_PULSE, false the power-off pulse.
 *
 * The sequence flush -> flow-control off -> write -> wait-until-sent ->
 * flow-control on must be preserved: the pulse is decoded by external
 * hardware from the raw TX waveform, so nothing else may be in flight and
 * RTS/CTS must not gate it. Returns 0 on success or the serdev write
 * error.
 */
static int qca_send_power_pulse(struct hci_uart *hu, bool on)
{
	int ret;
	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
	u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;

	/* These power pulses are single byte command which are sent
	 * at required baudrate to wcn3990. On wcn3990, we have an external
	 * circuit at Tx pin which decodes the pulse sent at specific baudrate.
	 * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT
	 * and also we use the same power inputs to turn on and off for
	 * Wi-Fi/BT. Powering up the power sources will not enable BT, until
	 * we send a power on pulse at 115200 bps. This algorithm will help to
	 * save power. Disabling hardware flow control is mandatory while
	 * sending power pulses to SoC.
	 */
	bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);

	serdev_device_write_flush(hu->serdev);
	hci_uart_set_flow_control(hu, true);
	ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
	if (ret < 0) {
		bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
		return ret;
	}

	serdev_device_wait_until_sent(hu->serdev, timeout);
	hci_uart_set_flow_control(hu, false);

	/* Give to controller time to boot/shutdown */
	if (on)
		msleep(100);
	else
		usleep_range(1000, 10000);

	return 0;
}
1439
qca_get_speed(struct hci_uart * hu,enum qca_speed_type speed_type)1440 static unsigned int qca_get_speed(struct hci_uart *hu,
1441 enum qca_speed_type speed_type)
1442 {
1443 unsigned int speed = 0;
1444
1445 if (speed_type == QCA_INIT_SPEED) {
1446 if (hu->init_speed)
1447 speed = hu->init_speed;
1448 else if (hu->proto->init_speed)
1449 speed = hu->proto->init_speed;
1450 } else {
1451 if (hu->oper_speed)
1452 speed = hu->oper_speed;
1453 else if (hu->proto->oper_speed)
1454 speed = hu->proto->oper_speed;
1455 }
1456
1457 return speed;
1458 }
1459
qca_check_speeds(struct hci_uart * hu)1460 static int qca_check_speeds(struct hci_uart *hu)
1461 {
1462 switch (qca_soc_type(hu)) {
1463 case QCA_WCN3950:
1464 case QCA_WCN3988:
1465 case QCA_WCN3990:
1466 case QCA_WCN3991:
1467 case QCA_WCN3998:
1468 case QCA_WCN6750:
1469 case QCA_WCN6855:
1470 case QCA_WCN7850:
1471 if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
1472 !qca_get_speed(hu, QCA_OPER_SPEED))
1473 return -EINVAL;
1474 break;
1475
1476 default:
1477 if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
1478 !qca_get_speed(hu, QCA_OPER_SPEED))
1479 return -EINVAL;
1480 }
1481
1482 return 0;
1483 }
1484
qca_set_speed(struct hci_uart * hu,enum qca_speed_type speed_type)1485 static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
1486 {
1487 unsigned int speed, qca_baudrate;
1488 struct qca_data *qca = hu->priv;
1489 int ret = 0;
1490
1491 if (speed_type == QCA_INIT_SPEED) {
1492 speed = qca_get_speed(hu, QCA_INIT_SPEED);
1493 if (speed)
1494 host_set_baudrate(hu, speed);
1495 } else {
1496 enum qca_btsoc_type soc_type = qca_soc_type(hu);
1497
1498 speed = qca_get_speed(hu, QCA_OPER_SPEED);
1499 if (!speed)
1500 return 0;
1501
1502 /* Disable flow control for wcn3990 to deassert RTS while
1503 * changing the baudrate of chip and host.
1504 */
1505 switch (soc_type) {
1506 case QCA_WCN3950:
1507 case QCA_WCN3988:
1508 case QCA_WCN3990:
1509 case QCA_WCN3991:
1510 case QCA_WCN3998:
1511 case QCA_WCN6750:
1512 case QCA_WCN6855:
1513 case QCA_WCN7850:
1514 hci_uart_set_flow_control(hu, true);
1515 break;
1516
1517 default:
1518 break;
1519 }
1520
1521 switch (soc_type) {
1522 case QCA_WCN3990:
1523 reinit_completion(&qca->drop_ev_comp);
1524 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1525 break;
1526
1527 default:
1528 break;
1529 }
1530
1531 qca_baudrate = qca_get_baudrate_value(speed);
1532 bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
1533 ret = qca_set_baudrate(hu->hdev, qca_baudrate);
1534 if (ret)
1535 goto error;
1536
1537 host_set_baudrate(hu, speed);
1538
1539 error:
1540 switch (soc_type) {
1541 case QCA_WCN3950:
1542 case QCA_WCN3988:
1543 case QCA_WCN3990:
1544 case QCA_WCN3991:
1545 case QCA_WCN3998:
1546 case QCA_WCN6750:
1547 case QCA_WCN6855:
1548 case QCA_WCN7850:
1549 hci_uart_set_flow_control(hu, false);
1550 break;
1551
1552 default:
1553 break;
1554 }
1555
1556 switch (soc_type) {
1557 case QCA_WCN3990:
1558 /* Wait for the controller to send the vendor event
1559 * for the baudrate change command.
1560 */
1561 if (!wait_for_completion_timeout(&qca->drop_ev_comp,
1562 msecs_to_jiffies(100))) {
1563 bt_dev_err(hu->hdev,
1564 "Failed to change controller baudrate\n");
1565 ret = -ETIMEDOUT;
1566 }
1567
1568 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1569 break;
1570
1571 default:
1572 break;
1573 }
1574 }
1575
1576 return ret;
1577 }
1578
qca_send_crashbuffer(struct hci_uart * hu)1579 static int qca_send_crashbuffer(struct hci_uart *hu)
1580 {
1581 struct qca_data *qca = hu->priv;
1582 struct sk_buff *skb;
1583
1584 skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
1585 if (!skb) {
1586 bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
1587 return -ENOMEM;
1588 }
1589
1590 /* We forcefully crash the controller, by sending 0xfb byte for
1591 * 1024 times. We also might have chance of losing data, To be
1592 * on safer side we send 1096 bytes to the SoC.
1593 */
1594 memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
1595 QCA_CRASHBYTE_PACKET_LEN);
1596 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1597 bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
1598 skb_queue_tail(&qca->txq, skb);
1599 hci_uart_tx_wakeup(hu);
1600
1601 return 0;
1602 }
1603
qca_wait_for_dump_collection(struct hci_dev * hdev)1604 static void qca_wait_for_dump_collection(struct hci_dev *hdev)
1605 {
1606 struct hci_uart *hu = hci_get_drvdata(hdev);
1607 struct qca_data *qca = hu->priv;
1608
1609 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
1610 TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
1611
1612 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1613 }
1614
/* HCI hw_error hook: collect a controller memory dump and resynchronize
 * host state after the SoC's SubSystem Restart (SSR).
 *
 * Depending on memdump_state this either forces a crash-and-dump
 * (QCA_MEMDUMP_IDLE), waits for an in-flight dump to finish
 * (QCA_MEMDUMP_COLLECTING), and finally cleans up on timeout. Runs in
 * process context; blocks for up to several seconds.
 */
static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);

	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		/* If hardware error event received for other than QCA
		 * soc memory dump event, then we need to crash the SOC
		 * and wait here for 8 seconds to get the dump packets.
		 * This will block main thread to be on hold until we
		 * collect dump.
		 */
		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		qca_send_crashbuffer(hu);
		qca_wait_for_dump_collection(hdev);
	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
		/* Let us wait here until memory dump collected or
		 * memory dump timer expired.
		 */
		bt_dev_info(hdev, "waiting for dump to complete");
		qca_wait_for_dump_collection(hdev);
	}

	mutex_lock(&qca->hci_memdump_lock);
	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
		/* Dump did not complete in time: abort the devcoredump and
		 * drop whatever partial state the work handler built up.
		 */
		bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
		hci_devcd_abort(hu->hdev);
		if (qca->qca_memdump) {
			kfree(qca->qca_memdump);
			qca->qca_memdump = NULL;
		}
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		cancel_delayed_work(&qca->ctrl_memdump_timeout);
	}
	mutex_unlock(&qca->hci_memdump_lock);

	if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
	    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
		/* Flush any memdump work/frames still queued */
		cancel_work_sync(&qca->ctrl_memdump_evt);
		skb_queue_purge(&qca->rx_memdump_q);
	}

	/*
	 * If the BT chip's bt_en pin is connected to a 3.3V power supply via
	 * hardware and always stays high, driver cannot control the bt_en pin.
	 * As a result, during SSR (SubSystem Restart), QCA_SSR_TRIGGERED and
	 * QCA_IBS_DISABLED flags cannot be cleared, which leads to a reset
	 * command timeout.
	 * Add an msleep delay to ensure controller completes the SSR process.
	 *
	 * Host will not download the firmware after SSR, controller to remain
	 * in the IBS_WAKE state, and the host needs to synchronize with it
	 *
	 * Since the bluetooth chip has been reset, clear the memdump state.
	 */
	if (!hci_test_quirk(hu->hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
		/*
		 * When the SSR (SubSystem Restart) duration exceeds 2 seconds,
		 * it triggers host tx_idle_delay, which sets host TX state
		 * to sleep. Reset tx_idle_timer after SSR to prevent
		 * host enter TX IBS_Sleep mode.
		 */
		mod_timer(&qca->tx_idle_timer, jiffies +
			  msecs_to_jiffies(qca->tx_idle_delay));

		/* Controller reset completion time is 50ms */
		msleep(50);

		clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
		clear_bit(QCA_IBS_DISABLED, &qca->flags);

		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		qca->memdump_state = QCA_MEMDUMP_IDLE;
	}

	clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
1696
/* HCI reset hook: make sure a memory dump is captured before the stack
 * tears the device down.
 *
 * Mirrors the collection logic of qca_hw_error(); if the dump still is
 * not complete afterwards and no hardware error is already in flight, a
 * device reset is injected so the core re-initializes the controller.
 */
static void qca_reset(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		/* No dump in progress: force one by crashing the SoC */
		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		qca_send_crashbuffer(hu);
		qca_wait_for_dump_collection(hdev);
	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
		/* Let us wait here until memory dump collected or
		 * memory dump timer expired.
		 */
		bt_dev_info(hdev, "waiting for dump to complete");
		qca_wait_for_dump_collection(hdev);
	}

	mutex_lock(&qca->hci_memdump_lock);
	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}
	mutex_unlock(&qca->hci_memdump_lock);
}
1727
qca_wakeup(struct hci_dev * hdev)1728 static bool qca_wakeup(struct hci_dev *hdev)
1729 {
1730 struct hci_uart *hu = hci_get_drvdata(hdev);
1731 bool wakeup;
1732
1733 if (!hu->serdev)
1734 return true;
1735
1736 /* BT SoC attached through the serial bus is handled by the serdev driver.
1737 * So we need to use the device handle of the serdev driver to get the
1738 * status of device may wakeup.
1739 */
1740 wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
1741 bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
1742
1743 return wakeup;
1744 }
1745
qca_port_reopen(struct hci_uart * hu)1746 static int qca_port_reopen(struct hci_uart *hu)
1747 {
1748 int ret;
1749
1750 /* Now the device is in ready state to communicate with host.
1751 * To sync host with device we need to reopen port.
1752 * Without this, we will have RTS and CTS synchronization
1753 * issues.
1754 */
1755 serdev_device_close(hu->serdev);
1756 ret = serdev_device_open(hu->serdev);
1757 if (ret) {
1758 bt_dev_err(hu->hdev, "failed to open port");
1759 return ret;
1760 }
1761
1762 hci_uart_set_flow_control(hu, false);
1763
1764 return 0;
1765 }
1766
/* Power-sequence a regulator-supplied SoC into its boot state.
 *
 * Re-enables the voltage regulators if a previous hci down turned them
 * off (the port must be closed across that toggle), then performs the
 * SoC-family-specific boot ritual: wcn399x gets an off/on power-pulse
 * pair at fixed baudrates, parts with a bt_en GPIO get a low/high toggle.
 * Finishes by reopening the port to resync flow-control lines.
 * The ordering of these steps is hardware-mandated; do not reorder.
 */
static int qca_regulator_init(struct hci_uart *hu)
{
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	struct qca_serdev *qcadev;
	int ret;
	bool sw_ctrl_state;

	/* Check for vregs status, may be hci down has turned
	 * off the voltage regulator.
	 */
	qcadev = serdev_device_get_drvdata(hu->serdev);

	if (!qcadev->bt_power->vregs_on) {
		serdev_device_close(hu->serdev);
		ret = qca_regulator_enable(qcadev);
		if (ret)
			return ret;

		ret = serdev_device_open(hu->serdev);
		if (ret) {
			bt_dev_err(hu->hdev, "failed to open port");
			return ret;
		}
	}

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		/* Forcefully enable wcn399x to enter in to boot mode. */
		host_set_baudrate(hu, 2400);
		ret = qca_send_power_pulse(hu, false);
		if (ret)
			return ret;
		break;

	default:
		break;
	}

	/* For wcn6750 need to enable gpio bt_en */
	if (qcadev->bt_en) {
		gpiod_set_value_cansleep(qcadev->bt_en, 0);
		msleep(50);
		gpiod_set_value_cansleep(qcadev->bt_en, 1);
		msleep(50);
		if (qcadev->sw_ctrl) {
			/* sw_ctrl is an input reporting BT/Wi-Fi coex state */
			sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
			bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
		}
	}

	qca_set_speed(hu, QCA_INIT_SPEED);

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		/* Power-on pulse at init speed brings the SoC out of reset */
		ret = qca_send_power_pulse(hu, true);
		if (ret)
			return ret;
		break;

	default:
		break;
	}

	return qca_port_reopen(hu);
}
1840
/* Power the controller on prior to setup.
 *
 * Regulator-driven SoCs go through the full qca_regulator_init()
 * sequence; GPIO-only designs (e.g. ROME) just get bt_en asserted plus a
 * boot delay. Non-serdev attachments are assumed to be externally
 * powered and need no action. Clears QCA_BT_OFF on success paths.
 */
static int qca_power_on(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	struct qca_serdev *qcadev;
	struct qca_data *qca = hu->priv;
	int ret = 0;

	/* Non-serdev device usually is powered by external power
	 * and don't need additional action in driver for power on
	 */
	if (!hu->serdev)
		return 0;

	switch (soc_type) {
	case QCA_QCA6390:
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		ret = qca_regulator_init(hu);
		break;

	default:
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->bt_en) {
			gpiod_set_value_cansleep(qcadev->bt_en, 1);
			/* Controller needs time to bootup. */
			msleep(150);
		}
	}

	/* Cleared even if qca_regulator_init() failed; callers check ret */
	clear_bit(QCA_BT_OFF, &qca->flags);
	return ret;
}
1880
hci_coredump_qca(struct hci_dev * hdev)1881 static void hci_coredump_qca(struct hci_dev *hdev)
1882 {
1883 int err;
1884 static const u8 param[] = { 0x26 };
1885
1886 err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
1887 if (err < 0)
1888 bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
1889 }
1890
/* hdev->get_data_path_id callback for HFP hardware offload.
 * Always reports data path id 1 and succeeds; hdev is unused.
 */
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
{
	/* QCA uses 1 as non-HCI data path id for HFP */
	*data_path_id = 1;
	return 0;
}
1897
qca_configure_hfp_offload(struct hci_dev * hdev)1898 static int qca_configure_hfp_offload(struct hci_dev *hdev)
1899 {
1900 bt_dev_info(hdev, "HFP non-HCI data transport is supported");
1901 hdev->get_data_path_id = qca_get_data_path_id;
1902 /* Do not need to send HCI_Configure_Data_Path to configure non-HCI
1903 * data transport path for QCA controllers, so set below field as NULL.
1904 */
1905 hdev->get_codec_config_data = NULL;
1906 return 0;
1907 }
1908
/* hci_uart_proto .setup hook: power on the SoC, negotiate UART speeds,
 * download patch/NVM firmware and register the coredump hooks.
 *
 * On any failure the device is powered off and the whole sequence is
 * retried up to MAX_INIT_RETRIES times (reopening the serdev port between
 * attempts). A missing firmware (-ENOENT/-EAGAIN) is not fatal: the
 * controller keeps running its ROM firmware and QCA_ROM_FW is set.
 */
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	unsigned int retries = 0;
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	const char *firmware_name = qca_get_firmware_name(hu);
	const char *rampatch_name = qca_get_rampatch_name(hu);
	int ret;
	struct qca_btsoc_version ver;
	/* NOTE(review): dereferenced unconditionally although later code
	 * guards on hu->serdev — presumably .setup only runs for serdev
	 * attachments; confirm.
	 */
	struct qca_serdev *qcadev = serdev_device_get_drvdata(hu->serdev);
	const char *soc_name;

	ret = qca_check_speeds(hu);
	if (ret)
		return ret;

	clear_bit(QCA_ROM_FW, &qca->flags);
	/* Patch downloading has to be done without IBS mode */
	set_bit(QCA_IBS_DISABLED, &qca->flags);

	/* Enable controller to do both LE scan and BR/EDR inquiry
	 * simultaneously.
	 */
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);

	switch (soc_type) {
	case QCA_QCA2066:
		soc_name = "qca2066";
		break;

	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		soc_name = "wcn399x";
		break;

	case QCA_WCN6750:
		soc_name = "wcn6750";
		break;

	case QCA_WCN6855:
		soc_name = "wcn6855";
		break;

	case QCA_WCN7850:
		soc_name = "wcn7850";
		break;

	default:
		soc_name = "ROME/QCA6390";
	}
	bt_dev_info(hdev, "setting up %s", soc_name);

	qca->memdump_state = QCA_MEMDUMP_IDLE;

retry:
	ret = qca_power_on(hdev);
	if (ret)
		goto out;

	clear_bit(QCA_SSR_TRIGGERED, &qca->flags);

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		if (qcadev->bdaddr_property_broken)
			hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);

		hci_set_aosp_capable(hdev);

		/* These SoCs report their version at init speed */
		ret = qca_read_soc_version(hdev, &ver, soc_type);
		if (ret)
			goto out;
		break;

	default:
		qca_set_speed(hu, QCA_INIT_SPEED);
	}

	/* Setup user speed if needed */
	speed = qca_get_speed(hu, QCA_OPER_SPEED);
	if (speed) {
		ret = qca_set_speed(hu, QCA_OPER_SPEED);
		if (ret)
			goto out;

		qca_baudrate = qca_get_baudrate_value(speed);
	}

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		/* Version already read above at init speed */
		break;

	default:
		/* Get QCA version information */
		ret = qca_read_soc_version(hdev, &ver, soc_type);
		if (ret)
			goto out;
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
			     firmware_name, rampatch_name);
	if (!ret) {
		/* Firmware is up: IBS may now be used, and error/reset
		 * hooks become meaningful.
		 */
		clear_bit(QCA_IBS_DISABLED, &qca->flags);
		qca_debugfs_init(hdev);
		hu->hdev->hw_error = qca_hw_error;
		hu->hdev->reset = qca_reset;
		if (hu->serdev) {
			if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
				hu->hdev->wakeup = qca_wakeup;
		}
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		set_bit(QCA_ROM_FW, &qca->flags);
		ret = 0;
	} else if (ret == -EAGAIN) {
		/*
		 * Userspace firmware loader will return -EAGAIN in case no
		 * patch/nvm-config is found, so run with original fw/config.
		 */
		set_bit(QCA_ROM_FW, &qca->flags);
		ret = 0;
	}

out:
	if (ret) {
		qca_power_off(hu);

		if (retries < MAX_INIT_RETRIES) {
			bt_dev_warn(hdev, "Retry BT power ON:%d", retries);
			/* Reopen the port so the next attempt starts from a
			 * clean UART state.
			 */
			if (hu->serdev) {
				serdev_device_close(hu->serdev);
				ret = serdev_device_open(hu->serdev);
				if (ret) {
					bt_dev_err(hdev, "failed to open port");
					return ret;
				}
			}
			retries++;
			goto retry;
		}
		return ret;
	}

	/* Setup bdaddr */
	if (soc_type == QCA_ROME)
		hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
	else
		hu->hdev->set_bdaddr = qca_set_bdaddr;

	if (qcadev->support_hfp_hw_offload)
		qca_configure_hfp_offload(hdev);

	qca->fw_version = le16_to_cpu(ver.patch_ver);
	qca->controller_id = le16_to_cpu(ver.rom_ver);
	hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL);

	return ret;
}
2086
/* hci_uart protocol descriptor for QCA SoCs: H4 framing plus HCI_IBS,
 * 115200 bps for bring-up, 3 Mbps for operation (manufacturer 29 =
 * Qualcomm).
 */
static const struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};
2101
/* Per-SoC static configuration: regulator supplies with their load
 * currents (uA — presumably; confirm against qca_vreg users) and
 * capability flags. QCA2066/QCA6390 are externally powered (no vregs).
 */
static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
	.soc_type = QCA_QCA2066,
	.num_vregs = 0,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
			QCA_CAP_HFP_HW_OFFLOAD,
};

static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
	.soc_type = QCA_QCA6390,
	.num_vregs = 0,
};

static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
	.soc_type = QCA_WCN3950,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 60000 },
		{ "vddrf", 155000 },
		{ "vddch0", 585000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
	.soc_type = QCA_WCN3988,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
	.soc_type = QCA_WCN3990,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = {
	.soc_type = QCA_WCN3991,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
	.soc_type = QCA_WCN3998,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 10000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
	.soc_type = QCA_WCN6750,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddaon", 26000 },
		{ "vddbtcxmx", 126000 },
		{ "vddrfacmn", 12500 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p7", 302000 },
		{ "vddrfa1p2", 257000 },
		{ "vddrfa2p2", 1700000 },
		{ "vddasd", 200 },
	},
	.num_vregs = 9,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = {
	.soc_type = QCA_WCN6855,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddbtcxmx", 126000 },
		{ "vddrfacmn", 12500 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p7", 302000 },
		{ "vddrfa1p2", 257000 },
	},
	.num_vregs = 6,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
			QCA_CAP_HFP_HW_OFFLOAD,
};

static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
	.soc_type = QCA_WCN7850,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddaon", 26000 },
		{ "vdddig", 126000 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p2", 257000 },
		{ "vddrfa1p9", 302000 },
	},
	.num_vregs = 6,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES |
			QCA_CAP_HFP_HW_OFFLOAD,
};
2216
qca_power_off(struct hci_uart * hu)2217 static void qca_power_off(struct hci_uart *hu)
2218 {
2219 struct qca_serdev *qcadev;
2220 struct qca_data *qca = hu->priv;
2221 unsigned long flags;
2222 enum qca_btsoc_type soc_type = qca_soc_type(hu);
2223 bool sw_ctrl_state;
2224 struct qca_power *power;
2225
2226 /* From this point we go into power off state. But serial port is
2227 * still open, stop queueing the IBS data and flush all the buffered
2228 * data in skb's.
2229 */
2230 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
2231 set_bit(QCA_IBS_DISABLED, &qca->flags);
2232 qca_flush(hu);
2233 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2234
2235 /* Non-serdev device usually is powered by external power
2236 * and don't need additional action in driver for power down
2237 */
2238 if (!hu->serdev)
2239 return;
2240
2241 qcadev = serdev_device_get_drvdata(hu->serdev);
2242 power = qcadev->bt_power;
2243
2244 switch (soc_type) {
2245 case QCA_WCN3988:
2246 case QCA_WCN3990:
2247 case QCA_WCN3991:
2248 case QCA_WCN3998:
2249 host_set_baudrate(hu, 2400);
2250 qca_send_power_pulse(hu, false);
2251 break;
2252 default:
2253 break;
2254 }
2255
2256 if (power && power->pwrseq) {
2257 pwrseq_power_off(power->pwrseq);
2258 set_bit(QCA_BT_OFF, &qca->flags);
2259 return;
2260 }
2261
2262 switch (soc_type) {
2263 case QCA_WCN3988:
2264 case QCA_WCN3990:
2265 case QCA_WCN3991:
2266 case QCA_WCN3998:
2267 qca_regulator_disable(qcadev);
2268 break;
2269
2270 case QCA_WCN6750:
2271 case QCA_WCN6855:
2272 gpiod_set_value_cansleep(qcadev->bt_en, 0);
2273 msleep(100);
2274 qca_regulator_disable(qcadev);
2275 if (qcadev->sw_ctrl) {
2276 sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
2277 BT_DBG("SW_CTRL is %d", sw_ctrl_state);
2278 }
2279 break;
2280
2281 default:
2282 gpiod_set_value_cansleep(qcadev->bt_en, 0);
2283 }
2284
2285 set_bit(QCA_BT_OFF, &qca->flags);
2286 }
2287
qca_hci_shutdown(struct hci_dev * hdev)2288 static int qca_hci_shutdown(struct hci_dev *hdev)
2289 {
2290 struct hci_uart *hu = hci_get_drvdata(hdev);
2291 struct qca_data *qca = hu->priv;
2292 enum qca_btsoc_type soc_type = qca_soc_type(hu);
2293
2294 hu->hdev->hw_error = NULL;
2295 hu->hdev->reset = NULL;
2296
2297 timer_delete_sync(&qca->wake_retrans_timer);
2298 timer_delete_sync(&qca->tx_idle_timer);
2299
2300 /* Stop sending shutdown command if soc crashes. */
2301 if (soc_type != QCA_ROME
2302 && qca->memdump_state == QCA_MEMDUMP_IDLE) {
2303 qca_send_pre_shutdown_cmd(hdev);
2304 usleep_range(8000, 10000);
2305 }
2306
2307 qca_power_off(hu);
2308 return 0;
2309 }
2310
qca_regulator_enable(struct qca_serdev * qcadev)2311 static int qca_regulator_enable(struct qca_serdev *qcadev)
2312 {
2313 struct qca_power *power = qcadev->bt_power;
2314 int ret;
2315
2316 if (power->pwrseq)
2317 return pwrseq_power_on(power->pwrseq);
2318
2319 /* Already enabled */
2320 if (power->vregs_on)
2321 return 0;
2322
2323 BT_DBG("enabling %d regulators)", power->num_vregs);
2324
2325 ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
2326 if (ret)
2327 return ret;
2328
2329 power->vregs_on = true;
2330
2331 ret = clk_prepare_enable(qcadev->susclk);
2332 if (ret)
2333 qca_regulator_disable(qcadev);
2334
2335 return ret;
2336 }
2337
qca_regulator_disable(struct qca_serdev * qcadev)2338 static void qca_regulator_disable(struct qca_serdev *qcadev)
2339 {
2340 struct qca_power *power;
2341
2342 if (!qcadev)
2343 return;
2344
2345 power = qcadev->bt_power;
2346
2347 /* Already disabled? */
2348 if (!power->vregs_on)
2349 return;
2350
2351 regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
2352 power->vregs_on = false;
2353
2354 clk_disable_unprepare(qcadev->susclk);
2355 }
2356
qca_init_regulators(struct qca_power * qca,const struct qca_vreg * vregs,size_t num_vregs)2357 static int qca_init_regulators(struct qca_power *qca,
2358 const struct qca_vreg *vregs, size_t num_vregs)
2359 {
2360 struct regulator_bulk_data *bulk;
2361 int ret;
2362 int i;
2363
2364 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
2365 if (!bulk)
2366 return -ENOMEM;
2367
2368 for (i = 0; i < num_vregs; i++)
2369 bulk[i].supply = vregs[i].name;
2370
2371 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
2372 if (ret < 0)
2373 return ret;
2374
2375 for (i = 0; i < num_vregs; i++) {
2376 ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
2377 if (ret)
2378 return ret;
2379 }
2380
2381 qca->vreg_bulk = bulk;
2382 qca->num_vregs = num_vregs;
2383
2384 return 0;
2385 }
2386
qca_serdev_probe(struct serdev_device * serdev)2387 static int qca_serdev_probe(struct serdev_device *serdev)
2388 {
2389 struct qca_serdev *qcadev;
2390 struct hci_dev *hdev;
2391 const struct qca_device_data *data;
2392 int err;
2393 bool power_ctrl_enabled = true;
2394
2395 qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
2396 if (!qcadev)
2397 return -ENOMEM;
2398
2399 qcadev->serdev_hu.serdev = serdev;
2400 data = device_get_match_data(&serdev->dev);
2401 serdev_device_set_drvdata(serdev, qcadev);
2402 device_property_read_string_array(&serdev->dev, "firmware-name",
2403 qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
2404 device_property_read_u32(&serdev->dev, "max-speed",
2405 &qcadev->oper_speed);
2406 if (!qcadev->oper_speed)
2407 BT_DBG("UART will pick default operating speed");
2408
2409 qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
2410 "qcom,local-bd-address-broken");
2411
2412 if (data)
2413 qcadev->btsoc_type = data->soc_type;
2414 else
2415 qcadev->btsoc_type = QCA_ROME;
2416
2417 switch (qcadev->btsoc_type) {
2418 case QCA_QCA6390:
2419 case QCA_WCN3950:
2420 case QCA_WCN3988:
2421 case QCA_WCN3990:
2422 case QCA_WCN3991:
2423 case QCA_WCN3998:
2424 case QCA_WCN6750:
2425 case QCA_WCN6855:
2426 case QCA_WCN7850:
2427 qcadev->bt_power = devm_kzalloc(&serdev->dev,
2428 sizeof(struct qca_power),
2429 GFP_KERNEL);
2430 if (!qcadev->bt_power)
2431 return -ENOMEM;
2432 break;
2433 default:
2434 break;
2435 }
2436
2437 switch (qcadev->btsoc_type) {
2438 case QCA_WCN3950:
2439 case QCA_WCN3988:
2440 case QCA_WCN3990:
2441 case QCA_WCN3991:
2442 case QCA_WCN3998:
2443 case QCA_WCN6750:
2444 case QCA_WCN6855:
2445 case QCA_WCN7850:
2446 if (!device_property_present(&serdev->dev, "enable-gpios")) {
2447 /*
2448 * Backward compatibility with old DT sources. If the
2449 * node doesn't have the 'enable-gpios' property then
2450 * let's use the power sequencer. Otherwise, let's
2451 * drive everything ourselves.
2452 */
2453 qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
2454 "bluetooth");
2455
2456 /*
2457 * Some modules have BT_EN enabled via a hardware pull-up,
2458 * meaning it is not defined in the DTS and is not controlled
2459 * through the power sequence. In such cases, fall through
2460 * to follow the legacy flow.
2461 */
2462 if (IS_ERR(qcadev->bt_power->pwrseq))
2463 qcadev->bt_power->pwrseq = NULL;
2464 else
2465 break;
2466 }
2467
2468 qcadev->bt_power->dev = &serdev->dev;
2469 err = qca_init_regulators(qcadev->bt_power, data->vregs,
2470 data->num_vregs);
2471 if (err) {
2472 BT_ERR("Failed to init regulators:%d", err);
2473 return err;
2474 }
2475
2476 qcadev->bt_power->vregs_on = false;
2477
2478 qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
2479 GPIOD_OUT_LOW);
2480 if (IS_ERR(qcadev->bt_en))
2481 return dev_err_probe(&serdev->dev,
2482 PTR_ERR(qcadev->bt_en),
2483 "failed to acquire BT_EN gpio\n");
2484
2485 if (!qcadev->bt_en &&
2486 (data->soc_type == QCA_WCN6750 ||
2487 data->soc_type == QCA_WCN6855 ||
2488 data->soc_type == QCA_WCN7850))
2489 power_ctrl_enabled = false;
2490
2491 qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
2492 GPIOD_IN);
2493 if (IS_ERR(qcadev->sw_ctrl) &&
2494 (data->soc_type == QCA_WCN6750 ||
2495 data->soc_type == QCA_WCN6855 ||
2496 data->soc_type == QCA_WCN7850)) {
2497 dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
2498 return PTR_ERR(qcadev->sw_ctrl);
2499 }
2500
2501 qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
2502 if (IS_ERR(qcadev->susclk)) {
2503 dev_err(&serdev->dev, "failed to acquire clk\n");
2504 return PTR_ERR(qcadev->susclk);
2505 }
2506 break;
2507
2508 case QCA_QCA6390:
2509 if (dev_of_node(&serdev->dev)) {
2510 qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
2511 "bluetooth");
2512 if (IS_ERR(qcadev->bt_power->pwrseq))
2513 return PTR_ERR(qcadev->bt_power->pwrseq);
2514 break;
2515 }
2516 fallthrough;
2517
2518 default:
2519 qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
2520 GPIOD_OUT_LOW);
2521 if (IS_ERR(qcadev->bt_en)) {
2522 dev_err(&serdev->dev, "failed to acquire enable gpio\n");
2523 return PTR_ERR(qcadev->bt_en);
2524 }
2525
2526 if (!qcadev->bt_en)
2527 power_ctrl_enabled = false;
2528
2529 qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
2530 &serdev->dev, NULL, SUSCLK_RATE_32KHZ);
2531 if (IS_ERR(qcadev->susclk)) {
2532 dev_warn(&serdev->dev, "failed to acquire clk\n");
2533 return PTR_ERR(qcadev->susclk);
2534 }
2535 }
2536
2537 err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
2538 if (err) {
2539 BT_ERR("serdev registration failed");
2540 return err;
2541 }
2542
2543 hdev = qcadev->serdev_hu.hdev;
2544
2545 if (power_ctrl_enabled) {
2546 hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
2547 hdev->shutdown = qca_hci_shutdown;
2548 }
2549
2550 if (data) {
2551 /* Wideband speech support must be set per driver since it can't
2552 * be queried via hci. Same with the valid le states quirk.
2553 */
2554 if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
2555 hci_set_quirk(hdev,
2556 HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
2557
2558 if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
2559 hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
2560
2561 if (data->capabilities & QCA_CAP_HFP_HW_OFFLOAD)
2562 qcadev->support_hfp_hw_offload = true;
2563 }
2564
2565 return 0;
2566 }
2567
qca_serdev_remove(struct serdev_device * serdev)2568 static void qca_serdev_remove(struct serdev_device *serdev)
2569 {
2570 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2571 struct qca_power *power = qcadev->bt_power;
2572
2573 switch (qcadev->btsoc_type) {
2574 case QCA_WCN3988:
2575 case QCA_WCN3990:
2576 case QCA_WCN3991:
2577 case QCA_WCN3998:
2578 case QCA_WCN6750:
2579 case QCA_WCN6855:
2580 case QCA_WCN7850:
2581 if (power->vregs_on)
2582 qca_power_off(&qcadev->serdev_hu);
2583 break;
2584 default:
2585 break;
2586 }
2587
2588 hci_uart_unregister_device(&qcadev->serdev_hu);
2589 }
2590
qca_serdev_shutdown(struct serdev_device * serdev)2591 static void qca_serdev_shutdown(struct serdev_device *serdev)
2592 {
2593 int ret;
2594 int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
2595 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2596 struct hci_uart *hu = &qcadev->serdev_hu;
2597 struct hci_dev *hdev = hu->hdev;
2598 const u8 ibs_wake_cmd[] = { 0xFD };
2599 const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
2600
2601 if (qcadev->btsoc_type == QCA_QCA6390) {
2602 /* The purpose of sending the VSC is to reset SOC into a initial
2603 * state and the state will ensure next hdev->setup() success.
2604 * if HCI_QUIRK_NON_PERSISTENT_SETUP is set, it means that
2605 * hdev->setup() can do its job regardless of SoC state, so
2606 * don't need to send the VSC.
2607 * if HCI_SETUP is set, it means that hdev->setup() was never
2608 * invoked and the SOC is already in the initial state, so
2609 * don't also need to send the VSC.
2610 */
2611 if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
2612 hci_dev_test_flag(hdev, HCI_SETUP))
2613 return;
2614
2615 /* The serdev must be in open state when control logic arrives
2616 * here, so also fix the use-after-free issue caused by that
2617 * the serdev is flushed or wrote after it is closed.
2618 */
2619 serdev_device_write_flush(serdev);
2620 ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
2621 sizeof(ibs_wake_cmd));
2622 if (ret < 0) {
2623 BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
2624 return;
2625 }
2626 serdev_device_wait_until_sent(serdev, timeout);
2627 usleep_range(8000, 10000);
2628
2629 serdev_device_write_flush(serdev);
2630 ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
2631 sizeof(edl_reset_soc_cmd));
2632 if (ret < 0) {
2633 BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
2634 return;
2635 }
2636 serdev_device_wait_until_sent(serdev, timeout);
2637 usleep_range(8000, 10000);
2638 }
2639 }
2640
qca_suspend(struct device * dev)2641 static int __maybe_unused qca_suspend(struct device *dev)
2642 {
2643 struct serdev_device *serdev = to_serdev_device(dev);
2644 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2645 struct hci_uart *hu = &qcadev->serdev_hu;
2646 struct qca_data *qca = hu->priv;
2647 unsigned long flags;
2648 bool tx_pending = false;
2649 int ret = 0;
2650 u8 cmd;
2651 u32 wait_timeout = 0;
2652
2653 set_bit(QCA_SUSPENDING, &qca->flags);
2654
2655 /* if BT SoC is running with default firmware then it does not
2656 * support in-band sleep
2657 */
2658 if (test_bit(QCA_ROM_FW, &qca->flags))
2659 return 0;
2660
2661 /* During SSR after memory dump collection, controller will be
2662 * powered off and then powered on.If controller is powered off
2663 * during SSR then we should wait until SSR is completed.
2664 */
2665 if (test_bit(QCA_BT_OFF, &qca->flags) &&
2666 !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
2667 return 0;
2668
2669 if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
2670 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
2671 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
2672 IBS_DISABLE_SSR_TIMEOUT_MS :
2673 FW_DOWNLOAD_TIMEOUT_MS;
2674
2675 /* QCA_IBS_DISABLED flag is set to true, During FW download
2676 * and during memory dump collection. It is reset to false,
2677 * After FW download complete.
2678 */
2679 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
2680 TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
2681
2682 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
2683 bt_dev_err(hu->hdev, "SSR or FW download time out");
2684 ret = -ETIMEDOUT;
2685 goto error;
2686 }
2687 }
2688
2689 cancel_work_sync(&qca->ws_awake_device);
2690 cancel_work_sync(&qca->ws_awake_rx);
2691
2692 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
2693 flags, SINGLE_DEPTH_NESTING);
2694
2695 switch (qca->tx_ibs_state) {
2696 case HCI_IBS_TX_WAKING:
2697 timer_delete(&qca->wake_retrans_timer);
2698 fallthrough;
2699 case HCI_IBS_TX_AWAKE:
2700 timer_delete(&qca->tx_idle_timer);
2701
2702 serdev_device_write_flush(hu->serdev);
2703 cmd = HCI_IBS_SLEEP_IND;
2704 ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
2705
2706 if (ret < 0) {
2707 BT_ERR("Failed to send SLEEP to device");
2708 break;
2709 }
2710
2711 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
2712 qca->ibs_sent_slps++;
2713 tx_pending = true;
2714 break;
2715
2716 case HCI_IBS_TX_ASLEEP:
2717 break;
2718
2719 default:
2720 BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
2721 ret = -EINVAL;
2722 break;
2723 }
2724
2725 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2726
2727 if (ret < 0)
2728 goto error;
2729
2730 if (tx_pending) {
2731 serdev_device_wait_until_sent(hu->serdev,
2732 msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
2733 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
2734 }
2735
2736 /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
2737 * to sleep, so that the packet does not wake the system later.
2738 */
2739 ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
2740 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
2741 msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
2742 if (ret == 0) {
2743 ret = -ETIMEDOUT;
2744 goto error;
2745 }
2746
2747 return 0;
2748
2749 error:
2750 clear_bit(QCA_SUSPENDING, &qca->flags);
2751
2752 return ret;
2753 }
2754
qca_resume(struct device * dev)2755 static int __maybe_unused qca_resume(struct device *dev)
2756 {
2757 struct serdev_device *serdev = to_serdev_device(dev);
2758 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2759 struct hci_uart *hu = &qcadev->serdev_hu;
2760 struct qca_data *qca = hu->priv;
2761
2762 clear_bit(QCA_SUSPENDING, &qca->flags);
2763
2764 return 0;
2765 }
2766
/* System sleep PM callbacks; see qca_suspend()/qca_resume() above. */
static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
2768
#ifdef CONFIG_OF
/* DT compatibles mapped to per-SoC config; entries without .data are
 * treated as QCA_ROME in qca_serdev_probe().
 */
static const struct of_device_id qca_bluetooth_of_match[] = {
	{ .compatible = "qcom,qca2066-bt", .data = &qca_soc_data_qca2066},
	{ .compatible = "qcom,qca6174-bt" },
	{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
	{ .compatible = "qcom,qca9377-bt" },
	{ .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
	{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
	{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
	{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
	{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
	{ .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
	{ .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855},
	{ .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
#endif
2787
#ifdef CONFIG_ACPI
/* ACPI IDs mapped to per-SoC config via the driver_data cookie. */
static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
	{ "QCOM2066", (kernel_ulong_t)&qca_soc_data_qca2066 },
	{ "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
	{ "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
	{ "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
	{ "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
#endif
2799
#ifdef CONFIG_DEV_COREDUMP
/* devcoredump trigger: delegate to the hdev coredump hook when one is set. */
static void hciqca_coredump(struct device *dev)
{
	struct qca_serdev *qcadev;
	struct hci_dev *hdev;

	qcadev = serdev_device_get_drvdata(to_serdev_device(dev));
	hdev = qcadev->serdev_hu.hdev;

	if (hdev->dump.coredump)
		hdev->dump.coredump(hdev);
}
#endif
2812
/* serdev driver binding: probe/remove/shutdown plus system-sleep PM ops
 * and the optional devcoredump hook.
 */
static struct serdev_device_driver qca_serdev_driver = {
	.probe = qca_serdev_probe,
	.remove = qca_serdev_remove,
	.shutdown = qca_serdev_shutdown,
	.driver = {
		.name = "hci_uart_qca",
		.of_match_table = of_match_ptr(qca_bluetooth_of_match),
		.acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
		.pm = &qca_pm_ops,
#ifdef CONFIG_DEV_COREDUMP
		.coredump = hciqca_coredump,
#endif
	},
};
2827
qca_init(void)2828 int __init qca_init(void)
2829 {
2830 serdev_device_driver_register(&qca_serdev_driver);
2831
2832 return hci_uart_register_proto(&qca_proto);
2833 }
2834
qca_deinit(void)2835 int __exit qca_deinit(void)
2836 {
2837 serdev_device_driver_unregister(&qca_serdev_driver);
2838
2839 return hci_uart_unregister_proto(&qca_proto);
2840 }
2841