// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0
#define RT_ID_AP_PORT_ENUM	1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID	0x49434343

#define FEATURE_VER		GENMASK(7, 4)
#define FEATURE_MSK		GENMASK(3, 0)

#define RGU_RESET_DELAY_MS	10
#define PORT_RESET_DELAY_MS	2000
#define FASTBOOT_RESET_DELAY_MS	2000
#define EX_HS_TIMEOUT_MS	5000
#define EX_HS_POLL_DELAY_MS	10

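/* Support level encoded in each feature_set[] entry: the low nibble
 * (FEATURE_MSK) carries one of the values below, the high nibble
 * (FEATURE_VER) carries the feature version.
 */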
enum mtk_feature_support_type {
	MTK_FEATURE_DOES_NOT_EXIST,
	MTK_FEATURE_NOT_SUPPORTED,
	MTK_FEATURE_MUST_BE_SUPPORTED,
};

static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Returns:
 ** 0		- Success.
 ** -EINVAL	- Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;
	struct t7xx_fsm_ctl *ctl;
	unsigned int int_sta;
	int ret = 0;
	u32 mask;

	ctl = md->fsm_ctl;
	if (!ctl) {
		dev_err_ratelimited(&t7xx_dev->pdev->dev,
				    "MHCCIF interrupt received before initializing MD monitor\n");
		return -EINVAL;
	}

	spin_lock_bh(&md->exp_lock);
	int_sta = t7xx_get_interrupt_status(t7xx_dev);
	md->exp_id |= int_sta;
	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
		if (ctl->md_state == MD_STATE_INVALID ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
		    ctl->md_state == MD_STATE_READY) {
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
		}
	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
		md->exp_id &= ~D2H_INT_PORT_ENUM;

		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
		    ctl->curr_state == FSM_STATE_STOPPED)
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
		mask = t7xx_mhccif_mask_get(t7xx_dev);
		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			queue_work(md->handshake_wq, &md->handshake_work);
		}
	}
	spin_unlock_bh(&md->exp_lock);

	return ret;
}

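/* Read the latched TOPRGU PCIe reset interrupt status and write the value
 * back to the status register to acknowledge (clear) it.
 */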
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
	void __iomem *reset_pcie_reg;
	u32 val;

	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
			 pbase_addr->pcie_dev_reg_trsl_addr;
	val = ioread32(reset_pcie_reg);
	iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
	/* Clear L2 */
	t7xx_clr_device_irq_via_pcie(t7xx_dev);
	/* Clear L1 */
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

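/* Reset the device by evaluating the given ACPI method (e.g. _RST or
 * MRST._RST). When ACPI support is not configured, fall back to a PCI
 * function level reset.
 */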
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &t7xx_dev->pdev->dev;
	acpi_status acpi_ret;
	acpi_handle handle;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "ACPI handle not found\n");
		return -EFAULT;
	}

	if (!acpi_has_method(handle, fn_name)) {
		dev_err(dev, "%s method not found\n", fn_name);
		return -EFAULT;
	}

	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
		return -EFAULT;
	}

	kfree(buffer.pointer);
#else
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = pci_reset_function(t7xx_dev->pdev);
	if (ret) {
		dev_err(dev, "Failed to reset device, error:%d\n", ret);
		return ret;
	}
#endif
	return 0;
}

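/* Publish a host event (e.g. FASTBOOT_DL_NOTIFY) to the device by updating
 * the HOST_EVENT_MASK field of the misc device status register.
 */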
static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
{
	u32 value;

	value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	value &= ~HOST_EVENT_MASK;
	value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
	iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}

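/* Perform a function (FLDR), platform (PLDR) or fastboot reset of the
 * device, then restore the saved PCI state and re-probe it.
 */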
int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
{
	int ret = 0;

	pci_save_state(t7xx_dev->pdev);
	t7xx_pci_reprobe_early(t7xx_dev);
	t7xx_mode_update(t7xx_dev, T7XX_RESET);
	WRITE_ONCE(t7xx_dev->debug_ports_show, false);

	if (type == FLDR) {
		ret = t7xx_acpi_reset(t7xx_dev, "_RST");
	} else if (type == PLDR) {
		ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
	} else if (type == FASTBOOT) {
		t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
		msleep(FASTBOOT_RESET_DELAY_MS);
	}

	pci_restore_state(t7xx_dev->pdev);
	if (ret)
		return ret;

	return t7xx_pci_reprobe(t7xx_dev, true);
}

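/* Check which reset type the device advertises in the misc device status
 * register and trigger the matching reset.
 */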
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
	u32 val;

	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (val & MISC_RESET_TYPE_PLDR)
		t7xx_reset_device(t7xx_dev, PLDR);
	else if (val & MISC_RESET_TYPE_FLDR)
		t7xx_reset_device(t7xx_dev, FLDR);
}

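/* Threaded half of the RGU ISR: wait RGU_RESET_DELAY_MS, then perform the
 * reset type requested by the device.
 */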
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	msleep(RGU_RESET_DELAY_MS);
	t7xx_reset_device_via_pmic(t7xx_dev);
	return IRQ_HANDLED;
}

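/* Hard-IRQ half of the RGU ISR: acknowledge the interrupt and, if RGU
 * handling is enabled, mask it and wake the threaded handler.
 */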
static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	struct t7xx_modem *modem;

	t7xx_clear_rgu_irq(t7xx_dev);
	if (!t7xx_dev->rgu_pci_irq_en)
		return IRQ_HANDLED;

	modem = t7xx_dev->md;
	modem->rgu_irq_asserted = true;
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
	/* Registers RGU callback ISR with PCIe driver */
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages are one after the other as described below:
 * HIF_EX_INIT:		Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *        +---------v--------+
 *        |   HIF_EX_INIT    |  : Disable and clear TXQ
 *        +------------------+
 *                   |
 *        +---------v--------+
 *        | HIF_EX_INIT_DONE |  : Wait for the init to be done
 *        +------------------+
 *                   |
 *        +---------v--------+
 *        |HIF_EX_CLEARQ_DONE|  : Disable and clear RXQ
 *        +------------------+  : Flush TX/RX workqueues
 *                   |
 *        +---------v--------+
 *        |HIF_EX_ALLQ_RESET |  : Restart HW and CLDMA
 *        +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
	switch (stage) {
	case HIF_EX_INIT:
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
		break;

	case HIF_EX_CLEARQ_DONE:
		/* We do not want to get CLDMA IRQ when MD is
		 * resetting CLDMA after it got clearq_ack.
		 */
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
		t7xx_cldma_stop(md_ctrl);

		if (md_ctrl->hif_id == CLDMA_ID_MD)
			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
		break;

	case HIF_EX_ALLQ_RESET:
		t7xx_cldma_hw_init(&md_ctrl->hw_info);
		t7xx_cldma_start(md_ctrl);
		break;

	default:
		break;
	}
}

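/* Drive both CLDMA instances through the given exception stage and
 * acknowledge the INIT and CLEARQ_DONE stages to the device via MHCCIF.
 */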
static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

	if (stage == HIF_EX_CLEARQ_DONE) {
		/* Give DHL time to flush data */
		msleep(PORT_RESET_DELAY_MS);
		t7xx_port_proxy_reset(md->port_prox);
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
	else if (stage == HIF_EX_CLEARQ_DONE)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

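/* Poll md->exp_id for the given handshake event in EX_HS_POLL_DELAY_MS
 * steps, for up to EX_HS_TIMEOUT_MS. Returns 0 on success, -EFAULT on
 * timeout.
 */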
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
	unsigned int waited_time_ms = 0;

	do {
		if (md->exp_id & event_id)
			return 0;

		waited_time_ms += EX_HS_POLL_DELAY_MS;
		msleep(EX_HS_POLL_DELAY_MS);
	} while (waited_time_ms < EX_HS_TIMEOUT_MS);

	return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
	/* Register the MHCCIF ISR for MD exception, port enum and
	 * async handshake notifications.
	 */
	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

	/* Register RGU IRQ handler for sAP exception notification */
	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_register_rgu_isr(t7xx_dev);
}

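/* Feature query message exchanged during the handshake: the feature set is
 * framed by the "ICCC" head and tail patterns (MD_FEATURE_QUERY_ID).
 */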
struct feature_query {
	__le32 head_pattern;
	u8 feature_set[FEATURE_COUNT];
	__le32 tail_pattern;
};

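/* Build the HS1 feature query from the local feature set and send it to
 * the device over the control port.
 */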
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return;

	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Send HS1 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

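/* Validate the feature query received in HS2 and reply with the HS3
 * runtime data: one mtk_runtime_feature entry for every feature that is
 * not marked MTK_FEATURE_MUST_BE_SUPPORTED.
 */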
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
				       void *data)
{
	struct feature_query *md_feature = data;
	struct mtk_runtime_feature *rt_feature;
	unsigned int i, rt_data_len = 0;
	struct sk_buff *skb;

	/* Parse MD runtime data query */
	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
			le32_to_cpu(md_feature->head_pattern),
			le32_to_cpu(md_feature->tail_pattern));
		return -EINVAL;
	}

	for (i = 0; i < FEATURE_COUNT; i++) {
		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
		    MTK_FEATURE_MUST_BE_SUPPORTED)
			rt_data_len += sizeof(*rt_feature);
	}

	skb = t7xx_ctrl_alloc_skb(rt_data_len);
	if (!skb)
		return -ENOMEM;

	rt_feature = skb_put(skb, rt_data_len);
	memset(rt_feature, 0, rt_data_len);

	/* Fill runtime feature */
	for (i = 0; i < FEATURE_COUNT; i++) {
		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		rt_feature->feature_id = i;
		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
			rt_feature->support_info = md_feature->feature_set[i];

		rt_feature++;
	}

	/* Send HS3 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
	return 0;
}

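/* Walk the runtime features that follow the feature query, bounds-checking
 * each entry against the received length, and hand the port enumeration
 * payloads to the port enum message handler.
 */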
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
				   struct device *dev, void *data, int data_length)
{
	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
	struct mtk_runtime_feature *rt_feature;
	int i, offset;

	offset = sizeof(struct feature_query);
	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
		size_t remaining = data_length - offset;
		size_t feat_data_len, feat_total;

		if (remaining < sizeof(*rt_feature))
			break;

		rt_feature = data + offset;
		feat_data_len = le32_to_cpu(rt_feature->data_len);

		if (feat_data_len > remaining - sizeof(*rt_feature))
			break;

		feat_total = sizeof(*rt_feature) + feat_data_len;
		offset += feat_total;

		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data,
						   feat_data_len);
	}

	return 0;
}

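/* Mark the MD core as not ready and, if a handshake is in flight, queue an
 * HS2 EXIT event to unwind it.
 */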
static int t7xx_core_reset(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	md->core_md.ready = false;

	if (!ctl) {
		dev_err(dev, "FSM is not initialized\n");
		return -EINVAL;
	}

	if (md->core_md.handshake_ongoing) {
		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		if (ret)
			return ret;
	}

	md->core_md.handshake_ongoing = false;
	return 0;
}

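/* Core handshake: send the HS1 query, wait for the HS2 event (or an
 * error/exit event), parse the runtime data it carries and answer with
 * HS3. On success the core is marked ready and async waiters are woken.
 */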
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
				 struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;

	t7xx_prepare_host_rt_data_query(core_info);

	while (!kthread_should_stop()) {
		bool event_received = false;

		spin_lock_irqsave(&ctl->event_lock, flags);
		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
			if (event->event_id == err_detect) {
				list_del(&event->entry);
				spin_unlock_irqrestore(&ctl->event_lock, flags);
				dev_err(dev, "Core handshake error event received\n");
				goto err_free_event;
			} else if (event->event_id == event_id) {
				list_del(&event->entry);
				event_received = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (event_received)
			break;

		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			goto err_free_event;
	}

	if (!event || ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
	if (ret) {
		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	if (ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
	if (ret) {
		dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	core_info->ready = true;
	core_info->handshake_ongoing = false;
	wake_up(&ctl->async_hk_wq);
err_free_event:
	kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

static void t7xx_ap_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
	md->core_ap.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}

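/* Adjust the MHCCIF interrupt masks for each FSM stage and, on FSM_START,
 * dispatch any handshake interrupts that arrived before the masks were
 * opened.
 */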
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
						   D2H_INT_ASYNC_AP_HK);
		break;

	case FSM_START:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

		spin_lock_irqsave(&md->exp_lock, flags);
		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
		md->exp_id |= int_sta;
		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else {
			void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;

			if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
				queue_work(md->handshake_wq, &md->handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
				iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
			}

			if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
				queue_work(md->handshake_wq, &md->ap_handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
				iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
			}
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

		t7xx_mhccif_mask_clr(md->t7xx_dev,
				     D2H_INT_EXCEPTION_INIT |
				     D2H_INT_EXCEPTION_INIT_DONE |
				     D2H_INT_EXCEPTION_CLEARQ_DONE |
				     D2H_INT_EXCEPTION_ALLQ_RESET);
		break;

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
		break;

	default:
		break;
	}
}

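/* Walk the exception handshake stages with the device, logging a timeout
 * for any stage the device fails to acknowledge. See the "Modem Exception
 * Handshake Flow" diagram above.
 */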
void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	int ret;

	t7xx_md_exception(md, HIF_EX_INIT);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

	t7xx_md_exception(md, HIF_EX_INIT_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

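/* Allocate the modem control block, create the handshake workqueue and
 * mark the port enumeration features as must-be-supported for both the
 * MD and AP cores.
 */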
static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_modem *md;

	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->t7xx_dev = t7xx_dev;
	t7xx_dev->md = md;
	spin_lock_init(&md->exp_lock);
	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					   0, "md_hk_wq");
	if (!md->handshake_wq)
		return NULL;

	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	return md;
}

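/* Reset the modem software stack: clear the stored exception ID, then
 * reset the FSM, both CLDMA instances, the port proxy and finally the
 * core handshake state.
 */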
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	md->md_init_finish = false;
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0		- Success.
 ** -ENOMEM	- Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md;
	int ret;

	md = t7xx_md_alloc(t7xx_dev);
	if (!md)
		return -ENOMEM;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_ccmni_init(t7xx_dev);
	if (ret)
		goto err_uninit_fsm;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_ap_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
	md->md_init_finish = true;
	return 0;

err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_ap_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
	t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
	t7xx_fsm_uninit(md);

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}

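/* Tear down the modem in reverse order of t7xx_md_init(), issuing a
 * PRE_STOP command first unless the device is already in reset or in an
 * unknown mode.
 */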
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
	struct t7xx_modem *md = t7xx_dev->md;

	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

	if (!md->md_init_finish)
		return;

	if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
		t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
}