xref: /linux/drivers/net/wwan/t7xx/t7xx_modem_ops.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Haijun Liu <haijun.liu@mediatek.com>
8  *  Eliot Lee <eliot.lee@intel.com>
9  *  Moises Veleta <moises.veleta@intel.com>
10  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
11  *
12  * Contributors:
13  *  Amir Hanania <amir.hanania@intel.com>
14  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
15  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
16  */
17 
18 #include <linux/acpi.h>
19 #include <linux/bits.h>
20 #include <linux/bitfield.h>
21 #include <linux/device.h>
22 #include <linux/delay.h>
23 #include <linux/gfp.h>
24 #include <linux/io.h>
25 #include <linux/irqreturn.h>
26 #include <linux/kthread.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/types.h>
31 #include <linux/wait.h>
32 #include <linux/workqueue.h>
33 
34 #include "t7xx_cldma.h"
35 #include "t7xx_hif_cldma.h"
36 #include "t7xx_mhccif.h"
37 #include "t7xx_modem_ops.h"
38 #include "t7xx_netdev.h"
39 #include "t7xx_pci.h"
40 #include "t7xx_pcie_mac.h"
41 #include "t7xx_port.h"
42 #include "t7xx_port_proxy.h"
43 #include "t7xx_reg.h"
44 #include "t7xx_state_monitor.h"
45 
46 #define RT_ID_MD_PORT_ENUM	0
47 #define RT_ID_AP_PORT_ENUM	1
48 /* Modem feature query identification code - "ICCC" */
49 #define MD_FEATURE_QUERY_ID	0x49434343
50 
51 #define FEATURE_VER		GENMASK(7, 4)
52 #define FEATURE_MSK		GENMASK(3, 0)
53 
54 #define RGU_RESET_DELAY_MS	10
55 #define PORT_RESET_DELAY_MS	2000
56 #define FASTBOOT_RESET_DELAY_MS	2000
57 #define EX_HS_TIMEOUT_MS	5000
58 #define EX_HS_POLL_DELAY_MS	10
59 
60 enum mtk_feature_support_type {
61 	MTK_FEATURE_DOES_NOT_EXIST,
62 	MTK_FEATURE_NOT_SUPPORTED,
63 	MTK_FEATURE_MUST_BE_SUPPORTED,
64 };
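
/* Each feature_set[] byte packs a feature version in FEATURE_VER (bits 7-4)
 * and a support level in FEATURE_MSK (bits 3-0); the low nibble carries one
 * of the mtk_feature_support_type values above. For example, 0x12 decodes as
 * version 1 with MTK_FEATURE_MUST_BE_SUPPORTED.
 */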
65 
66 static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
67 {
68 	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
69 }
70 
71 /**
72  * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
73  * @t7xx_dev: MTK device.
74  *
75  * Check the interrupt status and queue commands accordingly.
76  *
77  * Returns:
78  ** 0		- Success.
79  ** -EINVAL	- Failure to get FSM control.
80  */
81 int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
82 {
83 	struct t7xx_modem *md = t7xx_dev->md;
84 	struct t7xx_fsm_ctl *ctl;
85 	unsigned int int_sta;
86 	int ret = 0;
87 	u32 mask;
88 
89 	ctl = md->fsm_ctl;
90 	if (!ctl) {
91 		dev_err_ratelimited(&t7xx_dev->pdev->dev,
92 				    "MHCCIF interrupt received before initializing MD monitor\n");
93 		return -EINVAL;
94 	}
95 
96 	spin_lock_bh(&md->exp_lock);
97 	int_sta = t7xx_get_interrupt_status(t7xx_dev);
98 	md->exp_id |= int_sta;
99 	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
100 		if (ctl->md_state == MD_STATE_INVALID ||
101 		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
102 		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
103 		    ctl->md_state == MD_STATE_READY) {
104 			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
105 			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
106 		}
107 	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
108 		md->exp_id &= ~D2H_INT_PORT_ENUM;
109 
110 		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
111 		    ctl->curr_state == FSM_STATE_STOPPED)
112 			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
113 	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
114 		mask = t7xx_mhccif_mask_get(t7xx_dev);
115 		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
116 			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
117 			queue_work(md->handshake_wq, &md->handshake_work);
118 		}
119 	}
120 	spin_unlock_bh(&md->exp_lock);
121 
122 	return ret;
123 }
124 
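/* Read the TOPRGU PCIe IRQ status register and write the value straight back
 * to acknowledge it; the register is assumed to be write-one-to-clear, so
 * echoing the latched bits clears the device-side (L2) interrupt source.
 */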
125 static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
126 {
127 	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
128 	void __iomem *reset_pcie_reg;
129 	u32 val;
130 
131 	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
132 			  pbase_addr->pcie_dev_reg_trsl_addr;
133 	val = ioread32(reset_pcie_reg);
134 	iowrite32(val, reset_pcie_reg);
135 }
136 
137 void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
138 {
139 	/* Clear L2 */
140 	t7xx_clr_device_irq_via_pcie(t7xx_dev);
141 	/* Clear L1 */
142 	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
143 }
144 
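/* With CONFIG_ACPI, reset the device by evaluating the ACPI method named by
 * the caller (e.g. "_RST" or "MRST._RST") on the PCI device's companion;
 * without ACPI, fall back to resetting the PCI function via
 * pci_reset_function().
 */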
145 static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
146 {
147 #ifdef CONFIG_ACPI
148 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
149 	struct device *dev = &t7xx_dev->pdev->dev;
150 	acpi_status acpi_ret;
151 	acpi_handle handle;
152 
153 	handle = ACPI_HANDLE(dev);
154 	if (!handle) {
155 		dev_err(dev, "ACPI handle not found\n");
156 		return -EFAULT;
157 	}
158 
159 	if (!acpi_has_method(handle, fn_name)) {
160 		dev_err(dev, "%s method not found\n", fn_name);
161 		return -EFAULT;
162 	}
163 
164 	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
165 	if (ACPI_FAILURE(acpi_ret)) {
166 		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
167 		return -EFAULT;
168 	}
169 
170 	kfree(buffer.pointer);
171 #else
172 	struct device *dev = &t7xx_dev->pdev->dev;
173 	int ret;
174 
175 	ret = pci_reset_function(t7xx_dev->pdev);
176 	if (ret) {
177 		dev_err(dev, "Failed to reset device, error:%d\n", ret);
178 		return ret;
179 	}
180 #endif
181 	return 0;
182 }
183 
184 static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
185 {
186 	u32 value;
187 
188 	value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
189 	value &= ~HOST_EVENT_MASK;
190 	value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
191 	iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
192 }
193 
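/* Perform a device reset of the requested type: FLDR and PLDR are carried out
 * through the ACPI "_RST" and "MRST._RST" methods respectively, while FASTBOOT
 * notifies the device of a fastboot download request and triggers the reset
 * over MHCCIF, then waits FASTBOOT_RESET_DELAY_MS. PCI config space is saved
 * before and restored after the reset, and the device is then re-probed.
 */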
194 int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
195 {
196 	int ret = 0;
197 
198 	pci_save_state(t7xx_dev->pdev);
199 	t7xx_pci_reprobe_early(t7xx_dev);
200 	t7xx_mode_update(t7xx_dev, T7XX_RESET);
201 
202 	if (type == FLDR) {
203 		ret = t7xx_acpi_reset(t7xx_dev, "_RST");
204 	} else if (type == PLDR) {
205 		ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
206 	} else if (type == FASTBOOT) {
207 		t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
208 		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
209 		msleep(FASTBOOT_RESET_DELAY_MS);
210 	}
211 
212 	pci_restore_state(t7xx_dev->pdev);
213 	if (ret)
214 		return ret;
215 
216 	return t7xx_pci_reprobe(t7xx_dev, true);
217 }
218 
219 static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
220 {
221 	u32 val;
222 
223 	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
224 	if (val & MISC_RESET_TYPE_PLDR)
225 		t7xx_reset_device(t7xx_dev, PLDR);
226 	else if (val & MISC_RESET_TYPE_FLDR)
227 		t7xx_reset_device(t7xx_dev, FLDR);
228 }
229 
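/* RGU interrupts are handled in two stages: the hard handler acknowledges the
 * interrupt and, when RGU handling is enabled, flags the assertion, disables
 * further RGU interrupts and wakes the threaded handler, which waits
 * RGU_RESET_DELAY_MS and then resets the device according to the reset type
 * advertised in T7XX_PCIE_MISC_DEV_STATUS.
 */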
230 static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
231 {
232 	struct t7xx_pci_dev *t7xx_dev = data;
233 
234 	msleep(RGU_RESET_DELAY_MS);
235 	t7xx_reset_device_via_pmic(t7xx_dev);
236 	return IRQ_HANDLED;
237 }
238 
239 static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
240 {
241 	struct t7xx_pci_dev *t7xx_dev = data;
242 	struct t7xx_modem *modem;
243 
244 	t7xx_clear_rgu_irq(t7xx_dev);
245 	if (!t7xx_dev->rgu_pci_irq_en)
246 		return IRQ_HANDLED;
247 
248 	modem = t7xx_dev->md;
249 	modem->rgu_irq_asserted = true;
250 	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
251 	return IRQ_WAKE_THREAD;
252 }
253 
254 static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
255 {
256 	/* Registers RGU callback ISR with PCIe driver */
257 	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
258 	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
259 
260 	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
261 	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
262 	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
263 	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
264 }
265 
266 /**
267  * t7xx_cldma_exception() - CLDMA exception handler.
268  * @md_ctrl: modem control struct.
269  * @stage: exception stage.
270  *
271  * Part of the modem exception recovery.
272  * Stages are one after the other as described below:
273  * HIF_EX_INIT:		Disable and clear TXQ.
274  * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
275  * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
276  */
277 
278 /* Modem Exception Handshake Flow
279  *
280  * Modem HW Exception interrupt received
281  *           (MD_IRQ_CCIF_EX)
282  *                   |
283  *         +---------v--------+
284  *         |   HIF_EX_INIT    | : Disable and clear TXQ
285  *         +------------------+
286  *                   |
287  *         +---------v--------+
288  *         | HIF_EX_INIT_DONE | : Wait for the init to be done
289  *         +------------------+
290  *                   |
291  *         +---------v--------+
292  *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
293  *         +------------------+ : Flush TX/RX workqueues
294  *                   |
295  *         +---------v--------+
296  *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
297  *         +------------------+
298  */
299 static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
300 {
301 	switch (stage) {
302 	case HIF_EX_INIT:
303 		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
304 		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
305 		break;
306 
307 	case HIF_EX_CLEARQ_DONE:
308 		/* We do not want to get CLDMA IRQ when MD is
309 		 * resetting CLDMA after it got clearq_ack.
310 		 */
311 		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
312 		t7xx_cldma_stop(md_ctrl);
313 
314 		if (md_ctrl->hif_id == CLDMA_ID_MD)
315 			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);
316 
317 		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
318 		break;
319 
320 	case HIF_EX_ALLQ_RESET:
321 		t7xx_cldma_hw_init(&md_ctrl->hw_info);
322 		t7xx_cldma_start(md_ctrl);
323 		break;
324 
325 	default:
326 		break;
327 	}
328 }
329 
330 static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
331 {
332 	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
333 
334 	if (stage == HIF_EX_CLEARQ_DONE) {
335 		/* Give DHL time to flush data */
336 		msleep(PORT_RESET_DELAY_MS);
337 		t7xx_port_proxy_reset(md->port_prox);
338 	}
339 
340 	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
341 	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
342 
343 	if (stage == HIF_EX_INIT)
344 		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
345 	else if (stage == HIF_EX_CLEARQ_DONE)
346 		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
347 }
348 
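/* Poll md->exp_id for the requested event bit every EX_HS_POLL_DELAY_MS,
 * giving up with -EFAULT after EX_HS_TIMEOUT_MS (5 seconds).
 */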
349 static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
350 {
351 	unsigned int waited_time_ms = 0;
352 
353 	do {
354 		if (md->exp_id & event_id)
355 			return 0;
356 
357 		waited_time_ms += EX_HS_POLL_DELAY_MS;
358 		msleep(EX_HS_POLL_DELAY_MS);
359 	} while (waited_time_ms < EX_HS_TIMEOUT_MS);
360 
361 	return -EFAULT;
362 }
363 
364 static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
365 {
366 	/* Register the MHCCIF ISR for MD exception, port enum and
367 	 * async handshake notifications.
368 	 */
369 	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
370 	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);
371 
372 	/* Register RGU IRQ handler for sAP exception notification */
373 	t7xx_dev->rgu_pci_irq_en = true;
374 	t7xx_pcie_register_rgu_isr(t7xx_dev);
375 }
376 
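/* Framing used for the feature query in both directions: FEATURE_COUNT
 * feature_set bytes delimited by the MD_FEATURE_QUERY_ID ("ICCC") pattern at
 * head and tail. The host sends its copy in the HS1 message and expects the
 * same framing on the modem data handled in t7xx_prepare_device_rt_data().
 */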
377 struct feature_query {
378 	__le32 head_pattern;
379 	u8 feature_set[FEATURE_COUNT];
380 	__le32 tail_pattern;
381 };
382 
383 static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
384 {
385 	struct feature_query *ft_query;
386 	struct sk_buff *skb;
387 
388 	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
389 	if (!skb)
390 		return;
391 
392 	ft_query = skb_put(skb, sizeof(*ft_query));
393 	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
394 	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
395 	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
396 
397 	/* Send HS1 message to device */
398 	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
399 }
400 
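/* Build the HS3 runtime data for the device from the modem's feature query:
 * every feature the modem does not mark MTK_FEATURE_MUST_BE_SUPPORTED gets a
 * zero-initialized mtk_runtime_feature entry with its feature_id set, and
 * entries the modem reports as MTK_FEATURE_DOES_NOT_EXIST additionally echo
 * the modem's support byte back in support_info.
 */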
401 static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
402 				       void *data)
403 {
404 	struct feature_query *md_feature = data;
405 	struct mtk_runtime_feature *rt_feature;
406 	unsigned int i, rt_data_len = 0;
407 	struct sk_buff *skb;
408 
409 	/* Parse MD runtime data query */
410 	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
411 	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
412 		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
413 			le32_to_cpu(md_feature->head_pattern),
414 			le32_to_cpu(md_feature->tail_pattern));
415 		return -EINVAL;
416 	}
417 
418 	for (i = 0; i < FEATURE_COUNT; i++) {
419 		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
420 		    MTK_FEATURE_MUST_BE_SUPPORTED)
421 			rt_data_len += sizeof(*rt_feature);
422 	}
423 
424 	skb = t7xx_ctrl_alloc_skb(rt_data_len);
425 	if (!skb)
426 		return -ENOMEM;
427 
428 	rt_feature = skb_put(skb, rt_data_len);
429 	memset(rt_feature, 0, rt_data_len);
430 
431 	/* Fill runtime feature */
432 	for (i = 0; i < FEATURE_COUNT; i++) {
433 		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);
434 
435 		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
436 			continue;
437 
438 		rt_feature->feature_id = i;
439 		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
440 			rt_feature->support_info = md_feature->feature_set[i];
441 
442 		rt_feature++;
443 	}
444 
445 	/* Send HS3 message to device */
446 	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
447 	return 0;
448 }
449 
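/* Walk the variable-length mtk_runtime_feature records that follow the
 * feature_query header in the modem's HS2 payload. Every feature the host
 * configured as MTK_FEATURE_MUST_BE_SUPPORTED must be reported as such by the
 * modem, otherwise the handshake fails; port enumeration records (MD and AP)
 * are forwarded to t7xx_port_enum_msg_handler().
 */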
450 static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
451 				   struct device *dev, void *data, int data_length)
452 {
453 	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
454 	struct mtk_runtime_feature *rt_feature;
455 	int i, offset;
456 
457 	offset = sizeof(struct feature_query);
458 	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
459 		rt_feature = data + offset;
460 		offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
461 
462 		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
463 		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
464 			continue;
465 
466 		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
467 		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
468 			return -EINVAL;
469 
470 		if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
471 			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
472 	}
473 
474 	return 0;
475 }
476 
477 static int t7xx_core_reset(struct t7xx_modem *md)
478 {
479 	struct device *dev = &md->t7xx_dev->pdev->dev;
480 	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
481 
482 	md->core_md.ready = false;
483 
484 	if (!ctl) {
485 		dev_err(dev, "FSM is not initialized\n");
486 		return -EINVAL;
487 	}
488 
489 	if (md->core_md.handshake_ongoing) {
490 		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
491 
492 		if (ret)
493 			return ret;
494 	}
495 
496 	md->core_md.handshake_ongoing = false;
497 	return 0;
498 }
499 
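/* Core handshake: send the HS1 feature query, wait on the FSM event queue for
 * the HS2 response (or the error/exit event), parse the modem's runtime data,
 * reply with HS3, then mark the core ready and wake any waiters on
 * async_hk_wq. Bailing out on exp_flg or kthread_should_stop() only frees the
 * pending event.
 */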
500 static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
501 				 struct t7xx_fsm_ctl *ctl,
502 				 enum t7xx_fsm_event_state event_id,
503 				 enum t7xx_fsm_event_state err_detect)
504 {
505 	struct t7xx_fsm_event *event = NULL, *event_next;
506 	struct device *dev = &md->t7xx_dev->pdev->dev;
507 	unsigned long flags;
508 	int ret;
509 
510 	t7xx_prepare_host_rt_data_query(core_info);
511 
512 	while (!kthread_should_stop()) {
513 		bool event_received = false;
514 
515 		spin_lock_irqsave(&ctl->event_lock, flags);
516 		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
517 			if (event->event_id == err_detect) {
518 				list_del(&event->entry);
519 				spin_unlock_irqrestore(&ctl->event_lock, flags);
520 				dev_err(dev, "Core handshake error event received\n");
521 				goto err_free_event;
522 			} else if (event->event_id == event_id) {
523 				list_del(&event->entry);
524 				event_received = true;
525 				break;
526 			}
527 		}
528 		spin_unlock_irqrestore(&ctl->event_lock, flags);
529 
530 		if (event_received)
531 			break;
532 
533 		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
534 					 kthread_should_stop());
535 		if (kthread_should_stop())
536 			goto err_free_event;
537 	}
538 
539 	if (!event || ctl->exp_flg)
540 		goto err_free_event;
541 
542 	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
543 	if (ret) {
544 		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
545 		goto err_free_event;
546 	}
547 
548 	if (ctl->exp_flg)
549 		goto err_free_event;
550 
551 	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
552 	if (ret) {
553 		dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
554 		goto err_free_event;
555 	}
556 
557 	core_info->ready = true;
558 	core_info->handshake_ongoing = false;
559 	wake_up(&ctl->async_hk_wq);
560 err_free_event:
561 	kfree(event);
562 }
563 
564 static void t7xx_md_hk_wq(struct work_struct *work)
565 {
566 	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
567 	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
568 
569 	/* Clear the HS2 EXIT event appended in core_reset() */
570 	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
571 	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
572 	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
573 	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
574 	md->core_md.handshake_ongoing = true;
575 	t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
576 }
577 
578 static void t7xx_ap_hk_wq(struct work_struct *work)
579 {
580 	struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
581 	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
582 
583 	 /* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
584 	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
585 	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
586 	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
587 	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
588 	md->core_ap.handshake_ongoing = true;
589 	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
590 }
591 
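/* Adjust the MHCCIF interrupt masks as the FSM moves through its states:
 * PRE_START unmasks port enumeration and the MD/AP async handshake
 * interrupts, START masks port enumeration again and either records a pending
 * exception or queues the handshake work for async handshake bits already
 * latched in exp_id, then unmasks the exception-stage interrupts; READY masks
 * the async handshake interrupts once the handshakes have run.
 */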
592 void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
593 {
594 	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
595 	unsigned int int_sta;
596 	unsigned long flags;
597 
598 	switch (evt_id) {
599 	case FSM_PRE_START:
600 		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
601 						   D2H_INT_ASYNC_AP_HK);
602 		break;
603 
604 	case FSM_START:
605 		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);
606 
607 		spin_lock_irqsave(&md->exp_lock, flags);
608 		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
609 		md->exp_id |= int_sta;
610 		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
611 			ctl->exp_flg = true;
612 			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
613 			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
614 			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
615 		} else if (ctl->exp_flg) {
616 			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
617 			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
618 		} else {
619 			void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
620 
621 			if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
622 				queue_work(md->handshake_wq, &md->handshake_work);
623 				md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
624 				iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
625 				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
626 			}
627 
628 			if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
629 				queue_work(md->handshake_wq, &md->ap_handshake_work);
630 				md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
631 				iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
632 				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
633 			}
634 		}
635 		spin_unlock_irqrestore(&md->exp_lock, flags);
636 
637 		t7xx_mhccif_mask_clr(md->t7xx_dev,
638 				     D2H_INT_EXCEPTION_INIT |
639 				     D2H_INT_EXCEPTION_INIT_DONE |
640 				     D2H_INT_EXCEPTION_CLEARQ_DONE |
641 				     D2H_INT_EXCEPTION_ALLQ_RESET);
642 		break;
643 
644 	case FSM_READY:
645 		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
646 		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
647 		break;
648 
649 	default:
650 		break;
651 	}
652 }
653 
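/* Drive the host side of the exception handshake described in the flow
 * diagram above t7xx_cldma_exception(): run the HIF_EX_INIT, HIF_EX_INIT_DONE
 * and HIF_EX_CLEARQ_DONE stages, waiting up to EX_HS_TIMEOUT_MS for the modem
 * to acknowledge each stage over MHCCIF (a timeout is only logged), and finish
 * with HIF_EX_ALLQ_RESET to reinitialize and restart the CLDMA hardware.
 */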
654 void t7xx_md_exception_handshake(struct t7xx_modem *md)
655 {
656 	struct device *dev = &md->t7xx_dev->pdev->dev;
657 	int ret;
658 
659 	t7xx_md_exception(md, HIF_EX_INIT);
660 	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
661 	if (ret)
662 		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);
663 
664 	t7xx_md_exception(md, HIF_EX_INIT_DONE);
665 	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
666 	if (ret)
667 		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);
668 
669 	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
670 	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
671 	if (ret)
672 		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);
673 
674 	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
675 }
676 
677 static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
678 {
679 	struct device *dev = &t7xx_dev->pdev->dev;
680 	struct t7xx_modem *md;
681 
682 	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
683 	if (!md)
684 		return NULL;
685 
686 	md->t7xx_dev = t7xx_dev;
687 	t7xx_dev->md = md;
688 	spin_lock_init(&md->exp_lock);
689 	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
690 					   0, "md_hk_wq");
691 	if (!md->handshake_wq)
692 		return NULL;
693 
694 	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
695 	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
696 	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
697 		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
698 
699 	INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
700 	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
701 	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
702 		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
703 
704 	return md;
705 }
706 
707 int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
708 {
709 	struct t7xx_modem *md = t7xx_dev->md;
710 
711 	md->md_init_finish = false;
712 	md->exp_id = 0;
713 	t7xx_fsm_reset(md);
714 	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
715 	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
716 	t7xx_port_proxy_reset(md->port_prox);
717 	md->md_init_finish = true;
718 	return t7xx_core_reset(md);
719 }
720 
721 /**
722  * t7xx_md_init() - Initialize modem.
723  * @t7xx_dev: MTK device.
724  *
725  * Allocate and initialize MD control block, and initialize data path.
726  * Register MHCCIF ISR and RGU ISR, and start the state machine.
727  *
728  * Return:
729  ** 0		- Success.
730  ** -ENOMEM	- Allocation failure.
731  */
732 int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
733 {
734 	struct t7xx_modem *md;
735 	int ret;
736 
737 	md = t7xx_md_alloc(t7xx_dev);
738 	if (!md)
739 		return -ENOMEM;
740 
741 	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
742 	if (ret)
743 		goto err_destroy_hswq;
744 
745 	ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
746 	if (ret)
747 		goto err_destroy_hswq;
748 
749 	ret = t7xx_fsm_init(md);
750 	if (ret)
751 		goto err_destroy_hswq;
752 
753 	ret = t7xx_ccmni_init(t7xx_dev);
754 	if (ret)
755 		goto err_uninit_fsm;
756 
757 	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
758 	if (ret)
759 		goto err_uninit_ccmni;
760 
761 	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
762 	if (ret)
763 		goto err_uninit_md_cldma;
764 
765 	ret = t7xx_port_proxy_init(md);
766 	if (ret)
767 		goto err_uninit_ap_cldma;
768 
769 	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
770 	if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
771 		goto err_uninit_proxy;
772 
773 	t7xx_md_sys_sw_init(t7xx_dev);
774 	md->md_init_finish = true;
775 	return 0;
776 
777 err_uninit_proxy:
778 	t7xx_port_proxy_uninit(md->port_prox);
779 
780 err_uninit_ap_cldma:
781 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
782 
783 err_uninit_md_cldma:
784 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
785 
786 err_uninit_ccmni:
787 	t7xx_ccmni_exit(t7xx_dev);
788 
789 err_uninit_fsm:
790 	t7xx_fsm_uninit(md);
791 
792 err_destroy_hswq:
793 	destroy_workqueue(md->handshake_wq);
794 	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
795 	return ret;
796 }
797 
798 void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
799 {
800 	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
801 	struct t7xx_modem *md = t7xx_dev->md;
802 
803 	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
804 
805 	if (!md->md_init_finish)
806 		return;
807 
808 	if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
809 		t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
810 	t7xx_port_proxy_uninit(md->port_prox);
811 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
812 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
813 	t7xx_ccmni_exit(t7xx_dev);
814 	t7xx_fsm_uninit(md);
815 	destroy_workqueue(md->handshake_wq);
816 }
817