xref: /linux/drivers/net/wwan/t7xx/t7xx_pci.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define T7XX_INIT_TIMEOUT		20
#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		20000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

static const char * const t7xx_mode_names[] = {
	[T7XX_UNKNOWN] = "unknown",
	[T7XX_READY] = "ready",
	[T7XX_RESET] = "reset",
	[T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
	[T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
	[T7XX_FASTBOOT_DUMP] = "fastboot_dump",
};

static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);

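/* Handle writes to the "t7xx_mode" sysfs attribute: "fastboot_switching"
 * resumes the device and resets it into fastboot, "reset" triggers a PLDR
 * reset, and writing the mode the device is already in returns -EBUSY.
 */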
static ssize_t t7xx_mode_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;
	enum t7xx_mode mode;
	int index;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	mode = READ_ONCE(t7xx_dev->mode);

	/* Reject strings that do not name a supported mode */
	index = sysfs_match_string(t7xx_mode_names, buf);
	if (index < 0)
		return index;

	if (index == mode)
		return -EBUSY;

	if (index == T7XX_FASTBOOT_SWITCHING) {
		if (mode == T7XX_FASTBOOT_DOWNLOAD)
			return count;

		WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
		pm_runtime_resume(dev);
		t7xx_reset_device(t7xx_dev, FASTBOOT);
	} else if (index == T7XX_RESET) {
		pm_runtime_resume(dev);
		t7xx_reset_device(t7xx_dev, PLDR);
	}

	return count;
}

static ssize_t t7xx_mode_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	enum t7xx_mode mode = T7XX_UNKNOWN;
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	mode = READ_ONCE(t7xx_dev->mode);
	if (mode < T7XX_MODE_LAST)
		return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);

	return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
}

static DEVICE_ATTR_RW(t7xx_mode);

static struct attribute *t7xx_mode_attr[] = {
	&dev_attr_t7xx_mode.attr,
	NULL
};

static const struct attribute_group t7xx_mode_attribute_group = {
	.attrs = t7xx_mode_attr,
};

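/* Publish a new device mode and wake up any poll/notify waiters on the
 * "t7xx_mode" sysfs attribute.
 */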
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
{
	if (!t7xx_dev)
		return;

	WRITE_ONCE(t7xx_dev->mode, mode);
	sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
}

enum t7xx_pm_state {
	MTK_PM_EXCEPTION,
	MTK_PM_INIT,		/* Device initialized, but handshake not completed */
	MTK_PM_SUSPENDED,
	MTK_PM_RESUMED,
};

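/* Set or clear the MAC sleep-disable bit in T7XX_PCIE_MISC_CTRL; clearing it
 * (enable == true) allows the device to enter deep sleep.
 */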
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
	void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
	u32 value;

	value = ioread32(ctrl_reg);

	if (enable)
		value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
	else
		value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

	iowrite32(value, ctrl_reg);
}

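/* Poll T7XX_PCIE_RESOURCE_STATUS until all PCIe resources are reported as
 * held, or fail with -ETIMEDOUT after PM_RESOURCE_POLL_TIMEOUT_US.
 */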
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, val;

	ret = read_poll_timeout(ioread32, val,
				(val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
				PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
				IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
	if (ret == -ETIMEDOUT)
		dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

	return ret;
}

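/* Early PM setup at probe time: initialize the PM entity list, locks and
 * completions, enable device wakeup, keep the PCIe low-power states disabled
 * (they are re-enabled in t7xx_pci_pm_init_late()), and configure runtime PM
 * autosuspend.
 */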
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;

	INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
	mutex_init(&t7xx_dev->md_pm_entity_mtx);
	spin_lock_init(&t7xx_dev->md_pm_lock);
	init_completion(&t7xx_dev->sleep_lock_acquire);
	init_completion(&t7xx_dev->pm_sr_ack);
	init_completion(&t7xx_dev->init_done);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
				DPM_FLAG_NO_DIRECT_COMPLETE);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	return 0;
}

void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
	/* Enable the PCIe resource lock only after MD deep sleep is done */
	t7xx_mhccif_mask_clr(t7xx_dev,
			     D2H_INT_DS_LOCK_ACK |
			     D2H_INT_SUSPEND_ACK |
			     D2H_INT_RESUME_ACK |
			     D2H_INT_SUSPEND_ACK_AP |
			     D2H_INT_RESUME_ACK_AP);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
	pm_runtime_allow(&t7xx_dev->pdev->dev);
	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
	complete_all(&t7xx_dev->init_done);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	/* The device is kept in the FSM re-init flow,
	 * so just roll back the PM settings to their initial values.
	 */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return -EEXIST;
		}
	}

	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
	return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity, *tmp_entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			list_del(&pm_entity->entity);
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return 0;
		}
	}

	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	return -ENXIO;
}

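/* Wait for the deep sleep lock requested by t7xx_pci_disable_sleep() to be
 * acknowledged; returns 0 on timeout, otherwise the remaining jiffies.
 */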
int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
					  msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
	if (!ret)
		dev_err_ratelimited(dev, "Resource wait complete timed out\n");

	return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability. Note that the device can still enter deep sleep
 * while it is in the D0 state from the host's point of view.
 *
 * If the device is in deep sleep, wake it up and disable the deep sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count++;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock_and_complete;

	if (t7xx_dev->sleep_disable_count == 1) {
		u32 status;

		reinit_completion(&t7xx_dev->sleep_lock_acquire);
		t7xx_dev_set_sleep_capability(t7xx_dev, false);

		status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
		if (status & T7XX_PCIE_RESOURCE_STS_MSK)
			goto unlock_and_complete;

		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
	}
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	return;

unlock_and_complete:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Once deep sleep is enabled, the device may enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count--;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock;

	if (t7xx_dev->sleep_disable_count == 0)
		t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

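/* Send a host-to-device PM request over MHCCIF and wait up to
 * PM_ACK_TIMEOUT_MS for the matching acknowledgment interrupt.
 */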
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
	unsigned long wait_ret;

	reinit_completion(&t7xx_dev->pm_sr_ack);
	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
	wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
					       msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
	if (!wait_ret)
		return -ETIMEDOUT;

	return 0;
}

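/* Common suspend path for system sleep, runtime suspend and shutdown: disable
 * low-power states, suspend all registered PM entities, then ask the modem
 * (MD) and the SAP to suspend. On failure the already-suspended entities are
 * resumed and the low-power settings are restored.
 */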
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
	enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	int ret;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
	    READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
		dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
		return -EFAULT;
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	ret = t7xx_wait_pm_config(t7xx_dev);
	if (ret) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return ret;
	}

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_dev->rgu_pci_irq_en = false;

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (!entity->suspend)
			continue;

		ret = entity->suspend(t7xx_dev, entity->entity_param);
		if (ret) {
			entity_id = entity->id;
			dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
			goto abort_suspend;
		}
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
	if (ret) {
		dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
		goto abort_suspend;
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
	if (ret) {
		t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
		dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
		goto abort_suspend;
	}

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->suspend_late)
			entity->suspend_late(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	return 0;

abort_suspend:
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity_id == entity->id)
			break;

		if (entity->resume)
			entity->resume(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

	/* Disable the interrupts first and let the IPs enable them */
	iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

	/* The device disables PCIe interrupts during resume;
	 * the following call re-enables them.
	 */
	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

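/* Re-initialize the PCIe MAC after a low-power exit: re-enable the device,
 * restore the address translation and interrupt setup, and, when coming back
 * from D3 (is_d3), also re-run the MHCCIF and PM re-initialization.
 */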
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
	int ret;

	ret = pcim_enable_device(t7xx_dev->pdev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pcie_interrupt_reinit(t7xx_dev);

	if (is_d3) {
		t7xx_mhccif_init(t7xx_dev);
		t7xx_pci_pm_reinit(t7xx_dev);
	}

	return 0;
}

static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
	struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret = -EINVAL;

	switch (event) {
	case FSM_CMD_STOP:
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
		break;

	case FSM_CMD_START:
		t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
		t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
		t7xx_dev->rgu_pci_irq_en = true;
		t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
		break;

	default:
		break;
	}

	if (ret)
		dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

	return ret;
}

int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
{
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
	int ret;

	if (mode == T7XX_FASTBOOT_DOWNLOAD)
		pm_runtime_put_noidle(&t7xx_dev->pdev->dev);

	ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
	if (ret)
		return ret;

	return 0;
}

int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
{
	int ret;

	ret = t7xx_pcie_reinit(t7xx_dev, boot);
	if (ret)
		return ret;

	t7xx_clear_rgu_irq(t7xx_dev);
	return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
}

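/* Common resume path for system resume, thaw/restore and runtime resume. When
 * state_check is true, T7XX_PCIE_PM_RESUME_STATE is inspected to tell a plain
 * L1/L2 exit apart from a full device reboot (D3/L3) or an exception, and the
 * corresponding re-initialization is performed before the PM entities and the
 * MD/SAP are resumed.
 */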
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	u32 prev_state;
	int ret = 0;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return 0;
	}

	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

	if (state_check) {
		/* For D3/L3 resume, the device could boot so quickly that the
		 * initial value of the dummy register might already be overwritten.
		 * Treat it as a new boot when the ATR source address register is not initialized.
		 */
		u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
					   ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
		if (prev_state == PM_RESUME_REG_STATE_L3 ||
		    (prev_state == PM_RESUME_REG_STATE_INIT &&
		     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
			ret = t7xx_pci_reprobe_early(t7xx_dev);
			if (ret)
				return ret;

			return t7xx_pci_reprobe(t7xx_dev, true);
		}

		if (prev_state == PM_RESUME_REG_STATE_EXP ||
		    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
			if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
				ret = t7xx_pcie_reinit(t7xx_dev, false);
				if (ret)
					return ret;
			}

			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			t7xx_dev->rgu_pci_irq_en = true;
			t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

			t7xx_mhccif_mask_clr(t7xx_dev,
					     D2H_INT_EXCEPTION_INIT |
					     D2H_INT_EXCEPTION_INIT_DONE |
					     D2H_INT_EXCEPTION_CLEARQ_DONE |
					     D2H_INT_EXCEPTION_ALLQ_RESET |
					     D2H_INT_PORT_ENUM);

			return ret;
		}

		if (prev_state == PM_RESUME_REG_STATE_L2) {
			ret = t7xx_pcie_reinit(t7xx_dev, false);
			if (ret)
				return ret;

		} else if (prev_state != PM_RESUME_REG_STATE_L1 &&
			   prev_state != PM_RESUME_REG_STATE_INIT) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			return 0;
		}
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume_early)
			entity->resume_early(t7xx_dev, entity->entity_param);
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
	if (ret)
		dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
	if (ret)
		dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume) {
			ret = entity->resume(t7xx_dev, entity->entity_param);
			if (ret)
				dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
					entity->id, ret);
		}
	}

	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	pm_runtime_mark_last_busy(&pdev->dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
	__t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_prepare(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
		dev_warn(dev, "Not ready for system sleep.\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
	.prepare = t7xx_pci_pm_prepare,
	.suspend = t7xx_pci_pm_suspend,
	.resume = t7xx_pci_pm_resume,
	.resume_noirq = t7xx_pci_pm_resume_noirq,
	.freeze = t7xx_pci_pm_suspend,
	.thaw = t7xx_pci_pm_thaw,
	.poweroff = t7xx_pci_pm_suspend,
	.restore = t7xx_pci_pm_resume,
	.restore_noirq = t7xx_pci_pm_resume_noirq,
	.runtime_suspend = t7xx_pci_pm_runtime_suspend,
	.runtime_resume = t7xx_pci_pm_runtime_resume
};

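/* Request a threaded IRQ for every MSI-X vector that has a registered handler;
 * on failure, free the vectors that were already requested.
 */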
static int t7xx_request_irq(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret = 0, i;

	t7xx_dev = pci_get_drvdata(pdev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		const char *irq_descr;
		int irq_vec;

		if (!t7xx_dev->intr_handler[i])
			continue;

		irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
					   dev_driver_string(&pdev->dev), i);
		if (!irq_descr) {
			ret = -ENOMEM;
			break;
		}

		irq_vec = pci_irq_vector(pdev, i);
		ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
					   t7xx_dev->intr_thread[i], 0, irq_descr,
					   t7xx_dev->callback_param[i]);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			if (!t7xx_dev->intr_handler[i])
				continue;

			free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
		}
	}

	return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;
	int ret;

	/* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */
	ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}

	ret = t7xx_request_irq(pdev);
	if (ret) {
		pci_free_irq_vectors(pdev);
		return ret;
	}

	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
	return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, i;

	if (!t7xx_dev->pdev->msix_cap)
		return -EINVAL;

	ret = t7xx_setup_msix(t7xx_dev);
	if (ret)
		return ret;

	/* IPs enable interrupts when ready */
	for (i = 0; i < EXT_INT_NUM; i++)
		t7xx_pcie_mac_set_int(t7xx_dev, i);

	return 0;
}

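/* Compute the host-side base of the infracfg_ao block: take its device address
 * (INFRACFG_AO_DEV_CHIP), subtract the device address the external register BAR
 * translates to, and add that offset to the BAR's mapped base.
 */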
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					      INFRACFG_AO_DEV_CHIP -
					      t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret;

	t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
	if (!t7xx_dev)
		return -ENOMEM;

	pci_set_drvdata(pdev, t7xx_dev);
	t7xx_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
				 pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
		return -ENOMEM;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
		return ret;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
		return ret;
	}

	IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
	t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

	ret = t7xx_pci_pm_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pci_infracfg_ao_calc(t7xx_dev);
	t7xx_mhccif_init(t7xx_dev);

	ret = t7xx_md_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
				 &t7xx_mode_attribute_group);
	if (ret)
		goto err_md_exit;

	ret = t7xx_interrupt_init(t7xx_dev);
	if (ret)
		goto err_remove_group;

	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
	t7xx_pcie_mac_interrupts_en(t7xx_dev);

	return 0;

err_remove_group:
	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
			   &t7xx_mode_attribute_group);

err_md_exit:
	t7xx_md_exit(t7xx_dev);
	return ret;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int i;

	t7xx_dev = pci_get_drvdata(pdev);

	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
			   &t7xx_mode_attribute_group);
	t7xx_md_exit(t7xx_dev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		if (!t7xx_dev->intr_handler[i])
			continue;

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
	}

	pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
	{ PCI_DEVICE(0x14c0, 0x4d75) }, // Dell DW5933e
	{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
	.name = "mtk_t7xx",
	.id_table = t7xx_pci_table,
	.probe = t7xx_pci_probe,
	.remove = t7xx_pci_remove,
	.driver.pm = &t7xx_pci_pm_ops,
	.shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");