// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#include "t7xx_port_proxy.h"

#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define T7XX_INIT_TIMEOUT		20
#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		5000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

static const char * const t7xx_mode_names[] = {
	[T7XX_UNKNOWN] = "unknown",
	[T7XX_READY] = "ready",
	[T7XX_RESET] = "reset",
	[T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
	[T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
	[T7XX_FASTBOOT_DUMP] = "fastboot_dump",
};

static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);

static ssize_t t7xx_mode_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;
	enum t7xx_mode mode;
	int index = 0;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	mode = READ_ONCE(t7xx_dev->mode);

	index = sysfs_match_string(t7xx_mode_names, buf);
	if (index == mode)
		return -EBUSY;

	if (index == T7XX_FASTBOOT_SWITCHING) {
		if (mode == T7XX_FASTBOOT_DOWNLOAD)
			return count;

		WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
		pm_runtime_resume(dev);
		t7xx_reset_device(t7xx_dev, FASTBOOT);
	} else if (index == T7XX_RESET) {
		pm_runtime_resume(dev);
		t7xx_reset_device(t7xx_dev, PLDR);
	}

	return count;
}

static ssize_t t7xx_mode_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	enum t7xx_mode mode = T7XX_UNKNOWN;
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	mode = READ_ONCE(t7xx_dev->mode);
	if (mode < T7XX_MODE_LAST)
		return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);

	return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
}

static DEVICE_ATTR_RW(t7xx_mode);

static ssize_t t7xx_debug_ports_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;
	bool show;
	int ret;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	ret = kstrtobool(buf, &show);
	if (ret < 0)
		return ret;

	t7xx_proxy_debug_ports_show(t7xx_dev, show);
	WRITE_ONCE(t7xx_dev->debug_ports_show, show);

	return count;
}

static ssize_t t7xx_debug_ports_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct pci_dev *pdev;
	bool show;

	pdev = to_pci_dev(dev);
	t7xx_dev = pci_get_drvdata(pdev);
	if (!t7xx_dev)
		return -ENODEV;

	show = READ_ONCE(t7xx_dev->debug_ports_show);

	return sysfs_emit(buf, "%d\n", show);
}

static DEVICE_ATTR_RW(t7xx_debug_ports);

static struct attribute *t7xx_attr[] = {
	&dev_attr_t7xx_mode.attr,
	&dev_attr_t7xx_debug_ports.attr,
	NULL
};

static const struct attribute_group t7xx_attribute_group = {
	.attrs = t7xx_attr,
};

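/* Example usage (illustrative only; the device address is hypothetical):
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/t7xx_mode
 *	ready
 *	# echo fastboot_switching > /sys/bus/pci/devices/0000:01:00.0/t7xx_mode
 *	# echo 1 > /sys/bus/pci/devices/0000:01:00.0/t7xx_debug_ports
 *
 * Mode writes are matched against t7xx_mode_names[] by
 * sysfs_match_string(), so only the strings from that table are
 * meaningful; t7xx_debug_ports takes any kstrtobool()-compatible value.
 */
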
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
{
	if (!t7xx_dev)
		return;

	WRITE_ONCE(t7xx_dev->mode, mode);
	sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
}

enum t7xx_pm_state {
	MTK_PM_EXCEPTION,
	MTK_PM_INIT,		/* Device initialized, but handshake not completed */
	MTK_PM_SUSPENDED,
	MTK_PM_RESUMED,
};

static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
	void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
	u32 value;

	value = ioread32(ctrl_reg);

	if (enable)
		value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
	else
		value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

	iowrite32(value, ctrl_reg);
}

static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, val;

	ret = read_poll_timeout(ioread32, val,
				(val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
				PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
				IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
	if (ret == -ETIMEDOUT)
		dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

	return ret;
}

static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;

	INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
	mutex_init(&t7xx_dev->md_pm_entity_mtx);
	spin_lock_init(&t7xx_dev->md_pm_lock);
	init_completion(&t7xx_dev->sleep_lock_acquire);
	init_completion(&t7xx_dev->pm_sr_ack);
	init_completion(&t7xx_dev->init_done);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
				DPM_FLAG_NO_DIRECT_COMPLETE);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	return 0;
}

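/* Note on the setup above: the PCI core forbids runtime PM by default,
 * so the autosuspend configuration only takes effect once
 * t7xx_pci_pm_init_late() below calls pm_runtime_allow().
 * DPM_FLAG_NO_DIRECT_COMPLETE keeps the PM core from skipping our
 * suspend/resume callbacks for an already runtime-suspended device, and
 * the DISABLE_ASPM_LOWPWR write holds the link out of its low-power
 * state until the modem handshake completes.
 */
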
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
	/* Enable the PCIe resource lock only after MD deep sleep is done */
	t7xx_mhccif_mask_clr(t7xx_dev,
			     D2H_INT_DS_LOCK_ACK |
			     D2H_INT_SUSPEND_ACK |
			     D2H_INT_RESUME_ACK |
			     D2H_INT_SUSPEND_ACK_AP |
			     D2H_INT_RESUME_ACK_AP);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
	pm_runtime_allow(&t7xx_dev->pdev->dev);
	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
	complete_all(&t7xx_dev->init_done);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	/* The device is kept in the FSM re-init flow,
	 * so just roll back the PM settings to their init values.
	 */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return -EEXIST;
		}
	}

	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
	return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity, *tmp_entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			list_del(&pm_entity->entity);
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return 0;
		}
	}

	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	return -ENXIO;
}

int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
					  msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
	if (!ret)
		dev_err_ratelimited(dev, "Resource wait complete timed out\n");

	return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability. Note that the device can still go into
 * the deep sleep state while it is in D0 state from the host's point of view.
 *
 * If the device is in the deep sleep state, wake it up and disable the deep
 * sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count++;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock_and_complete;

	if (t7xx_dev->sleep_disable_count == 1) {
		u32 status;

		reinit_completion(&t7xx_dev->sleep_lock_acquire);
		t7xx_dev_set_sleep_capability(t7xx_dev, false);

		status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
		if (status & T7XX_PCIE_RESOURCE_STS_MSK)
			goto unlock_and_complete;

		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
	}
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	return;

unlock_and_complete:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After deep sleep is enabled, the device can enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count--;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock;

	if (t7xx_dev->sleep_disable_count == 0)
		t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

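/* Illustrative pairing (sketch only; t7xx_do_mmio() is hypothetical):
 *
 *	static void t7xx_do_mmio(struct t7xx_pci_dev *t7xx_dev)
 *	{
 *		t7xx_pci_disable_sleep(t7xx_dev);
 *		if (t7xx_pci_sleep_disable_complete(t7xx_dev))
 *			; // MMIO that must not race with deep sleep
 *		t7xx_pci_enable_sleep(t7xx_dev);
 *	}
 *
 * sleep_disable_count makes the bracket nestable: only the first
 * disable arms the DS_LOCK handshake, and only the last enable re-arms
 * deep sleep.
 */
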
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
	unsigned long wait_ret;

	reinit_completion(&t7xx_dev->pm_sr_ack);
	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
	wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
					       msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
	if (!wait_ret)
		return -ETIMEDOUT;

	return 0;
}

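/* Suspend sequence implemented below:
 *  1. Bail out unless the handshake is done and the mode is T7XX_READY.
 *  2. Disable ASPM low-power states and wait for the PCIe resources.
 *  3. Mark MTK_PM_SUSPENDED and mask the RGU interrupt.
 *  4. Run each registered entity's ->suspend() callback.
 *  5. Handshake H2D_CH_SUSPEND_REQ (MD), then H2D_CH_SUSPEND_REQ_AP (SAP).
 *  6. Run the ->suspend_late() callbacks and re-enable ASPM.
 * Any failure unwinds through abort_suspend, resuming the entities that
 * had already been suspended.
 */
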
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
	enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	int ret;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
	    READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
		dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
		return -EFAULT;
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	ret = t7xx_wait_pm_config(t7xx_dev);
	if (ret) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return ret;
	}

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_dev->rgu_pci_irq_en = false;

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (!entity->suspend)
			continue;

		ret = entity->suspend(t7xx_dev, entity->entity_param);
		if (ret) {
			entity_id = entity->id;
			dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
			goto abort_suspend;
		}
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
	if (ret) {
		dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
		goto abort_suspend;
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
	if (ret) {
		t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
		dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
		goto abort_suspend;
	}

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->suspend_late)
			entity->suspend_late(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	return 0;

abort_suspend:
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity_id == entity->id)
			break;

		if (entity->resume)
			entity->resume(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

	/* Disable interrupt first and let the IPs enable them */
	iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

	/* The device disables PCIe interrupts during resume;
	 * the following call re-enables them.
	 */
	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
	int ret;

	ret = pcim_enable_device(t7xx_dev->pdev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pcie_interrupt_reinit(t7xx_dev);

	if (is_d3) {
		t7xx_mhccif_init(t7xx_dev);
		t7xx_pci_pm_reinit(t7xx_dev);
	}

	return 0;
}

static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
	struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret = -EINVAL;

	switch (event) {
	case FSM_CMD_STOP:
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
		break;

	case FSM_CMD_START:
		t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
		t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
		t7xx_dev->rgu_pci_irq_en = true;
		t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
		break;

	default:
		break;
	}

	if (ret)
		dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

	return ret;
}

int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
{
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
	int ret;

	if (mode == T7XX_FASTBOOT_DOWNLOAD)
		pm_runtime_put_noidle(&t7xx_dev->pdev->dev);

	ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
	if (ret)
		return ret;

	return 0;
}

int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
{
	int ret;

	ret = t7xx_pcie_reinit(t7xx_dev, boot);
	if (ret)
		return ret;

	t7xx_clear_rgu_irq(t7xx_dev);
	return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
}

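/* Resume handling keys off the dummy register T7XX_PCIE_PM_RESUME_STATE
 * written by the device. Paths taken below when state_check is true:
 *
 *	L3, or INIT with an uninitialized ATR source address
 *			-> full reprobe (the device rebooted or lost power)
 *	EXP / L2_EXP	-> exception recovery (PCIe reinit for L2_EXP only)
 *	L2		-> PCIe reinit, then the normal resume handshake
 *	L1 / INIT	-> normal resume handshake only
 *	anything else	-> stop the FSM and remain suspended
 */
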
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	u32 prev_state;
	int ret = 0;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return 0;
	}

	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

	if (state_check) {
		/* For D3/L3 resume, the device could boot so quickly that the
		 * initial value of the dummy register might be overwritten.
		 * Identify new boots if the ATR source address register is not initialized.
		 */
		u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
					   ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
		if (prev_state == PM_RESUME_REG_STATE_L3 ||
		    (prev_state == PM_RESUME_REG_STATE_INIT &&
		     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
			ret = t7xx_pci_reprobe_early(t7xx_dev);
			if (ret)
				return ret;

			return t7xx_pci_reprobe(t7xx_dev, true);
		}

		if (prev_state == PM_RESUME_REG_STATE_EXP ||
		    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
			if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
				ret = t7xx_pcie_reinit(t7xx_dev, false);
				if (ret)
					return ret;
			}

			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			t7xx_dev->rgu_pci_irq_en = true;
			t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

			t7xx_mhccif_mask_clr(t7xx_dev,
					     D2H_INT_EXCEPTION_INIT |
					     D2H_INT_EXCEPTION_INIT_DONE |
					     D2H_INT_EXCEPTION_CLEARQ_DONE |
					     D2H_INT_EXCEPTION_ALLQ_RESET |
					     D2H_INT_PORT_ENUM);

			return ret;
		}

		if (prev_state == PM_RESUME_REG_STATE_L2) {
			ret = t7xx_pcie_reinit(t7xx_dev, false);
			if (ret)
				return ret;

		} else if (prev_state != PM_RESUME_REG_STATE_L1 &&
			   prev_state != PM_RESUME_REG_STATE_INIT) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			return 0;
		}
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume_early)
			entity->resume_early(t7xx_dev, entity->entity_param);
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
	if (ret)
		dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
	if (ret)
		dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume) {
			ret = entity->resume(t7xx_dev, entity->entity_param);
			if (ret)
				dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
					entity->id, ret);
		}
	}

	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	pm_runtime_mark_last_busy(&pdev->dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
	__t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_prepare(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
		dev_warn(dev, "Not ready for system sleep.\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
	.prepare = t7xx_pci_pm_prepare,
	.suspend = t7xx_pci_pm_suspend,
	.resume = t7xx_pci_pm_resume,
	.resume_noirq = t7xx_pci_pm_resume_noirq,
	.freeze = t7xx_pci_pm_suspend,
	.thaw = t7xx_pci_pm_thaw,
	.poweroff = t7xx_pci_pm_suspend,
	.restore = t7xx_pci_pm_resume,
	.restore_noirq = t7xx_pci_pm_resume_noirq,
	.runtime_suspend = t7xx_pci_pm_runtime_suspend,
	.runtime_resume = t7xx_pci_pm_runtime_resume
};

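/* Note on the dev_pm_ops wiring above: ->thaw resumes with
 * state_check=false because the device kept power while the hibernation
 * image was created, so none of the reprobe/recovery paths apply;
 * ->resume and ->restore run the full state_check logic. Runtime PM
 * reuses the same helpers as system-wide suspend/resume.
 */
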
static int t7xx_request_irq(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret = 0, i;

	t7xx_dev = pci_get_drvdata(pdev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		const char *irq_descr;
		int irq_vec;

		if (!t7xx_dev->intr_handler[i])
			continue;

		irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
					   dev_driver_string(&pdev->dev), i);
		if (!irq_descr) {
			ret = -ENOMEM;
			break;
		}

		irq_vec = pci_irq_vector(pdev, i);
		ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
					   t7xx_dev->intr_thread[i], 0, irq_descr,
					   t7xx_dev->callback_param[i]);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			if (!t7xx_dev->intr_handler[i])
				continue;

			free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
		}
	}

	return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;
	int ret;

	/* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */
	ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}

	ret = t7xx_request_irq(pdev);
	if (ret) {
		pci_free_irq_vectors(pdev);
		return ret;
	}

	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
	return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, i;

	if (!t7xx_dev->pdev->msix_cap)
		return -EINVAL;

	ret = t7xx_setup_msix(t7xx_dev);
	if (ret)
		return ret;

	/* IPs enable interrupts when ready */
	for (i = 0; i < EXT_INT_NUM; i++)
		t7xx_pcie_mac_set_int(t7xx_dev, i);

	return 0;
}

static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					      INFRACFG_AO_DEV_CHIP -
					      t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

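/* Reading of the address math above (inferred from the field names):
 * pcie_ext_reg_base is the host mapping of the window whose device-side
 * base is pcie_dev_reg_trsl_addr, so the infracfg_ao block at device
 * address INFRACFG_AO_DEV_CHIP lands at
 * base + (INFRACFG_AO_DEV_CHIP - trsl_addr); see t7xx_pcie_mac_atr_init().
 */
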
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret;

	t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
	if (!t7xx_dev)
		return -ENOMEM;

	pci_set_drvdata(pdev, t7xx_dev);
	t7xx_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
				 pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
		return -ENOMEM;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
		return ret;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
		return ret;
	}

	IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
	t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

	ret = t7xx_pci_pm_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pci_infracfg_ao_calc(t7xx_dev);
	t7xx_mhccif_init(t7xx_dev);

	ret = t7xx_md_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
				 &t7xx_attribute_group);
	if (ret)
		goto err_md_exit;

	ret = t7xx_interrupt_init(t7xx_dev);
	if (ret)
		goto err_remove_group;

	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
	t7xx_pcie_mac_interrupts_en(t7xx_dev);

	return 0;

err_remove_group:
	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
			   &t7xx_attribute_group);

err_md_exit:
	t7xx_md_exit(t7xx_dev);
	return ret;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int i;

	t7xx_dev = pci_get_drvdata(pdev);

	sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
			   &t7xx_attribute_group);
	t7xx_md_exit(t7xx_dev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		if (!t7xx_dev->intr_handler[i])
			continue;

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
	}

	pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
	{ PCI_DEVICE(0x14c0, 0x4d75) }, /* Dell DW5933e */
	{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
	.name = "mtk_t7xx",
	.id_table = t7xx_pci_table,
	.probe = t7xx_pci_probe,
	.remove = t7xx_pci_remove,
	.driver.pm = &t7xx_pci_pm_ops,
	.shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");