xref: /linux/drivers/bus/mhi/host/pci_generic.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MHI PCI driver - MHI over PCI controller driver
4  *
5  * This module is a generic driver for registering MHI-over-PCI devices,
6  * such as PCIe QCOM modems.
7  *
8  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19 
20 #define MHI_PCI_DEFAULT_BAR_NUM 0
21 
22 #define MHI_POST_RESET_DELAY_MS 2000
23 
24 #define HEALTH_CHECK_PERIOD (HZ * 2)
25 
26 /* PCI VID definitions */
27 #define PCI_VENDOR_ID_THALES	0x1269
28 #define PCI_VENDOR_ID_QUECTEL	0x1eac
29 
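/*
 * Doorbell index and magic cookie used by mhi_pci_generic_edl_trigger() to
 * request Emergency Download (EDL) mode before issuing a SoC reset.
 */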
30 #define MHI_EDL_DB			91
31 #define MHI_EDL_COOKIE			0xEDEDEDED
32 
33 /**
34  * struct mhi_pci_dev_info - MHI PCI device specific information
35  * @config: MHI controller configuration
36  * @name: name of the PCI module
37  * @fw: firmware path (if any)
38  * @edl: emergency download mode firmware path (if any)
39  * @edl_trigger: capable of triggering EDL mode in the device (if supported)
40  * @bar_num: PCI base address register to use for MHI MMIO register space
41  * @dma_data_width: DMA transfer word size (32 or 64 bits)
42  * @mru_default: default MRU size for MBIM network packets
43  * @sideband_wake: set for devices that use a dedicated sideband GPIO for
44  *		   wakeup instead of inband wake support (such as sdx24)
45  */
46 struct mhi_pci_dev_info {
47 	const struct mhi_controller_config *config;
48 	const char *name;
49 	const char *fw;
50 	const char *edl;
51 	bool edl_trigger;
52 	unsigned int bar_num;
53 	unsigned int dma_data_width;
54 	unsigned int mru_default;
55 	bool sideband_wake;
56 };
57 
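/*
 * The MHI_CHANNEL_CONFIG_* macros below expand to struct mhi_channel_config
 * initializers: plain _UL/_DL entries are software channels available in
 * mission mode (AMSS), _HW_* entries are hardware accelerated data channels
 * using burst doorbell mode, and the _SBL/_FP variants are only valid in the
 * SBL or flash programmer (EDL) execution environments.
 */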
58 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
59 	{						\
60 		.num = ch_num,				\
61 		.name = ch_name,			\
62 		.num_elements = el_count,		\
63 		.event_ring = ev_ring,			\
64 		.dir = DMA_TO_DEVICE,			\
65 		.ee_mask = BIT(MHI_EE_AMSS),		\
66 		.pollcfg = 0,				\
67 		.doorbell = MHI_DB_BRST_DISABLE,	\
68 		.lpm_notify = false,			\
69 		.offload_channel = false,		\
70 		.doorbell_mode_switch = false,		\
71 	}						\
72 
73 #define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
74 	{						\
75 		.num = ch_num,				\
76 		.name = ch_name,			\
77 		.num_elements = el_count,		\
78 		.event_ring = ev_ring,			\
79 		.dir = DMA_FROM_DEVICE,			\
80 		.ee_mask = BIT(MHI_EE_AMSS),		\
81 		.pollcfg = 0,				\
82 		.doorbell = MHI_DB_BRST_DISABLE,	\
83 		.lpm_notify = false,			\
84 		.offload_channel = false,		\
85 		.doorbell_mode_switch = false,		\
86 	}
87 
88 #define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
89 	{						\
90 		.num = ch_num,				\
91 		.name = ch_name,			\
92 		.num_elements = el_count,		\
93 		.event_ring = ev_ring,			\
94 		.dir = DMA_FROM_DEVICE,			\
95 		.ee_mask = BIT(MHI_EE_AMSS),		\
96 		.pollcfg = 0,				\
97 		.doorbell = MHI_DB_BRST_DISABLE,	\
98 		.lpm_notify = false,			\
99 		.offload_channel = false,		\
100 		.doorbell_mode_switch = false,		\
101 		.auto_queue = true,			\
102 	}
103 
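/*
 * The MHI_EVENT_CONFIG_* macros expand to struct mhi_event_config
 * initializers: _CTRL rings carry control completions, _DATA/_SW_DATA rings
 * (defined further down) carry transfer completions with different IRQ
 * moderation settings, and _HW_DATA rings are bound to one hardware channel.
 */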
104 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
105 	{					\
106 		.num_elements = el_count,	\
107 		.irq_moderation_ms = 0,		\
108 		.irq = (ev_ring) + 1,		\
109 		.priority = 1,			\
110 		.mode = MHI_DB_BRST_DISABLE,	\
111 		.data_type = MHI_ER_CTRL,	\
112 		.hardware_event = false,	\
113 		.client_managed = false,	\
114 		.offload_channel = false,	\
115 	}
116 
117 #define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
118 	{						\
119 		.num = ch_num,				\
120 		.name = ch_name,			\
121 		.num_elements = el_count,		\
122 		.event_ring = ev_ring,			\
123 		.dir = DMA_TO_DEVICE,			\
124 		.ee_mask = BIT(MHI_EE_AMSS),		\
125 		.pollcfg = 0,				\
126 		.doorbell = MHI_DB_BRST_ENABLE,	\
127 		.lpm_notify = false,			\
128 		.offload_channel = false,		\
129 		.doorbell_mode_switch = true,		\
130 	}						\
131 
132 #define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
133 	{						\
134 		.num = ch_num,				\
135 		.name = ch_name,			\
136 		.num_elements = el_count,		\
137 		.event_ring = ev_ring,			\
138 		.dir = DMA_FROM_DEVICE,			\
139 		.ee_mask = BIT(MHI_EE_AMSS),		\
140 		.pollcfg = 0,				\
141 		.doorbell = MHI_DB_BRST_ENABLE,	\
142 		.lpm_notify = false,			\
143 		.offload_channel = false,		\
144 		.doorbell_mode_switch = true,		\
145 	}
146 
147 #define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
148 	{						\
149 		.num = ch_num,				\
150 		.name = ch_name,			\
151 		.num_elements = el_count,		\
152 		.event_ring = ev_ring,			\
153 		.dir = DMA_TO_DEVICE,			\
154 		.ee_mask = BIT(MHI_EE_SBL),		\
155 		.pollcfg = 0,				\
156 		.doorbell = MHI_DB_BRST_DISABLE,	\
157 		.lpm_notify = false,			\
158 		.offload_channel = false,		\
159 		.doorbell_mode_switch = false,		\
160 	}						\
161 
162 #define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
163 	{						\
164 		.num = ch_num,				\
165 		.name = ch_name,			\
166 		.num_elements = el_count,		\
167 		.event_ring = ev_ring,			\
168 		.dir = DMA_FROM_DEVICE,			\
169 		.ee_mask = BIT(MHI_EE_SBL),		\
170 		.pollcfg = 0,				\
171 		.doorbell = MHI_DB_BRST_DISABLE,	\
172 		.lpm_notify = false,			\
173 		.offload_channel = false,		\
174 		.doorbell_mode_switch = false,		\
175 	}
176 
177 #define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
178 	{						\
179 		.num = ch_num,				\
180 		.name = ch_name,			\
181 		.num_elements = el_count,		\
182 		.event_ring = ev_ring,			\
183 		.dir = DMA_TO_DEVICE,			\
184 		.ee_mask = BIT(MHI_EE_FP),		\
185 		.pollcfg = 0,				\
186 		.doorbell = MHI_DB_BRST_DISABLE,	\
187 		.lpm_notify = false,			\
188 		.offload_channel = false,		\
189 		.doorbell_mode_switch = false,		\
190 	}						\
191 
192 #define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
193 	{						\
194 		.num = ch_num,				\
195 		.name = ch_name,			\
196 		.num_elements = el_count,		\
197 		.event_ring = ev_ring,			\
198 		.dir = DMA_FROM_DEVICE,			\
199 		.ee_mask = BIT(MHI_EE_FP),		\
200 		.pollcfg = 0,				\
201 		.doorbell = MHI_DB_BRST_DISABLE,	\
202 		.lpm_notify = false,			\
203 		.offload_channel = false,		\
204 		.doorbell_mode_switch = false,		\
205 	}
206 
207 #define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
208 	{					\
209 		.num_elements = el_count,	\
210 		.irq_moderation_ms = 5,		\
211 		.irq = (ev_ring) + 1,		\
212 		.priority = 1,			\
213 		.mode = MHI_DB_BRST_DISABLE,	\
214 		.data_type = MHI_ER_DATA,	\
215 		.hardware_event = false,	\
216 		.client_managed = false,	\
217 		.offload_channel = false,	\
218 	}
219 
220 #define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
221 	{					\
222 		.num_elements = el_count,	\
223 		.irq_moderation_ms = 0,		\
224 		.irq = (ev_ring) + 1,		\
225 		.priority = 1,			\
226 		.mode = MHI_DB_BRST_DISABLE,	\
227 		.data_type = MHI_ER_DATA,	\
228 		.hardware_event = false,	\
229 		.client_managed = false,	\
230 		.offload_channel = false,	\
231 	}
232 
233 #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
234 	{					\
235 		.num_elements = el_count,	\
236 		.irq_moderation_ms = 1,		\
237 		.irq = (ev_ring) + 1,		\
238 		.priority = 1,			\
239 		.mode = MHI_DB_BRST_DISABLE,	\
240 		.data_type = MHI_ER_DATA,	\
241 		.hardware_event = true,		\
242 		.client_managed = false,	\
243 		.offload_channel = false,	\
244 		.channel = ch_num,		\
245 	}
246 
247 static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
248 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
249 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
250 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
251 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
252 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
253 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
254 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
255 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
256 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
257 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
258 	MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
259 	MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
260 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
261 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
262 };
263 
264 static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
265 	/* first ring is control+data ring */
266 	MHI_EVENT_CONFIG_CTRL(0, 64),
267 	/* DIAG dedicated event ring */
268 	MHI_EVENT_CONFIG_DATA(1, 128),
269 	/* Software channels dedicated event ring */
270 	MHI_EVENT_CONFIG_SW_DATA(2, 64),
271 	MHI_EVENT_CONFIG_SW_DATA(3, 64),
272 	/* Hardware channels request dedicated hardware event rings */
273 	MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
274 	MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
275 };
276 
277 static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
278 	.max_channels = 128,
279 	.timeout_ms = 8000,
280 	.ready_timeout_ms = 50000,
281 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
282 	.ch_cfg = modem_qcom_v1_mhi_channels,
283 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
284 	.event_cfg = modem_qcom_v1_mhi_events,
285 };
286 
287 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
288 	.max_channels = 128,
289 	.timeout_ms = 8000,
290 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
291 	.ch_cfg = modem_qcom_v1_mhi_channels,
292 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
293 	.event_cfg = modem_qcom_v1_mhi_events,
294 };
295 
296 static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
297 	.name = "qcom-sdx75m",
298 	.fw = "qcom/sdx75m/xbl.elf",
299 	.edl = "qcom/sdx75m/edl.mbn",
300 	.edl_trigger = true,
301 	.config = &modem_qcom_v2_mhiv_config,
302 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
303 	.dma_data_width = 32,
304 	.sideband_wake = false,
305 };
306 
307 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
308 	.name = "qcom-sdx65m",
309 	.fw = "qcom/sdx65m/xbl.elf",
310 	.edl = "qcom/sdx65m/edl.mbn",
311 	.edl_trigger = true,
312 	.config = &modem_qcom_v1_mhiv_config,
313 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
314 	.dma_data_width = 32,
315 	.sideband_wake = false,
316 };
317 
318 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
319 	.name = "qcom-sdx55m",
320 	.fw = "qcom/sdx55m/sbl1.mbn",
321 	.edl = "qcom/sdx55m/edl.mbn",
322 	.edl_trigger = true,
323 	.config = &modem_qcom_v1_mhiv_config,
324 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
325 	.dma_data_width = 32,
326 	.mru_default = 32768,
327 	.sideband_wake = false,
328 };
329 
330 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
331 	.name = "qcom-sdx24",
332 	.edl = "qcom/prog_firehose_sdx24.mbn",
333 	.config = &modem_qcom_v1_mhiv_config,
334 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
335 	.dma_data_width = 32,
336 	.sideband_wake = true,
337 };
338 
339 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
340 	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
341 	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
342 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
343 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
344 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
345 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
346 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
347 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
348 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
349 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
350 	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
351 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
352 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
353 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
354 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
355 };
356 
357 static struct mhi_event_config mhi_quectel_em1xx_events[] = {
358 	MHI_EVENT_CONFIG_CTRL(0, 128),
359 	MHI_EVENT_CONFIG_DATA(1, 128),
360 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
361 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
362 };
363 
364 static const struct mhi_controller_config modem_quectel_em1xx_config = {
365 	.max_channels = 128,
366 	.timeout_ms = 20000,
367 	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
368 	.ch_cfg = mhi_quectel_em1xx_channels,
369 	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
370 	.event_cfg = mhi_quectel_em1xx_events,
371 };
372 
373 static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
374 	.name = "quectel-em1xx",
375 	.edl = "qcom/prog_firehose_sdx24.mbn",
376 	.config = &modem_quectel_em1xx_config,
377 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
378 	.dma_data_width = 32,
379 	.mru_default = 32768,
380 	.sideband_wake = true,
381 };
382 
383 static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
384 	.name = "quectel-rm5xx",
385 	.edl = "qcom/prog_firehose_sdx6x.elf",
386 	.config = &modem_quectel_em1xx_config,
387 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
388 	.dma_data_width = 32,
389 	.mru_default = 32768,
390 	.sideband_wake = true,
391 };
392 
393 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
394 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
395 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
396 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
397 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
398 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
399 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
400 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
401 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
402 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
403 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
404 };
405 
406 static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
407 	MHI_EVENT_CONFIG_CTRL(0, 128),
408 	MHI_EVENT_CONFIG_DATA(1, 128),
409 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
410 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
411 };
412 
413 static const struct mhi_controller_config modem_foxconn_sdx55_config = {
414 	.max_channels = 128,
415 	.timeout_ms = 20000,
416 	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
417 	.ch_cfg = mhi_foxconn_sdx55_channels,
418 	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
419 	.event_cfg = mhi_foxconn_sdx55_events,
420 };
421 
422 static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
423 	.name = "foxconn-sdx24",
424 	.config = &modem_foxconn_sdx55_config,
425 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
426 	.dma_data_width = 32,
427 	.mru_default = 32768,
428 	.sideband_wake = false,
429 };
430 
431 static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
432 	.name = "foxconn-sdx55",
433 	.fw = "qcom/sdx55m/sbl1.mbn",
434 	.edl = "qcom/sdx55m/edl.mbn",
435 	.config = &modem_foxconn_sdx55_config,
436 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
437 	.dma_data_width = 32,
438 	.mru_default = 32768,
439 	.sideband_wake = false,
440 };
441 
442 static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
443 	.name = "foxconn-sdx65",
444 	.config = &modem_foxconn_sdx55_config,
445 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
446 	.dma_data_width = 32,
447 	.mru_default = 32768,
448 	.sideband_wake = false,
449 };
450 
451 static const struct mhi_channel_config mhi_mv3x_channels[] = {
452 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
453 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
454 	/* MBIM Control Channel */
455 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
456 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
457 	/* MBIM Data Channel */
458 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
459 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
460 };
461 
462 static struct mhi_event_config mhi_mv3x_events[] = {
463 	MHI_EVENT_CONFIG_CTRL(0, 256),
464 	MHI_EVENT_CONFIG_DATA(1, 256),
465 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
466 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
467 };
468 
469 static const struct mhi_controller_config modem_mv3x_config = {
470 	.max_channels = 128,
471 	.timeout_ms = 20000,
472 	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
473 	.ch_cfg = mhi_mv3x_channels,
474 	.num_events = ARRAY_SIZE(mhi_mv3x_events),
475 	.event_cfg = mhi_mv3x_events,
476 };
477 
478 static const struct mhi_pci_dev_info mhi_mv31_info = {
479 	.name = "cinterion-mv31",
480 	.config = &modem_mv3x_config,
481 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
482 	.dma_data_width = 32,
483 	.mru_default = 32768,
484 };
485 
486 static const struct mhi_pci_dev_info mhi_mv32_info = {
487 	.name = "cinterion-mv32",
488 	.config = &modem_mv3x_config,
489 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
490 	.dma_data_width = 32,
491 	.mru_default = 32768,
492 };
493 
494 static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
495 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
496 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
497 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
498 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
499 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
500 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
501 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
502 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
503 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
504 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
505 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
506 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
507 };
508 
509 static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
510 	/* first ring is control+data and DIAG ring */
511 	MHI_EVENT_CONFIG_CTRL(0, 2048),
512 	/* Hardware channels request dedicated hardware event rings */
513 	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
514 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
515 };
516 
517 static const struct mhi_controller_config modem_sierra_em919x_config = {
518 	.max_channels = 128,
519 	.timeout_ms = 24000,
520 	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
521 	.ch_cfg = mhi_sierra_em919x_channels,
522 	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
523 	.event_cfg = modem_sierra_em919x_mhi_events,
524 };
525 
526 static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
527 	.name = "sierra-em919x",
528 	.config = &modem_sierra_em919x_config,
529 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
530 	.dma_data_width = 32,
531 	.sideband_wake = false,
532 };
533 
534 static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
535 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
536 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
537 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
538 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
539 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
540 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
541 };
542 
543 static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
544 	MHI_EVENT_CONFIG_CTRL(0, 128),
545 	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
546 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
547 };
548 
549 static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
550 	.max_channels = 128,
551 	.timeout_ms = 20000,
552 	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
553 	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
554 	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
555 	.event_cfg = mhi_telit_fn980_hw_v1_events,
556 };
557 
558 static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
559 	.name = "telit-fn980-hwv1",
560 	.fw = "qcom/sdx55m/sbl1.mbn",
561 	.edl = "qcom/sdx55m/edl.mbn",
562 	.config = &modem_telit_fn980_hw_v1_config,
563 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
564 	.dma_data_width = 32,
565 	.mru_default = 32768,
566 	.sideband_wake = false,
567 };
568 
569 static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
570 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
571 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
572 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
573 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
574 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
575 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
576 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
577 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
578 	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
579 	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
580 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
581 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
582 };
583 
584 static struct mhi_event_config mhi_telit_fn990_events[] = {
585 	MHI_EVENT_CONFIG_CTRL(0, 128),
586 	MHI_EVENT_CONFIG_DATA(1, 128),
587 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
588 	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
589 };
590 
591 static const struct mhi_controller_config modem_telit_fn990_config = {
592 	.max_channels = 128,
593 	.timeout_ms = 20000,
594 	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
595 	.ch_cfg = mhi_telit_fn990_channels,
596 	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
597 	.event_cfg = mhi_telit_fn990_events,
598 };
599 
600 static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
601 	.name = "telit-fn990",
602 	.config = &modem_telit_fn990_config,
603 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
604 	.dma_data_width = 32,
605 	.sideband_wake = false,
606 	.mru_default = 32768,
607 };
608 
609 /* Keep the list sorted by PID. Entries for a new VID should be added at the end */
610 static const struct pci_device_id mhi_pci_id_table[] = {
611 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
612 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
613 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
614 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
615 	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
616 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
617 		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
618 	/* Telit FN980 hardware revision v1 */
619 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
620 		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
621 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
622 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
623 	/* Telit FN990 */
624 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
625 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
626 	/* Telit FE990 */
627 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
628 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
629 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
630 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
631 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
632 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
633 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
634 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
635 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
636 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
637 	/* RM520N-GL (sdx6x), eSIM */
638 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
639 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
640 	/* RM520N-GL (sdx6x), Lenovo variant */
641 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
642 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
643 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
644 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
645 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
646 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
647 	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
648 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
649 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
650 	/* DW5930e (sdx55), with eSIM; it is also a T99W175 */
651 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
652 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
653 	/* DW5930e (sdx55), non-eSIM; it is also a T99W175 */
654 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
655 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
656 	/* T99W175 (sdx55), Based on Qualcomm new baseline */
657 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
658 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
659 	/* T99W175 (sdx55) */
660 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
661 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
662 	/* T99W368 (sdx65) */
663 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
664 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
665 	/* T99W373 (sdx62) */
666 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
667 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
668 	/* T99W510 (sdx24), variant 1 */
669 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
670 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
671 	/* T99W510 (sdx24), variant 2 */
672 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
673 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
674 	/* T99W510 (sdx24), variant 3 */
675 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
676 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
677 	/* DW5932e-eSIM (sdx62), With eSIM */
678 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
679 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
680 	/* DW5932e (sdx62), Non-eSIM */
681 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
682 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
683 	/* MV31-W (Cinterion) */
684 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
685 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
686 	/* MV31-W (Cinterion), based on new baseline */
687 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
688 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
689 	/* MV32-WA (Cinterion) */
690 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
691 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
692 	/* MV32-WB (Cinterion) */
693 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
694 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
695 	/* T99W175 (sdx55), HP variant */
696 	{ PCI_DEVICE(0x03f0, 0x0a6c),
697 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
698 	{  }
699 };
700 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
701 
702 enum mhi_pci_device_status {
703 	MHI_PCI_DEV_STARTED,
704 	MHI_PCI_DEV_SUSPENDED,
705 };
706 
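/*
 * Driver private data: embeds the MHI controller instance and carries the
 * cached PCI state used for recovery, the recovery work item and the
 * health-check timer.
 */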
707 struct mhi_pci_device {
708 	struct mhi_controller mhi_cntrl;
709 	struct pci_saved_state *pci_state;
710 	struct work_struct recovery_work;
711 	struct timer_list health_check_timer;
712 	unsigned long status;
713 };
714 
715 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
716 			    void __iomem *addr, u32 *out)
717 {
718 	*out = readl(addr);
719 	return 0;
720 }
721 
722 static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
723 			      void __iomem *addr, u32 val)
724 {
725 	writel(val, addr);
726 }
727 
728 static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
729 			      enum mhi_callback cb)
730 {
731 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
732 
733 	/* Forbid runtime PM on firmware crash, allow it once mission mode is reached */
734 	switch (cb) {
735 	case MHI_CB_FATAL_ERROR:
736 	case MHI_CB_SYS_ERROR:
737 		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
738 		pm_runtime_forbid(&pdev->dev);
739 		break;
740 	case MHI_CB_EE_MISSION_MODE:
741 		pm_runtime_allow(&pdev->dev);
742 		break;
743 	default:
744 		break;
745 	}
746 }
747 
748 static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
749 {
750 	/* no-op */
751 }
752 
753 static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
754 {
755 	/* no-op */
756 }
757 
758 static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
759 {
760 	/* no-op */
761 }
762 
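/*
 * Check device liveness by reading the vendor ID from PCI config space: a
 * failed read, an all-ones or a zero value means the device is gone or
 * unresponsive.
 */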
763 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
764 {
765 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
766 	u16 vendor = 0;
767 
768 	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
769 		return false;
770 
771 	if (vendor == (u16) ~0 || vendor == 0)
772 		return false;
773 
774 	return true;
775 }
776 
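/*
 * Enable the PCI device, map the MHI register BAR, set the DMA mask and enable
 * bus mastering; enabling and mapping are device-managed (pcim_*).
 */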
777 static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
778 			 unsigned int bar_num, u64 dma_mask)
779 {
780 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
781 	int err;
782 
783 	err = pci_assign_resource(pdev, bar_num);
784 	if (err)
785 		return err;
786 
787 	err = pcim_enable_device(pdev);
788 	if (err) {
789 		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
790 		return err;
791 	}
792 
793 	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
794 	if (err) {
795 		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
796 		return err;
797 	}
798 	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
799 	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
800 
801 	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
802 	if (err) {
803 		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
804 		return err;
805 	}
806 
807 	pci_set_master(pdev);
808 
809 	return 0;
810 }
811 
812 static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
813 			    const struct mhi_controller_config *mhi_cntrl_config)
814 {
815 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
816 	int nr_vectors, i;
817 	int *irq;
818 
819 	/*
820 	 * Ideally, allocate one MSI vector for BHI plus one vector per event ring.
821 	 * No explicit pci_free_irq_vectors() is needed, it is done by pcim_release().
822 	 */
823 	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
824 
825 	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
826 	if (nr_vectors < 0) {
827 		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
828 			nr_vectors);
829 		return nr_vectors;
830 	}
831 
832 	if (nr_vectors < mhi_cntrl->nr_irqs) {
833 		dev_warn(&pdev->dev, "using shared MSI\n");
834 
835 		/* Patch msi vectors, use only one (shared) */
836 		for (i = 0; i < mhi_cntrl_config->num_events; i++)
837 			mhi_cntrl_config->event_cfg[i].irq = 0;
838 		mhi_cntrl->nr_irqs = 1;
839 	}
840 
841 	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
842 	if (!irq)
843 		return -ENOMEM;
844 
845 	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
846 		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
847 
848 		irq[i] = pci_irq_vector(pdev, vector);
849 	}
850 
851 	mhi_cntrl->irq = irq;
852 
853 	return 0;
854 }
855 
856 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
857 {
858 	/* The runtime_get() MHI callback means:
859 	 *    Do whatever is requested to leave M3.
860 	 */
861 	return pm_runtime_get(mhi_cntrl->cntrl_dev);
862 }
863 
864 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
865 {
866 	/* The runtime_put() MHI callback means:
867 	 *    Device can be moved to the M3 state.
868 	 */
869 	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
870 	pm_runtime_put(mhi_cntrl->cntrl_dev);
871 }
872 
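/*
 * Recovery path, scheduled from the health-check timer or the PCI error
 * handlers: tear down MHI, restore the saved PCI state and try to power the
 * device back up, falling back to a PCI function reset on failure.
 */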
873 static void mhi_pci_recovery_work(struct work_struct *work)
874 {
875 	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
876 						       recovery_work);
877 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
878 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
879 	int err;
880 
881 	dev_warn(&pdev->dev, "device recovery started\n");
882 
883 	del_timer(&mhi_pdev->health_check_timer);
884 	pm_runtime_forbid(&pdev->dev);
885 
886 	/* Clean up MHI state */
887 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
888 		mhi_power_down(mhi_cntrl, false);
889 		mhi_unprepare_after_power_down(mhi_cntrl);
890 	}
891 
892 	pci_set_power_state(pdev, PCI_D0);
893 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
894 	pci_restore_state(pdev);
895 
896 	if (!mhi_pci_is_alive(mhi_cntrl))
897 		goto err_try_reset;
898 
899 	err = mhi_prepare_for_power_up(mhi_cntrl);
900 	if (err)
901 		goto err_try_reset;
902 
903 	err = mhi_sync_power_up(mhi_cntrl);
904 	if (err)
905 		goto err_unprepare;
906 
907 	dev_dbg(&pdev->dev, "Recovery completed\n");
908 
909 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
910 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
911 	return;
912 
913 err_unprepare:
914 	mhi_unprepare_after_power_down(mhi_cntrl);
915 err_try_reset:
916 	if (pci_reset_function(pdev))
917 		dev_err(&pdev->dev, "Recovery failed\n");
918 }
919 
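/* Periodic liveness check, re-armed every HEALTH_CHECK_PERIOD while started */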
920 static void health_check(struct timer_list *t)
921 {
922 	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
923 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
924 
925 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
926 			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
927 		return;
928 
929 	if (!mhi_pci_is_alive(mhi_cntrl)) {
930 		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
931 		queue_work(system_long_wq, &mhi_pdev->recovery_work);
932 		return;
933 	}
934 
935 	/* reschedule in two seconds */
936 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
937 }
938 
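/*
 * Trigger EDL mode: wake the device, write the EDL cookie to the channel 91
 * doorbell and issue a SoC reset so the device restarts in the emergency
 * download (flash programmer) environment.
 */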
939 static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl)
940 {
941 	void __iomem *base = mhi_cntrl->regs;
942 	void __iomem *edl_db;
943 	int ret;
944 	u32 val;
945 
946 	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
947 	if (ret) {
948 		dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n");
949 		return ret;
950 	}
951 
952 	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
953 	mhi_cntrl->runtime_get(mhi_cntrl);
954 
955 	ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
956 	if (ret)
957 		goto err_get_chdb;
958 
959 	edl_db = base + val + (8 * MHI_EDL_DB);
960 
961 	mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE));
962 	mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE));
963 
964 	mhi_soc_reset(mhi_cntrl);
965 
966 err_get_chdb:
967 	mhi_cntrl->runtime_put(mhi_cntrl);
968 	mhi_device_put(mhi_cntrl->mhi_dev);
969 
970 	return ret;
971 }
972 
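/*
 * Probe: register an MHI controller on top of the PCI device described by
 * id->driver_data, power it up and start the periodic health check.
 */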
973 static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
974 {
975 	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
976 	const struct mhi_controller_config *mhi_cntrl_config;
977 	struct mhi_pci_device *mhi_pdev;
978 	struct mhi_controller *mhi_cntrl;
979 	int err;
980 
981 	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
982 
983 	/* mhi_pdev.mhi_cntrl must be zero-initialized */
984 	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
985 	if (!mhi_pdev)
986 		return -ENOMEM;
987 
988 	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
989 	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
990 
991 	mhi_cntrl_config = info->config;
992 	mhi_cntrl = &mhi_pdev->mhi_cntrl;
993 
994 	mhi_cntrl->cntrl_dev = &pdev->dev;
995 	mhi_cntrl->iova_start = 0;
996 	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
997 	mhi_cntrl->fw_image = info->fw;
998 	mhi_cntrl->edl_image = info->edl;
999 
1000 	mhi_cntrl->read_reg = mhi_pci_read_reg;
1001 	mhi_cntrl->write_reg = mhi_pci_write_reg;
1002 	mhi_cntrl->status_cb = mhi_pci_status_cb;
1003 	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
1004 	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
1005 	mhi_cntrl->mru = info->mru_default;
1006 
1007 	if (info->edl_trigger)
1008 		mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
1009 
1010 	if (info->sideband_wake) {
1011 		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
1012 		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
1013 		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
1014 	}
1015 
1016 	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
1017 	if (err)
1018 		return err;
1019 
1020 	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
1021 	if (err)
1022 		return err;
1023 
1024 	pci_set_drvdata(pdev, mhi_pdev);
1025 
1026 	/* Keep the saved PCI config space at hand for restoring it after a sudden
1027 	 * PCI error: cache the state locally and discard the PCI core one.
1028 	 */
1029 	pci_save_state(pdev);
1030 	mhi_pdev->pci_state = pci_store_saved_state(pdev);
1031 	pci_load_saved_state(pdev, NULL);
1032 
1033 	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
1034 	if (err)
1035 		return err;
1036 
1037 	/* MHI bus does not power up the controller by default */
1038 	err = mhi_prepare_for_power_up(mhi_cntrl);
1039 	if (err) {
1040 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1041 		goto err_unregister;
1042 	}
1043 
1044 	err = mhi_sync_power_up(mhi_cntrl);
1045 	if (err) {
1046 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
1047 		goto err_unprepare;
1048 	}
1049 
1050 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1051 
1052 	/* start health check */
1053 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1054 
1055 	/* Only allow runtime-suspend if PME capable (for wakeup) */
1056 	if (pci_pme_capable(pdev, PCI_D3hot)) {
1057 		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
1058 		pm_runtime_use_autosuspend(&pdev->dev);
1059 		pm_runtime_mark_last_busy(&pdev->dev);
1060 		pm_runtime_put_noidle(&pdev->dev);
1061 	}
1062 
1063 	return 0;
1064 
1065 err_unprepare:
1066 	mhi_unprepare_after_power_down(mhi_cntrl);
1067 err_unregister:
1068 	mhi_unregister_controller(mhi_cntrl);
1069 
1070 	return err;
1071 }
1072 
1073 static void mhi_pci_remove(struct pci_dev *pdev)
1074 {
1075 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1076 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1077 
1078 	del_timer_sync(&mhi_pdev->health_check_timer);
1079 	cancel_work_sync(&mhi_pdev->recovery_work);
1080 
1081 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1082 		mhi_power_down(mhi_cntrl, true);
1083 		mhi_unprepare_after_power_down(mhi_cntrl);
1084 	}
1085 
1086 	/* Balance the pm_runtime_put_noidle() done in probe */
1087 	if (pci_pme_capable(pdev, PCI_D3hot))
1088 		pm_runtime_get_noresume(&pdev->dev);
1089 
1090 	mhi_unregister_controller(mhi_cntrl);
1091 }
1092 
1093 static void mhi_pci_shutdown(struct pci_dev *pdev)
1094 {
1095 	mhi_pci_remove(pdev);
1096 	pci_set_power_state(pdev, PCI_D3hot);
1097 }
1098 
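/*
 * PCI reset hooks: power MHI down and request a SoC reset before the function
 * reset, then restore the saved PCI state and power MHI back up once the reset
 * is done.
 */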
1099 static void mhi_pci_reset_prepare(struct pci_dev *pdev)
1100 {
1101 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1102 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1103 
1104 	dev_info(&pdev->dev, "reset\n");
1105 
1106 	del_timer(&mhi_pdev->health_check_timer);
1107 
1108 	/* Clean up MHI state */
1109 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1110 		mhi_power_down(mhi_cntrl, false);
1111 		mhi_unprepare_after_power_down(mhi_cntrl);
1112 	}
1113 
1114 	/* cause internal device reset */
1115 	mhi_soc_reset(mhi_cntrl);
1116 
1117 	/* Make sure the device reset has completed */
1118 	msleep(MHI_POST_RESET_DELAY_MS);
1119 }
1120 
1121 static void mhi_pci_reset_done(struct pci_dev *pdev)
1122 {
1123 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1124 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1125 	int err;
1126 
1127 	/* Restore initial known working PCI state */
1128 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
1129 	pci_restore_state(pdev);
1130 
1131 	/* Is the device status available? */
1132 	if (!mhi_pci_is_alive(mhi_cntrl)) {
1133 		dev_err(&pdev->dev, "reset failed\n");
1134 		return;
1135 	}
1136 
1137 	err = mhi_prepare_for_power_up(mhi_cntrl);
1138 	if (err) {
1139 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1140 		return;
1141 	}
1142 
1143 	err = mhi_sync_power_up(mhi_cntrl);
1144 	if (err) {
1145 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
1146 		mhi_unprepare_after_power_down(mhi_cntrl);
1147 		return;
1148 	}
1149 
1150 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1151 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1152 }
1153 
1154 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1155 					       pci_channel_state_t state)
1156 {
1157 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1158 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1159 
1160 	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1161 
1162 	if (state == pci_channel_io_perm_failure)
1163 		return PCI_ERS_RESULT_DISCONNECT;
1164 
1165 	/* Clean up MHI state */
1166 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1167 		mhi_power_down(mhi_cntrl, false);
1168 		mhi_unprepare_after_power_down(mhi_cntrl);
1169 	} else {
1170 		/* Nothing to do */
1171 		return PCI_ERS_RESULT_RECOVERED;
1172 	}
1173 
1174 	pci_disable_device(pdev);
1175 
1176 	return PCI_ERS_RESULT_NEED_RESET;
1177 }
1178 
1179 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1180 {
1181 	if (pci_enable_device(pdev)) {
1182 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1183 		return PCI_ERS_RESULT_DISCONNECT;
1184 	}
1185 
1186 	return PCI_ERS_RESULT_RECOVERED;
1187 }
1188 
1189 static void mhi_pci_io_resume(struct pci_dev *pdev)
1190 {
1191 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1192 
1193 	dev_err(&pdev->dev, "PCI slot reset done\n");
1194 
1195 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1196 }
1197 
1198 static const struct pci_error_handlers mhi_pci_err_handler = {
1199 	.error_detected = mhi_pci_error_detected,
1200 	.slot_reset = mhi_pci_slot_reset,
1201 	.resume = mhi_pci_io_resume,
1202 	.reset_prepare = mhi_pci_reset_prepare,
1203 	.reset_done = mhi_pci_reset_done,
1204 };
1205 
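/*
 * Runtime PM: suspend moves the MHI link to M3 (when in mission mode) and arms
 * D3 wakeup; resume reverses this, restarts the health check, and schedules
 * the recovery work if the device cannot be resumed normally.
 */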
1206 static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
1207 {
1208 	struct pci_dev *pdev = to_pci_dev(dev);
1209 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1210 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1211 	int err;
1212 
1213 	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1214 		return 0;
1215 
1216 	del_timer(&mhi_pdev->health_check_timer);
1217 	cancel_work_sync(&mhi_pdev->recovery_work);
1218 
1219 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1220 			mhi_cntrl->ee != MHI_EE_AMSS)
1221 		goto pci_suspend; /* Nothing to do at MHI level */
1222 
1223 	/* Transition to M3 state */
1224 	err = mhi_pm_suspend(mhi_cntrl);
1225 	if (err) {
1226 		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
1227 		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
1228 		return -EBUSY;
1229 	}
1230 
1231 pci_suspend:
1232 	pci_disable_device(pdev);
1233 	pci_wake_from_d3(pdev, true);
1234 
1235 	return 0;
1236 }
1237 
1238 static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
1239 {
1240 	struct pci_dev *pdev = to_pci_dev(dev);
1241 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1242 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1243 	int err;
1244 
1245 	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1246 		return 0;
1247 
1248 	err = pci_enable_device(pdev);
1249 	if (err)
1250 		goto err_recovery;
1251 
1252 	pci_set_master(pdev);
1253 	pci_wake_from_d3(pdev, false);
1254 
1255 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1256 			mhi_cntrl->ee != MHI_EE_AMSS)
1257 		return 0; /* Nothing to do at MHI level */
1258 
1259 	/* Exit M3, transition to M0 state */
1260 	err = mhi_pm_resume(mhi_cntrl);
1261 	if (err) {
1262 		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
1263 		goto err_recovery;
1264 	}
1265 
1266 	/* Resume health check */
1267 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1268 
1269 	/* This may be a remote wakeup (no MHI runtime_get), so update the access time */
1270 	pm_runtime_mark_last_busy(dev);
1271 
1272 	return 0;
1273 
1274 err_recovery:
1275 	/* Do not return an error here, to avoid messing up the PCI device state:
1276 	 * the device likely lost power (D3cold) and simply needs to be reset from
1277 	 * the recovery procedure. Trigger the recovery asynchronously so that
1278 	 * system suspend exit is not delayed.
1279 	 */
1280 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1281 	pm_runtime_mark_last_busy(dev);
1282 
1283 	return 0;
1284 }
1285 
1286 static int __maybe_unused mhi_pci_suspend(struct device *dev)
1287 {
1288 	pm_runtime_disable(dev);
1289 	return mhi_pci_runtime_suspend(dev);
1290 }
1291 
1292 static int __maybe_unused mhi_pci_resume(struct device *dev)
1293 {
1294 	int ret;
1295 
1296 	/* Depending on the platform, the device may have lost power (D3cold); we
1297 	 * need to resume it now to check its state and recover when necessary.
1298 	 */
1299 	ret = mhi_pci_runtime_resume(dev);
1300 	pm_runtime_enable(dev);
1301 
1302 	return ret;
1303 }
1304 
1305 static int __maybe_unused mhi_pci_freeze(struct device *dev)
1306 {
1307 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1308 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1309 
1310 	/* We want to stop all operations because hibernation does not guarantee
1311 	 * that the device will be in the same state as before freezing, especially
1312 	 * if the intermediate restore kernel reinitializes the MHI device with a
1313 	 * new context.
1314 	 */
1315 	flush_work(&mhi_pdev->recovery_work);
1316 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1317 		mhi_power_down(mhi_cntrl, true);
1318 		mhi_unprepare_after_power_down(mhi_cntrl);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 static int __maybe_unused mhi_pci_restore(struct device *dev)
1325 {
1326 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1327 
1328 	/* Reinitialize the device */
1329 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1330 
1331 	return 0;
1332 }
1333 
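/*
 * System sleep reuses the runtime PM callbacks, while hibernation
 * (freeze/poweroff/thaw/restore) fully powers MHI down and reinitializes it
 * through the recovery work.
 */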
1334 static const struct dev_pm_ops mhi_pci_pm_ops = {
1335 	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
1336 #ifdef CONFIG_PM_SLEEP
1337 	.suspend = mhi_pci_suspend,
1338 	.resume = mhi_pci_resume,
1339 	.freeze = mhi_pci_freeze,
1340 	.thaw = mhi_pci_restore,
1341 	.poweroff = mhi_pci_freeze,
1342 	.restore = mhi_pci_restore,
1343 #endif
1344 };
1345 
1346 static struct pci_driver mhi_pci_driver = {
1347 	.name		= "mhi-pci-generic",
1348 	.id_table	= mhi_pci_id_table,
1349 	.probe		= mhi_pci_probe,
1350 	.remove		= mhi_pci_remove,
1351 	.shutdown	= mhi_pci_shutdown,
1352 	.err_handler	= &mhi_pci_err_handler,
1353 	.driver.pm	= &mhi_pci_pm_ops
1354 };
1355 module_pci_driver(mhi_pci_driver);
1356 
1357 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1358 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1359 MODULE_LICENSE("GPL");
1360