1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MHI PCI driver - MHI over PCI controller driver
4  *
5  * This module is a generic driver for registering MHI-over-PCI devices,
6  * such as PCIe QCOM modems.
7  *
8  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19 
20 #define MHI_PCI_DEFAULT_BAR_NUM 0
21 
22 #define MHI_POST_RESET_DELAY_MS 2000
23 
24 #define HEALTH_CHECK_PERIOD (HZ * 2)
25 
26 /* PCI VID definitions */
27 #define PCI_VENDOR_ID_THALES	0x1269
28 #define PCI_VENDOR_ID_QUECTEL	0x1eac
29 
30 /**
31  * struct mhi_pci_dev_info - MHI PCI device specific information
32  * @config: MHI controller configuration
33  * @name: name of the PCI module
34  * @fw: firmware path (if any)
35  * @edl: emergency download mode firmware path (if any)
36  * @bar_num: PCI base address register to use for MHI MMIO register space
37  * @dma_data_width: DMA transfer word size (32 or 64 bits)
38  * @mru_default: default MRU size for MBIM network packets
39  * @sideband_wake: set when the device uses a dedicated sideband GPIO for
40  *		   wakeup instead of inband wake support (such as sdx24)
41  */
42 struct mhi_pci_dev_info {
43 	const struct mhi_controller_config *config;
44 	const char *name;
45 	const char *fw;
46 	const char *edl;
47 	unsigned int bar_num;
48 	unsigned int dma_data_width;
49 	unsigned int mru_default;
50 	bool sideband_wake;
51 };
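/*
 * One static mhi_pci_dev_info instance is defined below for each supported
 * modem. The PCI ID table at the bottom of this file points at it through
 * .driver_data, and mhi_pci_probe() uses it to size the DMA mask, pick the
 * MMIO BAR and select the MHI controller configuration.
 */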
52 
53 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
54 	{						\
55 		.num = ch_num,				\
56 		.name = ch_name,			\
57 		.num_elements = el_count,		\
58 		.event_ring = ev_ring,			\
59 		.dir = DMA_TO_DEVICE,			\
60 		.ee_mask = BIT(MHI_EE_AMSS),		\
61 		.pollcfg = 0,				\
62 		.doorbell = MHI_DB_BRST_DISABLE,	\
63 		.lpm_notify = false,			\
64 		.offload_channel = false,		\
65 		.doorbell_mode_switch = false,		\
66 	}						\
67 
68 #define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
69 	{						\
70 		.num = ch_num,				\
71 		.name = ch_name,			\
72 		.num_elements = el_count,		\
73 		.event_ring = ev_ring,			\
74 		.dir = DMA_FROM_DEVICE,			\
75 		.ee_mask = BIT(MHI_EE_AMSS),		\
76 		.pollcfg = 0,				\
77 		.doorbell = MHI_DB_BRST_DISABLE,	\
78 		.lpm_notify = false,			\
79 		.offload_channel = false,		\
80 		.doorbell_mode_switch = false,		\
81 	}
82 
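/*
 * For example, MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), as used in the
 * modem_qcom_v1_mhi_channels table below, expands to a channel entry with:
 *
 *	.num = 4, .name = "DIAG", .num_elements = 16, .event_ring = 1,
 *	.dir = DMA_TO_DEVICE, .ee_mask = BIT(MHI_EE_AMSS),
 *	.doorbell = MHI_DB_BRST_DISABLE,
 *
 * and all remaining fields cleared/false. The _DL variant only differs by
 * .dir = DMA_FROM_DEVICE.
 */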
83 #define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
84 	{						\
85 		.num = ch_num,				\
86 		.name = ch_name,			\
87 		.num_elements = el_count,		\
88 		.event_ring = ev_ring,			\
89 		.dir = DMA_FROM_DEVICE,			\
90 		.ee_mask = BIT(MHI_EE_AMSS),		\
91 		.pollcfg = 0,				\
92 		.doorbell = MHI_DB_BRST_DISABLE,	\
93 		.lpm_notify = false,			\
94 		.offload_channel = false,		\
95 		.doorbell_mode_switch = false,		\
96 		.auto_queue = true,			\
97 	}
98 
99 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
100 	{					\
101 		.num_elements = el_count,	\
102 		.irq_moderation_ms = 0,		\
103 		.irq = (ev_ring) + 1,		\
104 		.priority = 1,			\
105 		.mode = MHI_DB_BRST_DISABLE,	\
106 		.data_type = MHI_ER_CTRL,	\
107 		.hardware_event = false,	\
108 		.client_managed = false,	\
109 		.offload_channel = false,	\
110 	}
111 
112 #define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
113 	{						\
114 		.num = ch_num,				\
115 		.name = ch_name,			\
116 		.num_elements = el_count,		\
117 		.event_ring = ev_ring,			\
118 		.dir = DMA_TO_DEVICE,			\
119 		.ee_mask = BIT(MHI_EE_AMSS),		\
120 		.pollcfg = 0,				\
121 		.doorbell = MHI_DB_BRST_ENABLE,	\
122 		.lpm_notify = false,			\
123 		.offload_channel = false,		\
124 		.doorbell_mode_switch = true,		\
125 	}						\
126 
127 #define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
128 	{						\
129 		.num = ch_num,				\
130 		.name = ch_name,			\
131 		.num_elements = el_count,		\
132 		.event_ring = ev_ring,			\
133 		.dir = DMA_FROM_DEVICE,			\
134 		.ee_mask = BIT(MHI_EE_AMSS),		\
135 		.pollcfg = 0,				\
136 		.doorbell = MHI_DB_BRST_ENABLE,	\
137 		.lpm_notify = false,			\
138 		.offload_channel = false,		\
139 		.doorbell_mode_switch = true,		\
140 	}
141 
142 #define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
143 	{						\
144 		.num = ch_num,				\
145 		.name = ch_name,			\
146 		.num_elements = el_count,		\
147 		.event_ring = ev_ring,			\
148 		.dir = DMA_TO_DEVICE,			\
149 		.ee_mask = BIT(MHI_EE_SBL),		\
150 		.pollcfg = 0,				\
151 		.doorbell = MHI_DB_BRST_DISABLE,	\
152 		.lpm_notify = false,			\
153 		.offload_channel = false,		\
154 		.doorbell_mode_switch = false,		\
155 	}						\
156 
157 #define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
158 	{						\
159 		.num = ch_num,				\
160 		.name = ch_name,			\
161 		.num_elements = el_count,		\
162 		.event_ring = ev_ring,			\
163 		.dir = DMA_FROM_DEVICE,			\
164 		.ee_mask = BIT(MHI_EE_SBL),		\
165 		.pollcfg = 0,				\
166 		.doorbell = MHI_DB_BRST_DISABLE,	\
167 		.lpm_notify = false,			\
168 		.offload_channel = false,		\
169 		.doorbell_mode_switch = false,		\
170 	}
171 
172 #define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
173 	{						\
174 		.num = ch_num,				\
175 		.name = ch_name,			\
176 		.num_elements = el_count,		\
177 		.event_ring = ev_ring,			\
178 		.dir = DMA_TO_DEVICE,			\
179 		.ee_mask = BIT(MHI_EE_FP),		\
180 		.pollcfg = 0,				\
181 		.doorbell = MHI_DB_BRST_DISABLE,	\
182 		.lpm_notify = false,			\
183 		.offload_channel = false,		\
184 		.doorbell_mode_switch = false,		\
185 	}						\
186 
187 #define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
188 	{						\
189 		.num = ch_num,				\
190 		.name = ch_name,			\
191 		.num_elements = el_count,		\
192 		.event_ring = ev_ring,			\
193 		.dir = DMA_FROM_DEVICE,			\
194 		.ee_mask = BIT(MHI_EE_FP),		\
195 		.pollcfg = 0,				\
196 		.doorbell = MHI_DB_BRST_DISABLE,	\
197 		.lpm_notify = false,			\
198 		.offload_channel = false,		\
199 		.doorbell_mode_switch = false,		\
200 	}
201 
202 #define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
203 	{					\
204 		.num_elements = el_count,	\
205 		.irq_moderation_ms = 5,		\
206 		.irq = (ev_ring) + 1,		\
207 		.priority = 1,			\
208 		.mode = MHI_DB_BRST_DISABLE,	\
209 		.data_type = MHI_ER_DATA,	\
210 		.hardware_event = false,	\
211 		.client_managed = false,	\
212 		.offload_channel = false,	\
213 	}
214 
215 #define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
216 	{					\
217 		.num_elements = el_count,	\
218 		.irq_moderation_ms = 0,		\
219 		.irq = (ev_ring) + 1,		\
220 		.priority = 1,			\
221 		.mode = MHI_DB_BRST_DISABLE,	\
222 		.data_type = MHI_ER_DATA,	\
223 		.hardware_event = false,	\
224 		.client_managed = false,	\
225 		.offload_channel = false,	\
226 	}
227 
228 #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
229 	{					\
230 		.num_elements = el_count,	\
231 		.irq_moderation_ms = 1,		\
232 		.irq = (ev_ring) + 1,		\
233 		.priority = 1,			\
234 		.mode = MHI_DB_BRST_DISABLE,	\
235 		.data_type = MHI_ER_DATA,	\
236 		.hardware_event = true,		\
237 		.client_managed = false,	\
238 		.offload_channel = false,	\
239 		.channel = ch_num,		\
240 	}
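/*
 * The .irq = (ev_ring) + 1 convention in the event macros above matches the
 * MSI layout set up in mhi_pci_get_irqs(): vector 0 is reserved for BHI and
 * each event ring gets the next vector. When fewer vectors are granted, the
 * event configs are patched at probe time to share vector 0.
 */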
241 
242 static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
243 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
244 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
245 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
246 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
247 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
248 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
249 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
250 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
251 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
252 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
253 	MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
254 	MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
255 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
256 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
257 };
258 
259 static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
260 	/* first ring is control+data ring */
261 	MHI_EVENT_CONFIG_CTRL(0, 64),
262 	/* DIAG dedicated event ring */
263 	MHI_EVENT_CONFIG_DATA(1, 128),
264 	/* Software channels dedicated event ring */
265 	MHI_EVENT_CONFIG_SW_DATA(2, 64),
266 	MHI_EVENT_CONFIG_SW_DATA(3, 64),
267 	/* Hardware channels request dedicated hardware event rings */
268 	MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
269 	MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
270 };
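/*
 * In this layout, ring 0 carries control plus the low-rate channels (MBIM,
 * QMI, IPCR, FIREHOSE), ring 1 is dedicated to DIAG, rings 2/3 serve the
 * IP_SW0 software data path and rings 4/5 the IP_HW0 hardware data path.
 */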
271 
272 static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
273 	.max_channels = 128,
274 	.timeout_ms = 8000,
275 	.ready_timeout_ms = 50000,
276 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
277 	.ch_cfg = modem_qcom_v1_mhi_channels,
278 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
279 	.event_cfg = modem_qcom_v1_mhi_events,
280 };
281 
282 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
283 	.max_channels = 128,
284 	.timeout_ms = 8000,
285 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
286 	.ch_cfg = modem_qcom_v1_mhi_channels,
287 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
288 	.event_cfg = modem_qcom_v1_mhi_events,
289 };
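/*
 * The v2 config above is identical to v1 except that it also bounds the wait
 * for the READY state with ready_timeout_ms (used by the sdx75 class device).
 */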
290 
291 static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
292 	.name = "qcom-sdx75m",
293 	.fw = "qcom/sdx75m/xbl.elf",
294 	.edl = "qcom/sdx75m/edl.mbn",
295 	.config = &modem_qcom_v2_mhiv_config,
296 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
297 	.dma_data_width = 32,
298 	.sideband_wake = false,
299 };
300 
301 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
302 	.name = "qcom-sdx65m",
303 	.fw = "qcom/sdx65m/xbl.elf",
304 	.edl = "qcom/sdx65m/edl.mbn",
305 	.config = &modem_qcom_v1_mhiv_config,
306 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
307 	.dma_data_width = 32,
308 	.sideband_wake = false,
309 };
310 
311 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
312 	.name = "qcom-sdx55m",
313 	.fw = "qcom/sdx55m/sbl1.mbn",
314 	.edl = "qcom/sdx55m/edl.mbn",
315 	.config = &modem_qcom_v1_mhiv_config,
316 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
317 	.dma_data_width = 32,
318 	.mru_default = 32768,
319 	.sideband_wake = false,
320 };
321 
322 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
323 	.name = "qcom-sdx24",
324 	.edl = "qcom/prog_firehose_sdx24.mbn",
325 	.config = &modem_qcom_v1_mhiv_config,
326 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
327 	.dma_data_width = 32,
328 	.sideband_wake = true,
329 };
330 
331 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
332 	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
333 	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
334 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
335 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
336 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
337 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
338 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
339 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
340 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
341 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
342 	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
343 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
344 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
345 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
346 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
347 };
348 
349 static struct mhi_event_config mhi_quectel_em1xx_events[] = {
350 	MHI_EVENT_CONFIG_CTRL(0, 128),
351 	MHI_EVENT_CONFIG_DATA(1, 128),
352 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
353 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
354 };
355 
356 static const struct mhi_controller_config modem_quectel_em1xx_config = {
357 	.max_channels = 128,
358 	.timeout_ms = 20000,
359 	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
360 	.ch_cfg = mhi_quectel_em1xx_channels,
361 	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
362 	.event_cfg = mhi_quectel_em1xx_events,
363 };
364 
365 static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
366 	.name = "quectel-em1xx",
367 	.edl = "qcom/prog_firehose_sdx24.mbn",
368 	.config = &modem_quectel_em1xx_config,
369 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
370 	.dma_data_width = 32,
371 	.mru_default = 32768,
372 	.sideband_wake = true,
373 };
374 
375 static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
376 	.name = "quectel-rm5xx",
377 	.edl = "qcom/prog_firehose_sdx6x.elf",
378 	.config = &modem_quectel_em1xx_config,
379 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
380 	.dma_data_width = 32,
381 	.mru_default = 32768,
382 	.sideband_wake = true,
383 };
384 
385 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
386 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
387 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
388 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
389 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
390 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
391 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
392 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
393 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
394 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
395 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
396 };
397 
398 static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
399 	MHI_EVENT_CONFIG_CTRL(0, 128),
400 	MHI_EVENT_CONFIG_DATA(1, 128),
401 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
402 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
403 };
404 
405 static const struct mhi_controller_config modem_foxconn_sdx55_config = {
406 	.max_channels = 128,
407 	.timeout_ms = 20000,
408 	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
409 	.ch_cfg = mhi_foxconn_sdx55_channels,
410 	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
411 	.event_cfg = mhi_foxconn_sdx55_events,
412 };
413 
414 static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
415 	.name = "foxconn-sdx24",
416 	.config = &modem_foxconn_sdx55_config,
417 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
418 	.dma_data_width = 32,
419 	.mru_default = 32768,
420 	.sideband_wake = false,
421 };
422 
423 static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
424 	.name = "foxconn-sdx55",
425 	.fw = "qcom/sdx55m/sbl1.mbn",
426 	.edl = "qcom/sdx55m/edl.mbn",
427 	.config = &modem_foxconn_sdx55_config,
428 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
429 	.dma_data_width = 32,
430 	.mru_default = 32768,
431 	.sideband_wake = false,
432 };
433 
434 static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
435 	.name = "foxconn-sdx65",
436 	.config = &modem_foxconn_sdx55_config,
437 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
438 	.dma_data_width = 32,
439 	.mru_default = 32768,
440 	.sideband_wake = false,
441 };
442 
443 static const struct mhi_channel_config mhi_mv3x_channels[] = {
444 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
445 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
446 	/* MBIM Control Channel */
447 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
448 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
449 	/* MBIM Data Channel */
450 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
451 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
452 };
453 
454 static struct mhi_event_config mhi_mv3x_events[] = {
455 	MHI_EVENT_CONFIG_CTRL(0, 256),
456 	MHI_EVENT_CONFIG_DATA(1, 256),
457 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
458 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
459 };
460 
461 static const struct mhi_controller_config modem_mv3x_config = {
462 	.max_channels = 128,
463 	.timeout_ms = 20000,
464 	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
465 	.ch_cfg = mhi_mv3x_channels,
466 	.num_events = ARRAY_SIZE(mhi_mv3x_events),
467 	.event_cfg = mhi_mv3x_events,
468 };
469 
470 static const struct mhi_pci_dev_info mhi_mv31_info = {
471 	.name = "cinterion-mv31",
472 	.config = &modem_mv3x_config,
473 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
474 	.dma_data_width = 32,
475 	.mru_default = 32768,
476 };
477 
478 static const struct mhi_pci_dev_info mhi_mv32_info = {
479 	.name = "cinterion-mv32",
480 	.config = &modem_mv3x_config,
481 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
482 	.dma_data_width = 32,
483 	.mru_default = 32768,
484 };
485 
486 static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
487 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
488 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
489 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
490 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
491 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
492 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
493 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
494 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
495 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
496 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
497 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
498 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
499 };
500 
501 static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
502 	/* first ring is control+data and DIAG ring */
503 	MHI_EVENT_CONFIG_CTRL(0, 2048),
504 	/* Hardware channels request dedicated hardware event rings */
505 	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
506 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
507 };
508 
509 static const struct mhi_controller_config modem_sierra_em919x_config = {
510 	.max_channels = 128,
511 	.timeout_ms = 24000,
512 	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
513 	.ch_cfg = mhi_sierra_em919x_channels,
514 	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
515 	.event_cfg = modem_sierra_em919x_mhi_events,
516 };
517 
518 static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
519 	.name = "sierra-em919x",
520 	.config = &modem_sierra_em919x_config,
521 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
522 	.dma_data_width = 32,
523 	.sideband_wake = false,
524 };
525 
526 static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
527 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
528 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
529 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
530 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
531 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
532 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
533 };
534 
535 static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
536 	MHI_EVENT_CONFIG_CTRL(0, 128),
537 	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
538 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
539 };
540 
541 static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
542 	.max_channels = 128,
543 	.timeout_ms = 20000,
544 	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
545 	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
546 	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
547 	.event_cfg = mhi_telit_fn980_hw_v1_events,
548 };
549 
550 static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
551 	.name = "telit-fn980-hwv1",
552 	.fw = "qcom/sdx55m/sbl1.mbn",
553 	.edl = "qcom/sdx55m/edl.mbn",
554 	.config = &modem_telit_fn980_hw_v1_config,
555 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
556 	.dma_data_width = 32,
557 	.mru_default = 32768,
558 	.sideband_wake = false,
559 };
560 
561 static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
562 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
563 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
564 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
565 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
566 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
567 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
568 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
569 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
570 	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
571 	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
572 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
573 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
574 };
575 
576 static struct mhi_event_config mhi_telit_fn990_events[] = {
577 	MHI_EVENT_CONFIG_CTRL(0, 128),
578 	MHI_EVENT_CONFIG_DATA(1, 128),
579 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
580 	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
581 };
582 
583 static const struct mhi_controller_config modem_telit_fn990_config = {
584 	.max_channels = 128,
585 	.timeout_ms = 20000,
586 	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
587 	.ch_cfg = mhi_telit_fn990_channels,
588 	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
589 	.event_cfg = mhi_telit_fn990_events,
590 };
591 
592 static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
593 	.name = "telit-fn990",
594 	.config = &modem_telit_fn990_config,
595 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
596 	.dma_data_width = 32,
597 	.sideband_wake = false,
598 	.mru_default = 32768,
599 };
600 
601 /* Keep the list sorted by PID. Entries for a new VID should be added at the end. */
602 static const struct pci_device_id mhi_pci_id_table[] = {
603 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
604 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
605 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
606 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
607 	/* EM919x (sdx55), uses the same vid:pid as qcom-sdx55m */
608 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
609 		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
610 	/* Telit FN980 hardware revision v1 */
611 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
612 		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
613 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
614 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
615 	/* Telit FN990 */
616 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
617 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
618 	/* Telit FE990 */
619 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
620 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
621 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
622 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
623 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
624 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
625 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
626 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
627 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
628 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
629 	/* RM520N-GL (sdx6x), eSIM */
630 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
631 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
632 	/* RM520N-GL (sdx6x), Lenovo variant */
633 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
634 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
635 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
636 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
637 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
638 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
639 	/* T99W175 (sdx55), both eSIM and non-eSIM variants */
640 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
641 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
642 	/* DW5930e (sdx55), with eSIM; also a T99W175 */
643 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
644 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
645 	/* DW5930e (sdx55), non-eSIM; also a T99W175 */
646 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
647 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
648 	/* T99W175 (sdx55), Based on Qualcomm new baseline */
649 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
650 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
651 	/* T99W175 (sdx55) */
652 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
653 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
654 	/* T99W368 (sdx65) */
655 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
656 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
657 	/* T99W373 (sdx62) */
658 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
659 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
660 	/* T99W510 (sdx24), variant 1 */
661 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
662 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
663 	/* T99W510 (sdx24), variant 2 */
664 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
665 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
666 	/* T99W510 (sdx24), variant 3 */
667 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
668 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
669 	/* DW5932e-eSIM (sdx62), With eSIM */
670 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
671 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
672 	/* DW5932e (sdx62), Non-eSIM */
673 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
674 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
675 	/* MV31-W (Cinterion) */
676 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
677 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
678 	/* MV31-W (Cinterion), based on new baseline */
679 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
680 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
681 	/* MV32-WA (Cinterion) */
682 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
683 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
684 	/* MV32-WB (Cinterion) */
685 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
686 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
687 	/* T99W175 (sdx55), HP variant */
688 	{ PCI_DEVICE(0x03f0, 0x0a6c),
689 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
690 	{  }
691 };
692 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
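/*
 * The table above is matched in order, so the subsystem-specific
 * PCI_DEVICE_SUB() entries (e.g. Sierra EM919x and Telit FN980 on the QCOM
 * 0x0306 device ID) must come before the generic PCI_DEVICE() entry for the
 * same ID. Adding support for a new modem means adding an entry such as the
 * hypothetical one below (the IDs are placeholders, not real hardware):
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1234),
 *		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
 *
 * pointing at the matching mhi_pci_dev_info instance above.
 */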
693 
694 enum mhi_pci_device_status {
695 	MHI_PCI_DEV_STARTED,
696 	MHI_PCI_DEV_SUSPENDED,
697 };
698 
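/*
 * Per-device driver state. MHI_PCI_DEV_STARTED tracks whether the MHI stack
 * is powered up; MHI_PCI_DEV_SUSPENDED is set while the device is (runtime)
 * suspended. The health_check_timer polls the device every
 * HEALTH_CHECK_PERIOD and queues recovery_work when it stops responding.
 */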
699 struct mhi_pci_device {
700 	struct mhi_controller mhi_cntrl;
701 	struct pci_saved_state *pci_state;
702 	struct work_struct recovery_work;
703 	struct timer_list health_check_timer;
704 	unsigned long status;
705 };
706 
707 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
708 			    void __iomem *addr, u32 *out)
709 {
710 	*out = readl(addr);
711 	return 0;
712 }
713 
714 static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
715 			      void __iomem *addr, u32 val)
716 {
717 	writel(val, addr);
718 }
719 
720 static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
721 			      enum mhi_callback cb)
722 {
723 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
724 
725 	/* Forbid runtime suspend on firmware error, re-allow it in mission mode */
726 	switch (cb) {
727 	case MHI_CB_FATAL_ERROR:
728 	case MHI_CB_SYS_ERROR:
729 		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
730 		pm_runtime_forbid(&pdev->dev);
731 		break;
732 	case MHI_CB_EE_MISSION_MODE:
733 		pm_runtime_allow(&pdev->dev);
734 		break;
735 	default:
736 		break;
737 	}
738 }
739 
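/*
 * Stubbed wake hooks, installed at probe time only for devices flagged with
 * sideband_wake (e.g. sdx24): those rely on a dedicated sideband GPIO rather
 * than the inband wake mechanism, so the MHI core wake callbacks have nothing
 * to do here.
 */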
740 static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
741 {
742 	/* no-op */
743 }
744 
745 static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
746 {
747 	/* no-op */
748 }
749 
750 static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
751 {
752 	/* no-op */
753 }
754 
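/*
 * A config-space read of the vendor ID is used as a cheap liveness probe:
 * all-ones or zero usually means the device has dropped off the bus or the
 * link is down, in which case recovery is attempted.
 */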
755 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
756 {
757 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
758 	u16 vendor = 0;
759 
760 	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
761 		return false;
762 
763 	if (vendor == (u16) ~0 || vendor == 0)
764 		return false;
765 
766 	return true;
767 }
768 
769 static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
770 			 unsigned int bar_num, u64 dma_mask)
771 {
772 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
773 	int err;
774 
775 	err = pci_assign_resource(pdev, bar_num);
776 	if (err)
777 		return err;
778 
779 	err = pcim_enable_device(pdev);
780 	if (err) {
781 		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
782 		return err;
783 	}
784 
785 	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
786 	if (err) {
787 		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
788 		return err;
789 	}
790 	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
791 	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
792 
793 	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
794 	if (err) {
795 		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
796 		return err;
797 	}
798 
799 	pci_set_master(pdev);
800 
801 	return 0;
802 }
803 
804 static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
805 			    const struct mhi_controller_config *mhi_cntrl_config)
806 {
807 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
808 	int nr_vectors, i;
809 	int *irq;
810 
811 	/*
812 	 * Ideally, allocate one MSI vector for BHI plus one vector per event ring.
813 	 * No explicit pci_free_irq_vectors() needed; pcim_release() handles it.
814 	 */
815 	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
816 
817 	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
818 	if (nr_vectors < 0) {
819 		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
820 			nr_vectors);
821 		return nr_vectors;
822 	}
823 
824 	if (nr_vectors < mhi_cntrl->nr_irqs) {
825 		dev_warn(&pdev->dev, "using shared MSI\n");
826 
827 		/* Patch the event configs so every ring uses the single shared vector */
828 		for (i = 0; i < mhi_cntrl_config->num_events; i++)
829 			mhi_cntrl_config->event_cfg[i].irq = 0;
830 		mhi_cntrl->nr_irqs = 1;
831 	}
832 
833 	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
834 	if (!irq)
835 		return -ENOMEM;
836 
837 	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
838 		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
839 
840 		irq[i] = pci_irq_vector(pdev, vector);
841 	}
842 
843 	mhi_cntrl->irq = irq;
844 
845 	return 0;
846 }
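/*
 * Example: modem_qcom_v1_mhiv_config defines 6 event rings, so 7 MSI vectors
 * are requested (BHI + one per ring). If the platform only grants a single
 * vector, the event configs are rewritten to irq 0 and BHI and all event
 * rings end up sharing that one interrupt.
 */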
847 
848 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
849 {
850 	/* The runtime_get() MHI callback means:
851 	 *    Do whatever is requested to leave M3.
852 	 */
853 	return pm_runtime_get(mhi_cntrl->cntrl_dev);
854 }
855 
856 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
857 {
858 	/* The runtime_put() MHI callback means:
859 	 *    The device can be moved to the M3 state.
860 	 */
861 	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
862 	pm_runtime_put(mhi_cntrl->cntrl_dev);
863 }
864 
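/*
 * Recovery path: invoked from the health-check timer and the PCI error/resume
 * handlers. It tears down the MHI stack, restores the saved PCI state and
 * powers MHI back up; if the device still does not respond, a PCI function
 * reset is attempted as a last resort.
 */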
865 static void mhi_pci_recovery_work(struct work_struct *work)
866 {
867 	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
868 						       recovery_work);
869 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
870 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
871 	int err;
872 
873 	dev_warn(&pdev->dev, "device recovery started\n");
874 
875 	del_timer(&mhi_pdev->health_check_timer);
876 	pm_runtime_forbid(&pdev->dev);
877 
878 	/* Clean up MHI state */
879 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
880 		mhi_power_down(mhi_cntrl, false);
881 		mhi_unprepare_after_power_down(mhi_cntrl);
882 	}
883 
884 	pci_set_power_state(pdev, PCI_D0);
885 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
886 	pci_restore_state(pdev);
887 
888 	if (!mhi_pci_is_alive(mhi_cntrl))
889 		goto err_try_reset;
890 
891 	err = mhi_prepare_for_power_up(mhi_cntrl);
892 	if (err)
893 		goto err_try_reset;
894 
895 	err = mhi_sync_power_up(mhi_cntrl);
896 	if (err)
897 		goto err_unprepare;
898 
899 	dev_dbg(&pdev->dev, "Recovery completed\n");
900 
901 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
902 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
903 	return;
904 
905 err_unprepare:
906 	mhi_unprepare_after_power_down(mhi_cntrl);
907 err_try_reset:
908 	if (pci_reset_function(pdev))
909 		dev_err(&pdev->dev, "Recovery failed\n");
910 }
911 
912 static void health_check(struct timer_list *t)
913 {
914 	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
915 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
916 
917 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
918 			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
919 		return;
920 
921 	if (!mhi_pci_is_alive(mhi_cntrl)) {
922 		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
923 		queue_work(system_long_wq, &mhi_pdev->recovery_work);
924 		return;
925 	}
926 
927 	/* reschedule in two seconds */
928 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
929 }
930 
931 static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
932 {
933 	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
934 	const struct mhi_controller_config *mhi_cntrl_config;
935 	struct mhi_pci_device *mhi_pdev;
936 	struct mhi_controller *mhi_cntrl;
937 	int err;
938 
939 	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
940 
941 	/* mhi_pdev.mhi_cntrl must be zero-initialized */
942 	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
943 	if (!mhi_pdev)
944 		return -ENOMEM;
945 
946 	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
947 	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
948 
949 	mhi_cntrl_config = info->config;
950 	mhi_cntrl = &mhi_pdev->mhi_cntrl;
951 
952 	mhi_cntrl->cntrl_dev = &pdev->dev;
953 	mhi_cntrl->iova_start = 0;
954 	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
955 	mhi_cntrl->fw_image = info->fw;
956 	mhi_cntrl->edl_image = info->edl;
957 
958 	mhi_cntrl->read_reg = mhi_pci_read_reg;
959 	mhi_cntrl->write_reg = mhi_pci_write_reg;
960 	mhi_cntrl->status_cb = mhi_pci_status_cb;
961 	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
962 	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
963 	mhi_cntrl->mru = info->mru_default;
964 
965 	if (info->sideband_wake) {
966 		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
967 		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
968 		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
969 	}
970 
971 	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
972 	if (err)
973 		return err;
974 
975 	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
976 	if (err)
977 		return err;
978 
979 	pci_set_drvdata(pdev, mhi_pdev);
980 
981 	/* Keep the saved PCI config space at hand for restore after a sudden
982 	 * PCI error: cache the state locally and discard the PCI core's copy.
983 	 */
984 	pci_save_state(pdev);
985 	mhi_pdev->pci_state = pci_store_saved_state(pdev);
986 	pci_load_saved_state(pdev, NULL);
987 
988 	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
989 	if (err)
990 		return err;
991 
992 	/* MHI bus does not power up the controller by default */
993 	err = mhi_prepare_for_power_up(mhi_cntrl);
994 	if (err) {
995 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
996 		goto err_unregister;
997 	}
998 
999 	err = mhi_sync_power_up(mhi_cntrl);
1000 	if (err) {
1001 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
1002 		goto err_unprepare;
1003 	}
1004 
1005 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1006 
1007 	/* start health check */
1008 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1009 
1010 	/* Only allow runtime-suspend if PME capable (for wakeup) */
1011 	if (pci_pme_capable(pdev, PCI_D3hot)) {
1012 		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
1013 		pm_runtime_use_autosuspend(&pdev->dev);
1014 		pm_runtime_mark_last_busy(&pdev->dev);
1015 		pm_runtime_put_noidle(&pdev->dev);
1016 	}
1017 
1018 	return 0;
1019 
1020 err_unprepare:
1021 	mhi_unprepare_after_power_down(mhi_cntrl);
1022 err_unregister:
1023 	mhi_unregister_controller(mhi_cntrl);
1024 
1025 	return err;
1026 }
1027 
1028 static void mhi_pci_remove(struct pci_dev *pdev)
1029 {
1030 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1031 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1032 
1033 	del_timer_sync(&mhi_pdev->health_check_timer);
1034 	cancel_work_sync(&mhi_pdev->recovery_work);
1035 
1036 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1037 		mhi_power_down(mhi_cntrl, true);
1038 		mhi_unprepare_after_power_down(mhi_cntrl);
1039 	}
1040 
1041 	/* Balance the pm_runtime_put_noidle() done in probe */
1042 	if (pci_pme_capable(pdev, PCI_D3hot))
1043 		pm_runtime_get_noresume(&pdev->dev);
1044 
1045 	mhi_unregister_controller(mhi_cntrl);
1046 }
1047 
1048 static void mhi_pci_shutdown(struct pci_dev *pdev)
1049 {
1050 	mhi_pci_remove(pdev);
1051 	pci_set_power_state(pdev, PCI_D3hot);
1052 }
1053 
1054 static void mhi_pci_reset_prepare(struct pci_dev *pdev)
1055 {
1056 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1057 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1058 
1059 	dev_info(&pdev->dev, "reset\n");
1060 
1061 	del_timer(&mhi_pdev->health_check_timer);
1062 
1063 	/* Clean up MHI state */
1064 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1065 		mhi_power_down(mhi_cntrl, false);
1066 		mhi_unprepare_after_power_down(mhi_cntrl);
1067 	}
1068 
1069 	/* Trigger an internal device (SoC) reset */
1070 	mhi_soc_reset(mhi_cntrl);
1071 
1072 	/* Give the device time to complete the reset */
1073 	msleep(MHI_POST_RESET_DELAY_MS);
1074 }
1075 
1076 static void mhi_pci_reset_done(struct pci_dev *pdev)
1077 {
1078 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1079 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1080 	int err;
1081 
1082 	/* Restore initial known working PCI state */
1083 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
1084 	pci_restore_state(pdev);
1085 
1086 	/* Is the device responding on the bus? */
1087 	if (!mhi_pci_is_alive(mhi_cntrl)) {
1088 		dev_err(&pdev->dev, "reset failed\n");
1089 		return;
1090 	}
1091 
1092 	err = mhi_prepare_for_power_up(mhi_cntrl);
1093 	if (err) {
1094 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1095 		return;
1096 	}
1097 
1098 	err = mhi_sync_power_up(mhi_cntrl);
1099 	if (err) {
1100 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
1101 		mhi_unprepare_after_power_down(mhi_cntrl);
1102 		return;
1103 	}
1104 
1105 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1106 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1107 }
1108 
1109 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1110 					       pci_channel_state_t state)
1111 {
1112 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1113 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1114 
1115 	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1116 
1117 	if (state == pci_channel_io_perm_failure)
1118 		return PCI_ERS_RESULT_DISCONNECT;
1119 
1120 	/* Clean up MHI state */
1121 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1122 		mhi_power_down(mhi_cntrl, false);
1123 		mhi_unprepare_after_power_down(mhi_cntrl);
1124 	} else {
1125 		/* Nothing to do */
1126 		return PCI_ERS_RESULT_RECOVERED;
1127 	}
1128 
1129 	pci_disable_device(pdev);
1130 
1131 	return PCI_ERS_RESULT_NEED_RESET;
1132 }
1133 
1134 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1135 {
1136 	if (pci_enable_device(pdev)) {
1137 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1138 		return PCI_ERS_RESULT_DISCONNECT;
1139 	}
1140 
1141 	return PCI_ERS_RESULT_RECOVERED;
1142 }
1143 
1144 static void mhi_pci_io_resume(struct pci_dev *pdev)
1145 {
1146 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1147 
1148 	dev_err(&pdev->dev, "PCI slot reset done\n");
1149 
1150 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1151 }
1152 
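/*
 * Error handling flow: .error_detected powers the MHI stack down and asks for
 * a slot reset, .slot_reset re-enables the PCI device, and .resume queues
 * recovery_work to bring MHI back up. .reset_prepare/.reset_done wrap
 * function-level resets, including the pci_reset_function() call used as a
 * recovery fallback.
 */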
1153 static const struct pci_error_handlers mhi_pci_err_handler = {
1154 	.error_detected = mhi_pci_error_detected,
1155 	.slot_reset = mhi_pci_slot_reset,
1156 	.resume = mhi_pci_io_resume,
1157 	.reset_prepare = mhi_pci_reset_prepare,
1158 	.reset_done = mhi_pci_reset_done,
1159 };
1160 
1161 static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
1162 {
1163 	struct pci_dev *pdev = to_pci_dev(dev);
1164 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1165 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1166 	int err;
1167 
1168 	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1169 		return 0;
1170 
1171 	del_timer(&mhi_pdev->health_check_timer);
1172 	cancel_work_sync(&mhi_pdev->recovery_work);
1173 
1174 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1175 			mhi_cntrl->ee != MHI_EE_AMSS)
1176 		goto pci_suspend; /* Nothing to do at MHI level */
1177 
1178 	/* Transition to M3 state */
1179 	err = mhi_pm_suspend(mhi_cntrl);
1180 	if (err) {
1181 		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
1182 		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
1183 		return -EBUSY;
1184 	}
1185 
1186 pci_suspend:
1187 	pci_disable_device(pdev);
1188 	pci_wake_from_d3(pdev, true);
1189 
1190 	return 0;
1191 }
1192 
1193 static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
1194 {
1195 	struct pci_dev *pdev = to_pci_dev(dev);
1196 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1197 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1198 	int err;
1199 
1200 	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1201 		return 0;
1202 
1203 	err = pci_enable_device(pdev);
1204 	if (err)
1205 		goto err_recovery;
1206 
1207 	pci_set_master(pdev);
1208 	pci_wake_from_d3(pdev, false);
1209 
1210 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1211 			mhi_cntrl->ee != MHI_EE_AMSS)
1212 		return 0; /* Nothing to do at MHI level */
1213 
1214 	/* Exit M3, transition to M0 state */
1215 	err = mhi_pm_resume(mhi_cntrl);
1216 	if (err) {
1217 		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
1218 		goto err_recovery;
1219 	}
1220 
1221 	/* Resume health check */
1222 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1223 
1224 	/* This may be a remote wakeup (no MHI runtime_get); update the access time */
1225 	pm_runtime_mark_last_busy(dev);
1226 
1227 	return 0;
1228 
1229 err_recovery:
1230 	/* Do not return an error here, to avoid messing up the PCI device
1231 	 * state; the device has likely lost power (d3cold) and simply needs to
1232 	 * be reset from the recovery procedure. Trigger the recovery
1233 	 * asynchronously so that system suspend exit is not delayed.
1234 	 */
1235 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1236 	pm_runtime_mark_last_busy(dev);
1237 
1238 	return 0;
1239 }
1240 
1241 static int __maybe_unused mhi_pci_suspend(struct device *dev)
1242 {
1243 	pm_runtime_disable(dev);
1244 	return mhi_pci_runtime_suspend(dev);
1245 }
1246 
1247 static int __maybe_unused mhi_pci_resume(struct device *dev)
1248 {
1249 	int ret;
1250 
1251 	/* Depending on the platform, the device may have lost power (d3cold);
1252 	 * resume it now to check its state and recover if necessary.
1253 	 */
1254 	ret = mhi_pci_runtime_resume(dev);
1255 	pm_runtime_enable(dev);
1256 
1257 	return ret;
1258 }
1259 
1260 static int __maybe_unused mhi_pci_freeze(struct device *dev)
1261 {
1262 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1263 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1264 
1265 	/* Stop all operations: hibernation does not guarantee that the device
1266 	 * will be in the same state as before freezing, especially if the
1267 	 * intermediate restore kernel reinitializes the MHI device with a new
1268 	 * context.
1269 	 */
1270 	flush_work(&mhi_pdev->recovery_work);
1271 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1272 		mhi_power_down(mhi_cntrl, true);
1273 		mhi_unprepare_after_power_down(mhi_cntrl);
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 static int __maybe_unused mhi_pci_restore(struct device *dev)
1280 {
1281 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1282 
1283 	/* Reinitialize the device */
1284 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1285 
1286 	return 0;
1287 }
1288 
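/*
 * System sleep mapping: suspend/resume reuse the runtime PM paths with
 * runtime PM temporarily disabled; freeze/poweroff fully power the MHI stack
 * down before hibernation, and thaw/restore reinitialize the device through
 * the recovery worker.
 */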
1289 static const struct dev_pm_ops mhi_pci_pm_ops = {
1290 	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
1291 #ifdef CONFIG_PM_SLEEP
1292 	.suspend = mhi_pci_suspend,
1293 	.resume = mhi_pci_resume,
1294 	.freeze = mhi_pci_freeze,
1295 	.thaw = mhi_pci_restore,
1296 	.poweroff = mhi_pci_freeze,
1297 	.restore = mhi_pci_restore,
1298 #endif
1299 };
1300 
1301 static struct pci_driver mhi_pci_driver = {
1302 	.name		= "mhi-pci-generic",
1303 	.id_table	= mhi_pci_id_table,
1304 	.probe		= mhi_pci_probe,
1305 	.remove		= mhi_pci_remove,
1306 	.shutdown	= mhi_pci_shutdown,
1307 	.err_handler	= &mhi_pci_err_handler,
1308 	.driver.pm	= &mhi_pci_pm_ops
1309 };
1310 module_pci_driver(mhi_pci_driver);
1311 
1312 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1313 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1314 MODULE_LICENSE("GPL");
1315