xref: /linux/drivers/bus/mhi/host/pci_generic.c (revision 55ec81f7517fad09135f65552cea0a3ee84fff30)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MHI PCI driver - MHI over PCI controller driver
4  *
5  * This module is a generic driver for registering MHI-over-PCI devices,
6  * such as PCIe QCOM modems.
7  *
8  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19 
20 #define MHI_PCI_DEFAULT_BAR_NUM 0
21 
22 #define MHI_POST_RESET_DELAY_MS 2000
23 
24 #define HEALTH_CHECK_PERIOD (HZ * 2)
25 
26 /* PCI VID definitions */
27 #define PCI_VENDOR_ID_THALES	0x1269
28 #define PCI_VENDOR_ID_QUECTEL	0x1eac
29 
30 /**
31  * struct mhi_pci_dev_info - MHI PCI device specific information
32  * @config: MHI controller configuration
33  * @name: name of the PCI module
34  * @fw: firmware path (if any)
35  * @edl: emergency download mode firmware path (if any)
36  * @bar_num: PCI base address register to use for MHI MMIO register space
37  * @dma_data_width: DMA transfer word size (32 or 64 bits)
38  * @mru_default: default MRU size for MBIM network packets
39  * @sideband_wake: true for devices using a dedicated sideband GPIO for
40  *		   wakeup instead of inband wake support (such as sdx24)
41  */
42 struct mhi_pci_dev_info {
43 	const struct mhi_controller_config *config;
44 	const char *name;
45 	const char *fw;
46 	const char *edl;
47 	unsigned int bar_num;
48 	unsigned int dma_data_width;
49 	unsigned int mru_default;
50 	bool sideband_wake;
51 };
52 
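/*
 * Channel configuration templates. UL/DL pairs differ only in DMA direction;
 * the HW variants enable burst doorbell mode and doorbell mode switching for
 * hardware-accelerated data channels; the SBL/FP variants restrict a channel
 * to the secondary boot loader or flash programmer execution environments;
 * the AUTOQUEUE variant lets the MHI core queue receive buffers for the client.
 */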
53 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
54 	{						\
55 		.num = ch_num,				\
56 		.name = ch_name,			\
57 		.num_elements = el_count,		\
58 		.event_ring = ev_ring,			\
59 		.dir = DMA_TO_DEVICE,			\
60 		.ee_mask = BIT(MHI_EE_AMSS),		\
61 		.pollcfg = 0,				\
62 		.doorbell = MHI_DB_BRST_DISABLE,	\
63 		.lpm_notify = false,			\
64 		.offload_channel = false,		\
65 		.doorbell_mode_switch = false,		\
66 	}						\
67 
68 #define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
69 	{						\
70 		.num = ch_num,				\
71 		.name = ch_name,			\
72 		.num_elements = el_count,		\
73 		.event_ring = ev_ring,			\
74 		.dir = DMA_FROM_DEVICE,			\
75 		.ee_mask = BIT(MHI_EE_AMSS),		\
76 		.pollcfg = 0,				\
77 		.doorbell = MHI_DB_BRST_DISABLE,	\
78 		.lpm_notify = false,			\
79 		.offload_channel = false,		\
80 		.doorbell_mode_switch = false,		\
81 	}
82 
83 #define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
84 	{						\
85 		.num = ch_num,				\
86 		.name = ch_name,			\
87 		.num_elements = el_count,		\
88 		.event_ring = ev_ring,			\
89 		.dir = DMA_FROM_DEVICE,			\
90 		.ee_mask = BIT(MHI_EE_AMSS),		\
91 		.pollcfg = 0,				\
92 		.doorbell = MHI_DB_BRST_DISABLE,	\
93 		.lpm_notify = false,			\
94 		.offload_channel = false,		\
95 		.doorbell_mode_switch = false,		\
96 		.auto_queue = true,			\
97 	}
98 
99 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
100 	{					\
101 		.num_elements = el_count,	\
102 		.irq_moderation_ms = 0,		\
103 		.irq = (ev_ring) + 1,		\
104 		.priority = 1,			\
105 		.mode = MHI_DB_BRST_DISABLE,	\
106 		.data_type = MHI_ER_CTRL,	\
107 		.hardware_event = false,	\
108 		.client_managed = false,	\
109 		.offload_channel = false,	\
110 	}
111 
112 #define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
113 	{						\
114 		.num = ch_num,				\
115 		.name = ch_name,			\
116 		.num_elements = el_count,		\
117 		.event_ring = ev_ring,			\
118 		.dir = DMA_TO_DEVICE,			\
119 		.ee_mask = BIT(MHI_EE_AMSS),		\
120 		.pollcfg = 0,				\
121 		.doorbell = MHI_DB_BRST_ENABLE,	\
122 		.lpm_notify = false,			\
123 		.offload_channel = false,		\
124 		.doorbell_mode_switch = true,		\
125 	}						\
126 
127 #define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
128 	{						\
129 		.num = ch_num,				\
130 		.name = ch_name,			\
131 		.num_elements = el_count,		\
132 		.event_ring = ev_ring,			\
133 		.dir = DMA_FROM_DEVICE,			\
134 		.ee_mask = BIT(MHI_EE_AMSS),		\
135 		.pollcfg = 0,				\
136 		.doorbell = MHI_DB_BRST_ENABLE,	\
137 		.lpm_notify = false,			\
138 		.offload_channel = false,		\
139 		.doorbell_mode_switch = true,		\
140 	}
141 
142 #define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
143 	{						\
144 		.num = ch_num,				\
145 		.name = ch_name,			\
146 		.num_elements = el_count,		\
147 		.event_ring = ev_ring,			\
148 		.dir = DMA_TO_DEVICE,			\
149 		.ee_mask = BIT(MHI_EE_SBL),		\
150 		.pollcfg = 0,				\
151 		.doorbell = MHI_DB_BRST_DISABLE,	\
152 		.lpm_notify = false,			\
153 		.offload_channel = false,		\
154 		.doorbell_mode_switch = false,		\
155 	}						\
156 
157 #define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
158 	{						\
159 		.num = ch_num,				\
160 		.name = ch_name,			\
161 		.num_elements = el_count,		\
162 		.event_ring = ev_ring,			\
163 		.dir = DMA_FROM_DEVICE,			\
164 		.ee_mask = BIT(MHI_EE_SBL),		\
165 		.pollcfg = 0,				\
166 		.doorbell = MHI_DB_BRST_DISABLE,	\
167 		.lpm_notify = false,			\
168 		.offload_channel = false,		\
169 		.doorbell_mode_switch = false,		\
170 	}
171 
172 #define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
173 	{						\
174 		.num = ch_num,				\
175 		.name = ch_name,			\
176 		.num_elements = el_count,		\
177 		.event_ring = ev_ring,			\
178 		.dir = DMA_TO_DEVICE,			\
179 		.ee_mask = BIT(MHI_EE_FP),		\
180 		.pollcfg = 0,				\
181 		.doorbell = MHI_DB_BRST_DISABLE,	\
182 		.lpm_notify = false,			\
183 		.offload_channel = false,		\
184 		.doorbell_mode_switch = false,		\
185 	}						\
186 
187 #define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
188 	{						\
189 		.num = ch_num,				\
190 		.name = ch_name,			\
191 		.num_elements = el_count,		\
192 		.event_ring = ev_ring,			\
193 		.dir = DMA_FROM_DEVICE,			\
194 		.ee_mask = BIT(MHI_EE_FP),		\
195 		.pollcfg = 0,				\
196 		.doorbell = MHI_DB_BRST_DISABLE,	\
197 		.lpm_notify = false,			\
198 		.offload_channel = false,		\
199 		.doorbell_mode_switch = false,		\
200 	}
201 
202 #define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
203 	{					\
204 		.num_elements = el_count,	\
205 		.irq_moderation_ms = 5,		\
206 		.irq = (ev_ring) + 1,		\
207 		.priority = 1,			\
208 		.mode = MHI_DB_BRST_DISABLE,	\
209 		.data_type = MHI_ER_DATA,	\
210 		.hardware_event = false,	\
211 		.client_managed = false,	\
212 		.offload_channel = false,	\
213 	}
214 
215 #define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
216 	{					\
217 		.num_elements = el_count,	\
218 		.irq_moderation_ms = 0,		\
219 		.irq = (ev_ring) + 1,		\
220 		.priority = 1,			\
221 		.mode = MHI_DB_BRST_DISABLE,	\
222 		.data_type = MHI_ER_DATA,	\
223 		.hardware_event = false,	\
224 		.client_managed = false,	\
225 		.offload_channel = false,	\
226 	}
227 
228 #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
229 	{					\
230 		.num_elements = el_count,	\
231 		.irq_moderation_ms = 1,		\
232 		.irq = (ev_ring) + 1,		\
233 		.priority = 1,			\
234 		.mode = MHI_DB_BRST_DISABLE,	\
235 		.data_type = MHI_ER_DATA,	\
236 		.hardware_event = true,		\
237 		.client_managed = false,	\
238 		.offload_channel = false,	\
239 		.channel = ch_num,		\
240 	}
241 
242 static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
243 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
244 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
245 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
246 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
247 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
248 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
249 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
250 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
251 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
252 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
253 	MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
254 	MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
255 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
256 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
257 };
258 
259 static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
260 	/* first ring is control+data ring */
261 	MHI_EVENT_CONFIG_CTRL(0, 64),
262 	/* DIAG dedicated event ring */
263 	MHI_EVENT_CONFIG_DATA(1, 128),
264 	/* Software channels dedicated event ring */
265 	MHI_EVENT_CONFIG_SW_DATA(2, 64),
266 	MHI_EVENT_CONFIG_SW_DATA(3, 64),
267 	/* Hardware channels request dedicated hardware event rings */
268 	MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
269 	MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
270 };
271 
272 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
273 	.max_channels = 128,
274 	.timeout_ms = 8000,
275 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
276 	.ch_cfg = modem_qcom_v1_mhi_channels,
277 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
278 	.event_cfg = modem_qcom_v1_mhi_events,
279 };
280 
281 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
282 	.name = "qcom-sdx65m",
283 	.fw = "qcom/sdx65m/xbl.elf",
284 	.edl = "qcom/sdx65m/edl.mbn",
285 	.config = &modem_qcom_v1_mhiv_config,
286 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
287 	.dma_data_width = 32,
288 	.sideband_wake = false,
289 };
290 
291 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
292 	.name = "qcom-sdx55m",
293 	.fw = "qcom/sdx55m/sbl1.mbn",
294 	.edl = "qcom/sdx55m/edl.mbn",
295 	.config = &modem_qcom_v1_mhiv_config,
296 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
297 	.dma_data_width = 32,
298 	.mru_default = 32768,
299 	.sideband_wake = false,
300 };
301 
302 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
303 	.name = "qcom-sdx24",
304 	.edl = "qcom/prog_firehose_sdx24.mbn",
305 	.config = &modem_qcom_v1_mhiv_config,
306 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
307 	.dma_data_width = 32,
308 	.sideband_wake = true,
309 };
310 
311 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
312 	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
313 	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
314 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
315 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
316 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
317 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
318 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
319 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
320 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
321 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
322 	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
323 	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
324 	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
325 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
326 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
327 };
328 
329 static struct mhi_event_config mhi_quectel_em1xx_events[] = {
330 	MHI_EVENT_CONFIG_CTRL(0, 128),
331 	MHI_EVENT_CONFIG_DATA(1, 128),
332 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
333 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
334 };
335 
336 static const struct mhi_controller_config modem_quectel_em1xx_config = {
337 	.max_channels = 128,
338 	.timeout_ms = 20000,
339 	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
340 	.ch_cfg = mhi_quectel_em1xx_channels,
341 	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
342 	.event_cfg = mhi_quectel_em1xx_events,
343 };
344 
345 static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
346 	.name = "quectel-em1xx",
347 	.edl = "qcom/prog_firehose_sdx24.mbn",
348 	.config = &modem_quectel_em1xx_config,
349 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
350 	.dma_data_width = 32,
351 	.mru_default = 32768,
352 	.sideband_wake = true,
353 };
354 
355 static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
356 	.name = "quectel-rm5xx",
357 	.edl = "qcom/prog_firehose_sdx6x.elf",
358 	.config = &modem_quectel_em1xx_config,
359 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
360 	.dma_data_width = 32,
361 	.mru_default = 32768,
362 	.sideband_wake = true,
363 };
364 
365 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
366 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
367 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
368 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
369 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
370 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
371 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
372 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
373 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
374 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
375 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
376 };
377 
378 static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
379 	MHI_EVENT_CONFIG_CTRL(0, 128),
380 	MHI_EVENT_CONFIG_DATA(1, 128),
381 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
382 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
383 };
384 
385 static const struct mhi_controller_config modem_foxconn_sdx55_config = {
386 	.max_channels = 128,
387 	.timeout_ms = 20000,
388 	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
389 	.ch_cfg = mhi_foxconn_sdx55_channels,
390 	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
391 	.event_cfg = mhi_foxconn_sdx55_events,
392 };
393 
394 static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
395 	.name = "foxconn-sdx24",
396 	.config = &modem_foxconn_sdx55_config,
397 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
398 	.dma_data_width = 32,
399 	.mru_default = 32768,
400 	.sideband_wake = false,
401 };
402 
403 static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
404 	.name = "foxconn-sdx55",
405 	.fw = "qcom/sdx55m/sbl1.mbn",
406 	.edl = "qcom/sdx55m/edl.mbn",
407 	.config = &modem_foxconn_sdx55_config,
408 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
409 	.dma_data_width = 32,
410 	.mru_default = 32768,
411 	.sideband_wake = false,
412 };
413 
414 static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
415 	.name = "foxconn-sdx65",
416 	.config = &modem_foxconn_sdx55_config,
417 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
418 	.dma_data_width = 32,
419 	.mru_default = 32768,
420 	.sideband_wake = false,
421 };
422 
423 static const struct mhi_channel_config mhi_mv3x_channels[] = {
424 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
425 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
426 	/* MBIM Control Channel */
427 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
428 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
429 	/* MBIM Data Channel */
430 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
431 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
432 };
433 
434 static struct mhi_event_config mhi_mv3x_events[] = {
435 	MHI_EVENT_CONFIG_CTRL(0, 256),
436 	MHI_EVENT_CONFIG_DATA(1, 256),
437 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
438 	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
439 };
440 
441 static const struct mhi_controller_config modem_mv3x_config = {
442 	.max_channels = 128,
443 	.timeout_ms = 20000,
444 	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
445 	.ch_cfg = mhi_mv3x_channels,
446 	.num_events = ARRAY_SIZE(mhi_mv3x_events),
447 	.event_cfg = mhi_mv3x_events,
448 };
449 
450 static const struct mhi_pci_dev_info mhi_mv31_info = {
451 	.name = "cinterion-mv31",
452 	.config = &modem_mv3x_config,
453 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
454 	.dma_data_width = 32,
455 	.mru_default = 32768,
456 };
457 
458 static const struct mhi_pci_dev_info mhi_mv32_info = {
459 	.name = "cinterion-mv32",
460 	.config = &modem_mv3x_config,
461 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
462 	.dma_data_width = 32,
463 	.mru_default = 32768,
464 };
465 
466 static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
467 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
468 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
469 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
470 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
471 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
472 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
473 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
474 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
475 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
476 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
477 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
478 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
479 };
480 
481 static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
482 	/* first ring is control+data and DIAG ring */
483 	MHI_EVENT_CONFIG_CTRL(0, 2048),
484 	/* Hardware channels request dedicated hardware event rings */
485 	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
486 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
487 };
488 
489 static const struct mhi_controller_config modem_sierra_em919x_config = {
490 	.max_channels = 128,
491 	.timeout_ms = 24000,
492 	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
493 	.ch_cfg = mhi_sierra_em919x_channels,
494 	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
495 	.event_cfg = modem_sierra_em919x_mhi_events,
496 };
497 
498 static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
499 	.name = "sierra-em919x",
500 	.config = &modem_sierra_em919x_config,
501 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
502 	.dma_data_width = 32,
503 	.sideband_wake = false,
504 };
505 
506 static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
507 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
508 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
509 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
510 	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
511 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
512 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
513 };
514 
515 static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
516 	MHI_EVENT_CONFIG_CTRL(0, 128),
517 	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
518 	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
519 };
520 
521 static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
522 	.max_channels = 128,
523 	.timeout_ms = 20000,
524 	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
525 	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
526 	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
527 	.event_cfg = mhi_telit_fn980_hw_v1_events,
528 };
529 
530 static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
531 	.name = "telit-fn980-hwv1",
532 	.fw = "qcom/sdx55m/sbl1.mbn",
533 	.edl = "qcom/sdx55m/edl.mbn",
534 	.config = &modem_telit_fn980_hw_v1_config,
535 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
536 	.dma_data_width = 32,
537 	.mru_default = 32768,
538 	.sideband_wake = false,
539 };
540 
541 static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
542 	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
543 	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
544 	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
545 	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
546 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
547 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
548 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
549 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
550 	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
551 	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
552 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
553 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
554 };
555 
556 static struct mhi_event_config mhi_telit_fn990_events[] = {
557 	MHI_EVENT_CONFIG_CTRL(0, 128),
558 	MHI_EVENT_CONFIG_DATA(1, 128),
559 	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
560 	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
561 };
562 
563 static const struct mhi_controller_config modem_telit_fn990_config = {
564 	.max_channels = 128,
565 	.timeout_ms = 20000,
566 	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
567 	.ch_cfg = mhi_telit_fn990_channels,
568 	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
569 	.event_cfg = mhi_telit_fn990_events,
570 };
571 
572 static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
573 	.name = "telit-fn990",
574 	.config = &modem_telit_fn990_config,
575 	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
576 	.dma_data_width = 32,
577 	.sideband_wake = false,
578 	.mru_default = 32768,
579 };
580 
581 /* Keep the list sorted by PID. Entries for a new VID should be added at the end */
582 static const struct pci_device_id mhi_pci_id_table[] = {
583 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
584 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
585 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
586 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
587 	/* EM919x (sdx55), uses the same vid:pid as qcom-sdx55m */
588 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
589 		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
590 	/* Telit FN980 hardware revision v1 */
591 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
592 		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
593 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
594 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
595 	/* Telit FN990 */
596 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
597 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
598 	/* Telit FE990 */
599 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
600 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
601 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
602 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
603 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
604 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
605 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
606 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
607 	/* RM520N-GL (sdx6x), eSIM */
608 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
609 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
610 	/* RM520N-GL (sdx6x), Lenovo variant */
611 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
612 		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
613 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
614 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
615 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
616 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
617 	/* T99W175 (sdx55), both eSIM and non-eSIM variants */
618 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
619 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
620 	/* DW5930e (sdx55), with eSIM; it's also a T99W175 */
621 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
622 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
623 	/* DW5930e (sdx55), non-eSIM; it's also a T99W175 */
624 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
625 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
626 	/* T99W175 (sdx55), based on the new Qualcomm baseline */
627 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
628 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
629 	/* T99W175 (sdx55) */
630 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
631 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
632 	/* T99W368 (sdx65) */
633 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
634 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
635 	/* T99W373 (sdx62) */
636 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
637 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
638 	/* T99W510 (sdx24), variant 1 */
639 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
640 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
641 	/* T99W510 (sdx24), variant 2 */
642 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
643 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
644 	/* T99W510 (sdx24), variant 3 */
645 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
646 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
647 	/* DW5932e-eSIM (sdx62), With eSIM */
648 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
649 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
650 	/* DW5932e (sdx62), Non-eSIM */
651 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
652 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
653 	/* MV31-W (Cinterion) */
654 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
655 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
656 	/* MV31-W (Cinterion), based on new baseline */
657 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
658 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
659 	/* MV32-WA (Cinterion) */
660 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
661 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
662 	/* MV32-WB (Cinterion) */
663 	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
664 		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
665 	/* T99W175 (sdx55), HP variant */
666 	{ PCI_DEVICE(0x03f0, 0x0a6c),
667 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
668 	{  }
669 };
670 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
671 
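/* Driver state bits, tracked in the status field of struct mhi_pci_device */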
672 enum mhi_pci_device_status {
673 	MHI_PCI_DEV_STARTED,
674 	MHI_PCI_DEV_SUSPENDED,
675 };
676 
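/*
 * Per-device driver context: the embedded MHI controller, a locally saved
 * copy of the PCI config space used for recovery, the recovery work item,
 * the periodic health-check timer and the MHI_PCI_DEV_* status bits.
 */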
677 struct mhi_pci_device {
678 	struct mhi_controller mhi_cntrl;
679 	struct pci_saved_state *pci_state;
680 	struct work_struct recovery_work;
681 	struct timer_list health_check_timer;
682 	unsigned long status;
683 };
684 
685 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
686 			    void __iomem *addr, u32 *out)
687 {
688 	*out = readl(addr);
689 	return 0;
690 }
691 
692 static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
693 			      void __iomem *addr, u32 val)
694 {
695 	writel(val, addr);
696 }
697 
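/*
 * MHI status callback: forbid runtime PM when the firmware crashes, and
 * allow it again once mission mode (AMSS) is reached.
 */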
698 static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
699 			      enum mhi_callback cb)
700 {
701 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
702 
703 	/* Forbid runtime PM on firmware crash; allow it again in mission mode */
704 	switch (cb) {
705 	case MHI_CB_FATAL_ERROR:
706 	case MHI_CB_SYS_ERROR:
707 		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
708 		pm_runtime_forbid(&pdev->dev);
709 		break;
710 	case MHI_CB_EE_MISSION_MODE:
711 		pm_runtime_allow(&pdev->dev);
712 		break;
713 	default:
714 		break;
715 	}
716 }
717 
718 static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
719 {
720 	/* no-op */
721 }
722 
723 static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
724 {
725 	/* no-op */
726 }
727 
728 static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
729 {
730 	/* no-op */
731 }
732 
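/*
 * Check that the device is still reachable by reading its vendor ID from
 * config space; all-ones or zero means the device is gone or unresponsive.
 */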
733 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
734 {
735 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
736 	u16 vendor = 0;
737 
738 	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
739 		return false;
740 
741 	if (vendor == (u16) ~0 || vendor == 0)
742 		return false;
743 
744 	return true;
745 }
746 
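/*
 * Claim PCI resources for MHI: enable the device, map the BAR holding the
 * MHI MMIO registers, set the DMA mask and enable bus mastering.
 */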
747 static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
748 			 unsigned int bar_num, u64 dma_mask)
749 {
750 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
751 	int err;
752 
753 	err = pci_assign_resource(pdev, bar_num);
754 	if (err)
755 		return err;
756 
757 	err = pcim_enable_device(pdev);
758 	if (err) {
759 		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
760 		return err;
761 	}
762 
763 	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
764 	if (err) {
765 		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
766 		return err;
767 	}
768 	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
769 	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
770 
771 	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
772 	if (err) {
773 		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
774 		return err;
775 	}
776 
777 	pci_set_master(pdev);
778 
779 	return 0;
780 }
781 
782 static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
783 			    const struct mhi_controller_config *mhi_cntrl_config)
784 {
785 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
786 	int nr_vectors, i;
787 	int *irq;
788 
789 	/*
790 	 * Ideally, allocate one MSI vector for BHI plus one per event ring.
791 	 * No explicit pci_free_irq_vectors() is needed; pcim_release() does it.
792 	 */
793 	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
794 
795 	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
796 	if (nr_vectors < 0) {
797 		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
798 			nr_vectors);
799 		return nr_vectors;
800 	}
801 
802 	if (nr_vectors < mhi_cntrl->nr_irqs) {
803 		dev_warn(&pdev->dev, "using shared MSI\n");
804 
805 		/* Patch msi vectors, use only one (shared) */
806 		for (i = 0; i < mhi_cntrl_config->num_events; i++)
807 			mhi_cntrl_config->event_cfg[i].irq = 0;
808 		mhi_cntrl->nr_irqs = 1;
809 	}
810 
811 	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
812 	if (!irq)
813 		return -ENOMEM;
814 
815 	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
816 		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
817 
818 		irq[i] = pci_irq_vector(pdev, vector);
819 	}
820 
821 	mhi_cntrl->irq = irq;
822 
823 	return 0;
824 }
825 
826 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
827 {
828 	/* The runtime_get() MHI callback means:
829 	 *    Do whatever is requested to leave M3.
830 	 */
831 	return pm_runtime_get(mhi_cntrl->cntrl_dev);
832 }
833 
834 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
835 {
836 	/* The runtime_put() MHI callback means:
837 	 *    The device can be moved into the M3 state.
838 	 */
839 	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
840 	pm_runtime_put(mhi_cntrl->cntrl_dev);
841 }
842 
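/*
 * Device recovery: tear down the MHI stack if it was running, restore the
 * saved PCI state and power MHI back up; fall back to a PCI function reset
 * if the device does not come back.
 */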
843 static void mhi_pci_recovery_work(struct work_struct *work)
844 {
845 	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
846 						       recovery_work);
847 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
848 	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
849 	int err;
850 
851 	dev_warn(&pdev->dev, "device recovery started\n");
852 
853 	del_timer(&mhi_pdev->health_check_timer);
854 	pm_runtime_forbid(&pdev->dev);
855 
856 	/* Clean up MHI state */
857 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
858 		mhi_power_down(mhi_cntrl, false);
859 		mhi_unprepare_after_power_down(mhi_cntrl);
860 	}
861 
862 	pci_set_power_state(pdev, PCI_D0);
863 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
864 	pci_restore_state(pdev);
865 
866 	if (!mhi_pci_is_alive(mhi_cntrl))
867 		goto err_try_reset;
868 
869 	err = mhi_prepare_for_power_up(mhi_cntrl);
870 	if (err)
871 		goto err_try_reset;
872 
873 	err = mhi_sync_power_up(mhi_cntrl);
874 	if (err)
875 		goto err_unprepare;
876 
877 	dev_dbg(&pdev->dev, "Recovery completed\n");
878 
879 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
880 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
881 	return;
882 
883 err_unprepare:
884 	mhi_unprepare_after_power_down(mhi_cntrl);
885 err_try_reset:
886 	if (pci_reset_function(pdev))
887 		dev_err(&pdev->dev, "Recovery failed\n");
888 }
889 
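/*
 * Periodic liveness check, skipped while suspended or not started: schedule
 * the recovery work if the device stopped responding, otherwise re-arm the
 * timer.
 */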
890 static void health_check(struct timer_list *t)
891 {
892 	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
893 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
894 
895 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
896 			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
897 		return;
898 
899 	if (!mhi_pci_is_alive(mhi_cntrl)) {
900 		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
901 		queue_work(system_long_wq, &mhi_pdev->recovery_work);
902 		return;
903 	}
904 
905 	/* reschedule in two seconds */
906 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
907 }
908 
909 static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
910 {
911 	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
912 	const struct mhi_controller_config *mhi_cntrl_config;
913 	struct mhi_pci_device *mhi_pdev;
914 	struct mhi_controller *mhi_cntrl;
915 	int err;
916 
917 	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
918 
919 	/* mhi_pdev.mhi_cntrl must be zero-initialized */
920 	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
921 	if (!mhi_pdev)
922 		return -ENOMEM;
923 
924 	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
925 	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
926 
927 	mhi_cntrl_config = info->config;
928 	mhi_cntrl = &mhi_pdev->mhi_cntrl;
929 
930 	mhi_cntrl->cntrl_dev = &pdev->dev;
931 	mhi_cntrl->iova_start = 0;
932 	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
933 	mhi_cntrl->fw_image = info->fw;
934 	mhi_cntrl->edl_image = info->edl;
935 
936 	mhi_cntrl->read_reg = mhi_pci_read_reg;
937 	mhi_cntrl->write_reg = mhi_pci_write_reg;
938 	mhi_cntrl->status_cb = mhi_pci_status_cb;
939 	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
940 	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
941 	mhi_cntrl->mru = info->mru_default;
942 
943 	if (info->sideband_wake) {
944 		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
945 		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
946 		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
947 	}
948 
949 	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
950 	if (err)
951 		return err;
952 
953 	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
954 	if (err)
955 		return err;
956 
957 	pci_set_drvdata(pdev, mhi_pdev);
958 
959 	/* Keep a stored copy of the PCI config space at hand for restore after
960 	 * a sudden PCI error; cache the state locally and discard the PCI core one.
961 	 */
962 	pci_save_state(pdev);
963 	mhi_pdev->pci_state = pci_store_saved_state(pdev);
964 	pci_load_saved_state(pdev, NULL);
965 
966 	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
967 	if (err)
968 		return err;
969 
970 	/* MHI bus does not power up the controller by default */
971 	err = mhi_prepare_for_power_up(mhi_cntrl);
972 	if (err) {
973 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
974 		goto err_unregister;
975 	}
976 
977 	err = mhi_sync_power_up(mhi_cntrl);
978 	if (err) {
979 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
980 		goto err_unprepare;
981 	}
982 
983 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
984 
985 	/* start health check */
986 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
987 
988 	/* Only allow runtime-suspend if PME capable (for wakeup) */
989 	if (pci_pme_capable(pdev, PCI_D3hot)) {
990 		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
991 		pm_runtime_use_autosuspend(&pdev->dev);
992 		pm_runtime_mark_last_busy(&pdev->dev);
993 		pm_runtime_put_noidle(&pdev->dev);
994 	}
995 
996 	return 0;
997 
998 err_unprepare:
999 	mhi_unprepare_after_power_down(mhi_cntrl);
1000 err_unregister:
1001 	mhi_unregister_controller(mhi_cntrl);
1002 
1003 	return err;
1004 }
1005 
1006 static void mhi_pci_remove(struct pci_dev *pdev)
1007 {
1008 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1009 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1010 
1011 	del_timer_sync(&mhi_pdev->health_check_timer);
1012 	cancel_work_sync(&mhi_pdev->recovery_work);
1013 
1014 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1015 		mhi_power_down(mhi_cntrl, true);
1016 		mhi_unprepare_after_power_down(mhi_cntrl);
1017 	}
1018 
1019 	/* balance the pm_runtime_put_noidle() done in probe */
1020 	if (pci_pme_capable(pdev, PCI_D3hot))
1021 		pm_runtime_get_noresume(&pdev->dev);
1022 
1023 	mhi_unregister_controller(mhi_cntrl);
1024 }
1025 
1026 static void mhi_pci_shutdown(struct pci_dev *pdev)
1027 {
1028 	mhi_pci_remove(pdev);
1029 	pci_set_power_state(pdev, PCI_D3hot);
1030 }
1031 
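/*
 * PCI reset hooks: power down MHI and trigger a SoC reset before the PCI
 * level reset, then restore the saved config space and bring MHI back up
 * once the reset is done.
 */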
1032 static void mhi_pci_reset_prepare(struct pci_dev *pdev)
1033 {
1034 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1035 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1036 
1037 	dev_info(&pdev->dev, "reset\n");
1038 
1039 	del_timer(&mhi_pdev->health_check_timer);
1040 
1041 	/* Clean up MHI state */
1042 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1043 		mhi_power_down(mhi_cntrl, false);
1044 		mhi_unprepare_after_power_down(mhi_cntrl);
1045 	}
1046 
1047 	/* cause internal device reset */
1048 	mhi_soc_reset(mhi_cntrl);
1049 
1050 	/* Be sure device reset has been executed */
1051 	msleep(MHI_POST_RESET_DELAY_MS);
1052 }
1053 
1054 static void mhi_pci_reset_done(struct pci_dev *pdev)
1055 {
1056 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1057 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1058 	int err;
1059 
1060 	/* Restore initial known working PCI state */
1061 	pci_load_saved_state(pdev, mhi_pdev->pci_state);
1062 	pci_restore_state(pdev);
1063 
1064 	/* Is the device status available? */
1065 	if (!mhi_pci_is_alive(mhi_cntrl)) {
1066 		dev_err(&pdev->dev, "reset failed\n");
1067 		return;
1068 	}
1069 
1070 	err = mhi_prepare_for_power_up(mhi_cntrl);
1071 	if (err) {
1072 		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1073 		return;
1074 	}
1075 
1076 	err = mhi_sync_power_up(mhi_cntrl);
1077 	if (err) {
1078 		dev_err(&pdev->dev, "failed to power up MHI controller\n");
1079 		mhi_unprepare_after_power_down(mhi_cntrl);
1080 		return;
1081 	}
1082 
1083 	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1084 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1085 }
1086 
1087 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1088 					       pci_channel_state_t state)
1089 {
1090 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1091 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1092 
1093 	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1094 
1095 	if (state == pci_channel_io_perm_failure)
1096 		return PCI_ERS_RESULT_DISCONNECT;
1097 
1098 	/* Clean up MHI state */
1099 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1100 		mhi_power_down(mhi_cntrl, false);
1101 		mhi_unprepare_after_power_down(mhi_cntrl);
1102 	} else {
1103 		/* Nothing to do */
1104 		return PCI_ERS_RESULT_RECOVERED;
1105 	}
1106 
1107 	pci_disable_device(pdev);
1108 
1109 	return PCI_ERS_RESULT_NEED_RESET;
1110 }
1111 
1112 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1113 {
1114 	if (pci_enable_device(pdev)) {
1115 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1116 		return PCI_ERS_RESULT_DISCONNECT;
1117 	}
1118 
1119 	return PCI_ERS_RESULT_RECOVERED;
1120 }
1121 
1122 static void mhi_pci_io_resume(struct pci_dev *pdev)
1123 {
1124 	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1125 
1126 	dev_err(&pdev->dev, "PCI slot reset done\n");
1127 
1128 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1129 }
1130 
1131 static const struct pci_error_handlers mhi_pci_err_handler = {
1132 	.error_detected = mhi_pci_error_detected,
1133 	.slot_reset = mhi_pci_slot_reset,
1134 	.resume = mhi_pci_io_resume,
1135 	.reset_prepare = mhi_pci_reset_prepare,
1136 	.reset_done = mhi_pci_reset_done,
1137 };
1138 
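/*
 * Runtime PM: transition the MHI link to the M3 state and arm wake from D3
 * when the device is idle; runtime resume brings the link back to M0.
 */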
1139 static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
1140 {
1141 	struct pci_dev *pdev = to_pci_dev(dev);
1142 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1143 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1144 	int err;
1145 
1146 	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1147 		return 0;
1148 
1149 	del_timer(&mhi_pdev->health_check_timer);
1150 	cancel_work_sync(&mhi_pdev->recovery_work);
1151 
1152 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1153 			mhi_cntrl->ee != MHI_EE_AMSS)
1154 		goto pci_suspend; /* Nothing to do at MHI level */
1155 
1156 	/* Transition to M3 state */
1157 	err = mhi_pm_suspend(mhi_cntrl);
1158 	if (err) {
1159 		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
1160 		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
1161 		return -EBUSY;
1162 	}
1163 
1164 pci_suspend:
1165 	pci_disable_device(pdev);
1166 	pci_wake_from_d3(pdev, true);
1167 
1168 	return 0;
1169 }
1170 
1171 static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
1172 {
1173 	struct pci_dev *pdev = to_pci_dev(dev);
1174 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1175 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1176 	int err;
1177 
1178 	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1179 		return 0;
1180 
1181 	err = pci_enable_device(pdev);
1182 	if (err)
1183 		goto err_recovery;
1184 
1185 	pci_set_master(pdev);
1186 	pci_wake_from_d3(pdev, false);
1187 
1188 	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1189 			mhi_cntrl->ee != MHI_EE_AMSS)
1190 		return 0; /* Nothing to do at MHI level */
1191 
1192 	/* Exit M3, transition to M0 state */
1193 	err = mhi_pm_resume(mhi_cntrl);
1194 	if (err) {
1195 		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
1196 		goto err_recovery;
1197 	}
1198 
1199 	/* Resume health check */
1200 	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1201 
1202 	/* It can be a remote wakeup (no mhi runtime_get), update access time */
1203 	pm_runtime_mark_last_busy(dev);
1204 
1205 	return 0;
1206 
1207 err_recovery:
1208 	/* Do not fail here, to avoid messing up the PCI device state; the device
1209 	 * likely lost power (d3cold) and simply needs to be reset from the
1210 	 * recovery procedure. Trigger the recovery asynchronously to avoid
1211 	 * delaying system suspend exit.
1212 	 */
1213 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1214 	pm_runtime_mark_last_busy(dev);
1215 
1216 	return 0;
1217 }
1218 
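/*
 * System suspend/resume reuse the runtime PM paths, with runtime PM disabled
 * across the transition.
 */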
1219 static int __maybe_unused mhi_pci_suspend(struct device *dev)
1220 {
1221 	pm_runtime_disable(dev);
1222 	return mhi_pci_runtime_suspend(dev);
1223 }
1224 
1225 static int __maybe_unused mhi_pci_resume(struct device *dev)
1226 {
1227 	int ret;
1228 
1229 	/* Depending on the platform, the device may have lost power (d3cold);
1230 	 * we need to resume it now to check its state and recover when necessary.
1231 	 */
1232 	ret = mhi_pci_runtime_resume(dev);
1233 	pm_runtime_enable(dev);
1234 
1235 	return ret;
1236 }
1237 
1238 static int __maybe_unused mhi_pci_freeze(struct device *dev)
1239 {
1240 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1241 	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1242 
1243 	/* We want to stop all operations: hibernation does not guarantee that
1244 	 * the device will be in the same state as before freezing, especially
1245 	 * if the intermediate restore kernel reinitializes the MHI device with
1246 	 * a new context.
1247 	 */
1248 	flush_work(&mhi_pdev->recovery_work);
1249 	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1250 		mhi_power_down(mhi_cntrl, true);
1251 		mhi_unprepare_after_power_down(mhi_cntrl);
1252 	}
1253 
1254 	return 0;
1255 }
1256 
1257 static int __maybe_unused mhi_pci_restore(struct device *dev)
1258 {
1259 	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1260 
1261 	/* Reinitialize the device */
1262 	queue_work(system_long_wq, &mhi_pdev->recovery_work);
1263 
1264 	return 0;
1265 }
1266 
1267 static const struct dev_pm_ops mhi_pci_pm_ops = {
1268 	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
1269 #ifdef CONFIG_PM_SLEEP
1270 	.suspend = mhi_pci_suspend,
1271 	.resume = mhi_pci_resume,
1272 	.freeze = mhi_pci_freeze,
1273 	.thaw = mhi_pci_restore,
1274 	.poweroff = mhi_pci_freeze,
1275 	.restore = mhi_pci_restore,
1276 #endif
1277 };
1278 
1279 static struct pci_driver mhi_pci_driver = {
1280 	.name		= "mhi-pci-generic",
1281 	.id_table	= mhi_pci_id_table,
1282 	.probe		= mhi_pci_probe,
1283 	.remove		= mhi_pci_remove,
1284 	.shutdown	= mhi_pci_shutdown,
1285 	.err_handler	= &mhi_pci_err_handler,
1286 	.driver.pm	= &mhi_pci_pm_ops
1287 };
1288 module_pci_driver(mhi_pci_driver);
1289 
1290 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1291 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1292 MODULE_LICENSE("GPL");
1293