1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * MHI PCI driver - MHI over PCI controller driver
4 *
5 * This module is a generic driver for registering MHI-over-PCI devices,
6 * such as PCIe QCOM modems.
7 *
8 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
9 */
10
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19
20 #define MHI_PCI_DEFAULT_BAR_NUM 0
21
22 #define MHI_POST_RESET_DELAY_MS 2000
23
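/* Period of the device health check timer (see health_check()) */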
24 #define HEALTH_CHECK_PERIOD (HZ * 2)
25
26 /* PCI VID definitions */
27 #define PCI_VENDOR_ID_THALES 0x1269
28 #define PCI_VENDOR_ID_QUECTEL 0x1eac
29 #define PCI_VENDOR_ID_NETPRISMA 0x203e
30
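/*
 * Emergency Download (EDL) mode trigger: writing MHI_EDL_COOKIE to the
 * channel doorbell at index MHI_EDL_DB and then resetting the SoC asks the
 * device to reboot into its flash-programmer (see mhi_pci_generic_edl_trigger()).
 */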
31 #define MHI_EDL_DB 91
32 #define MHI_EDL_COOKIE 0xEDEDEDED
33
34 /**
35 * struct mhi_pci_dev_info - MHI PCI device specific information
36 * @config: MHI controller configuration
37 * @name: name of the PCI module
38 * @fw: firmware path (if any)
39 * @edl: emergency download mode firmware path (if any)
40 * @edl_trigger: capable of triggering EDL mode in the device (if supported)
41 * @bar_num: PCI base address register to use for MHI MMIO register space
42 * @dma_data_width: DMA transfer word size (32 or 64 bits)
43 * @mru_default: default MRU size for MBIM network packets
44 * @sideband_wake: device uses a dedicated sideband GPIO for wakeup instead
45 * of inband wake support (such as sdx24)
46 */
47 struct mhi_pci_dev_info {
48 const struct mhi_controller_config *config;
49 const char *name;
50 const char *fw;
51 const char *edl;
52 bool edl_trigger;
53 unsigned int bar_num;
54 unsigned int dma_data_width;
55 unsigned int mru_default;
56 bool sideband_wake;
57 };
58
59 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
60 { \
61 .num = ch_num, \
62 .name = ch_name, \
63 .num_elements = el_count, \
64 .event_ring = ev_ring, \
65 .dir = DMA_TO_DEVICE, \
66 .ee_mask = BIT(MHI_EE_AMSS), \
67 .pollcfg = 0, \
68 .doorbell = MHI_DB_BRST_DISABLE, \
69 .lpm_notify = false, \
70 .offload_channel = false, \
71 .doorbell_mode_switch = false, \
72 } \
73
74 #define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
75 { \
76 .num = ch_num, \
77 .name = ch_name, \
78 .num_elements = el_count, \
79 .event_ring = ev_ring, \
80 .dir = DMA_FROM_DEVICE, \
81 .ee_mask = BIT(MHI_EE_AMSS), \
82 .pollcfg = 0, \
83 .doorbell = MHI_DB_BRST_DISABLE, \
84 .lpm_notify = false, \
85 .offload_channel = false, \
86 .doorbell_mode_switch = false, \
87 }
88
89 #define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
90 { \
91 .num = ch_num, \
92 .name = ch_name, \
93 .num_elements = el_count, \
94 .event_ring = ev_ring, \
95 .dir = DMA_FROM_DEVICE, \
96 .ee_mask = BIT(MHI_EE_AMSS), \
97 .pollcfg = 0, \
98 .doorbell = MHI_DB_BRST_DISABLE, \
99 .lpm_notify = false, \
100 .offload_channel = false, \
101 .doorbell_mode_switch = false, \
102 .auto_queue = true, \
103 }
104
105 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
106 { \
107 .num_elements = el_count, \
108 .irq_moderation_ms = 0, \
109 .irq = (ev_ring) + 1, \
110 .priority = 1, \
111 .mode = MHI_DB_BRST_DISABLE, \
112 .data_type = MHI_ER_CTRL, \
113 .hardware_event = false, \
114 .client_managed = false, \
115 .offload_channel = false, \
116 }
117
118 #define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
119 { \
120 .num = ch_num, \
121 .name = ch_name, \
122 .num_elements = el_count, \
123 .event_ring = ev_ring, \
124 .dir = DMA_TO_DEVICE, \
125 .ee_mask = BIT(MHI_EE_AMSS), \
126 .pollcfg = 0, \
127 .doorbell = MHI_DB_BRST_ENABLE, \
128 .lpm_notify = false, \
129 .offload_channel = false, \
130 .doorbell_mode_switch = true, \
131 } \
132
133 #define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
134 { \
135 .num = ch_num, \
136 .name = ch_name, \
137 .num_elements = el_count, \
138 .event_ring = ev_ring, \
139 .dir = DMA_FROM_DEVICE, \
140 .ee_mask = BIT(MHI_EE_AMSS), \
141 .pollcfg = 0, \
142 .doorbell = MHI_DB_BRST_ENABLE, \
143 .lpm_notify = false, \
144 .offload_channel = false, \
145 .doorbell_mode_switch = true, \
146 }
147
148 #define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
149 { \
150 .num = ch_num, \
151 .name = ch_name, \
152 .num_elements = el_count, \
153 .event_ring = ev_ring, \
154 .dir = DMA_TO_DEVICE, \
155 .ee_mask = BIT(MHI_EE_SBL), \
156 .pollcfg = 0, \
157 .doorbell = MHI_DB_BRST_DISABLE, \
158 .lpm_notify = false, \
159 .offload_channel = false, \
160 .doorbell_mode_switch = false, \
161 } \
162
163 #define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
164 { \
165 .num = ch_num, \
166 .name = ch_name, \
167 .num_elements = el_count, \
168 .event_ring = ev_ring, \
169 .dir = DMA_FROM_DEVICE, \
170 .ee_mask = BIT(MHI_EE_SBL), \
171 .pollcfg = 0, \
172 .doorbell = MHI_DB_BRST_DISABLE, \
173 .lpm_notify = false, \
174 .offload_channel = false, \
175 .doorbell_mode_switch = false, \
176 }
177
178 #define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
179 { \
180 .num = ch_num, \
181 .name = ch_name, \
182 .num_elements = el_count, \
183 .event_ring = ev_ring, \
184 .dir = DMA_TO_DEVICE, \
185 .ee_mask = BIT(MHI_EE_FP), \
186 .pollcfg = 0, \
187 .doorbell = MHI_DB_BRST_DISABLE, \
188 .lpm_notify = false, \
189 .offload_channel = false, \
190 .doorbell_mode_switch = false, \
191 } \
192
193 #define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
194 { \
195 .num = ch_num, \
196 .name = ch_name, \
197 .num_elements = el_count, \
198 .event_ring = ev_ring, \
199 .dir = DMA_FROM_DEVICE, \
200 .ee_mask = BIT(MHI_EE_FP), \
201 .pollcfg = 0, \
202 .doorbell = MHI_DB_BRST_DISABLE, \
203 .lpm_notify = false, \
204 .offload_channel = false, \
205 .doorbell_mode_switch = false, \
206 }
207
208 #define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
209 { \
210 .num_elements = el_count, \
211 .irq_moderation_ms = 5, \
212 .irq = (ev_ring) + 1, \
213 .priority = 1, \
214 .mode = MHI_DB_BRST_DISABLE, \
215 .data_type = MHI_ER_DATA, \
216 .hardware_event = false, \
217 .client_managed = false, \
218 .offload_channel = false, \
219 }
220
221 #define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
222 { \
223 .num_elements = el_count, \
224 .irq_moderation_ms = 0, \
225 .irq = (ev_ring) + 1, \
226 .priority = 1, \
227 .mode = MHI_DB_BRST_DISABLE, \
228 .data_type = MHI_ER_DATA, \
229 .hardware_event = false, \
230 .client_managed = false, \
231 .offload_channel = false, \
232 }
233
234 #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
235 { \
236 .num_elements = el_count, \
237 .irq_moderation_ms = 1, \
238 .irq = (ev_ring) + 1, \
239 .priority = 1, \
240 .mode = MHI_DB_BRST_DISABLE, \
241 .data_type = MHI_ER_DATA, \
242 .hardware_event = true, \
243 .client_managed = false, \
244 .offload_channel = false, \
245 .channel = ch_num, \
246 }
247
248 static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
249 MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
250 MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
251 MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
252 MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
253 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
254 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
255 MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
256 MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
257 MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
258 MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
259 MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
260 MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
261 MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
262 MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
263 MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
264 };
265
266 static struct mhi_event_config mhi_qcom_qdu100_events[] = {
267 /* first ring is control+data ring */
268 MHI_EVENT_CONFIG_CTRL(0, 64),
269 /* SAHARA dedicated event ring */
270 MHI_EVENT_CONFIG_SW_DATA(1, 256),
271 /* Software channels dedicated event ring */
272 MHI_EVENT_CONFIG_SW_DATA(2, 64),
273 MHI_EVENT_CONFIG_SW_DATA(3, 256),
274 MHI_EVENT_CONFIG_SW_DATA(4, 256),
275 /* Software IP channels dedicated event ring */
276 MHI_EVENT_CONFIG_SW_DATA(5, 512),
277 MHI_EVENT_CONFIG_SW_DATA(6, 512),
278 MHI_EVENT_CONFIG_SW_DATA(7, 512),
279 };
280
281 static const struct mhi_controller_config mhi_qcom_qdu100_config = {
282 .max_channels = 128,
283 .timeout_ms = 120000,
284 .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
285 .ch_cfg = mhi_qcom_qdu100_channels,
286 .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
287 .event_cfg = mhi_qcom_qdu100_events,
288 };
289
290 static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
291 .name = "qcom-qdu100",
292 .fw = "qcom/qdu100/xbl_s.melf",
293 .edl_trigger = true,
294 .config = &mhi_qcom_qdu100_config,
295 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
296 .dma_data_width = 32,
297 .sideband_wake = false,
298 };
299
300 static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
301 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
302 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
303 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
304 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
305 MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
306 MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
307 MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
308 MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
309 MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
310 MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
311 MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
312 MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
313 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
314 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
315 };
316
317 static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
318 /* first ring is control+data ring */
319 MHI_EVENT_CONFIG_CTRL(0, 64),
320 /* DIAG dedicated event ring */
321 MHI_EVENT_CONFIG_DATA(1, 128),
322 /* Software channels dedicated event ring */
323 MHI_EVENT_CONFIG_SW_DATA(2, 64),
324 MHI_EVENT_CONFIG_SW_DATA(3, 64),
325 /* Hardware channels request dedicated hardware event rings */
326 MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
327 MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
328 };
329
330 static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
331 .max_channels = 128,
332 .timeout_ms = 8000,
333 .ready_timeout_ms = 50000,
334 .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
335 .ch_cfg = modem_qcom_v1_mhi_channels,
336 .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
337 .event_cfg = modem_qcom_v1_mhi_events,
338 };
339
340 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
341 .max_channels = 128,
342 .timeout_ms = 8000,
343 .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
344 .ch_cfg = modem_qcom_v1_mhi_channels,
345 .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
346 .event_cfg = modem_qcom_v1_mhi_events,
347 };
348
349 static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
350 .name = "qcom-sdx75m",
351 .fw = "qcom/sdx75m/xbl.elf",
352 .edl = "qcom/sdx75m/edl.mbn",
353 .edl_trigger = true,
354 .config = &modem_qcom_v2_mhiv_config,
355 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
356 .dma_data_width = 32,
357 .sideband_wake = false,
358 };
359
360 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
361 .name = "qcom-sdx65m",
362 .fw = "qcom/sdx65m/xbl.elf",
363 .edl = "qcom/sdx65m/edl.mbn",
364 .edl_trigger = true,
365 .config = &modem_qcom_v1_mhiv_config,
366 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
367 .dma_data_width = 32,
368 .sideband_wake = false,
369 };
370
371 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
372 .name = "qcom-sdx55m",
373 .fw = "qcom/sdx55m/sbl1.mbn",
374 .edl = "qcom/sdx55m/edl.mbn",
375 .edl_trigger = true,
376 .config = &modem_qcom_v1_mhiv_config,
377 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
378 .dma_data_width = 32,
379 .mru_default = 32768,
380 .sideband_wake = false,
381 };
382
383 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
384 .name = "qcom-sdx24",
385 .edl = "qcom/prog_firehose_sdx24.mbn",
386 .config = &modem_qcom_v1_mhiv_config,
387 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
388 .dma_data_width = 32,
389 .sideband_wake = true,
390 };
391
392 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
393 MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
394 MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
395 MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
396 MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
397 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
398 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
399 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
400 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
401 MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
402 MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
403 /* The EDL firmware is a flash-programmer exposing firehose protocol */
404 MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
405 MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
406 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
407 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
408 };
409
410 static struct mhi_event_config mhi_quectel_em1xx_events[] = {
411 MHI_EVENT_CONFIG_CTRL(0, 128),
412 MHI_EVENT_CONFIG_DATA(1, 128),
413 MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
414 MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
415 };
416
417 static const struct mhi_controller_config modem_quectel_em1xx_config = {
418 .max_channels = 128,
419 .timeout_ms = 20000,
420 .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
421 .ch_cfg = mhi_quectel_em1xx_channels,
422 .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
423 .event_cfg = mhi_quectel_em1xx_events,
424 };
425
426 static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
427 .name = "quectel-em1xx",
428 .edl = "qcom/prog_firehose_sdx24.mbn",
429 .config = &modem_quectel_em1xx_config,
430 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
431 .dma_data_width = 32,
432 .mru_default = 32768,
433 .sideband_wake = true,
434 };
435
436 static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
437 .name = "quectel-rm5xx",
438 .edl = "qcom/prog_firehose_sdx6x.elf",
439 .config = &modem_quectel_em1xx_config,
440 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
441 .dma_data_width = 32,
442 .mru_default = 32768,
443 .sideband_wake = true,
444 };
445
446 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
447 MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
448 MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
449 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
450 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
451 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
452 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
453 MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
454 MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
455 MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
456 MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
457 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
458 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
459 };
460
461 static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
462 MHI_EVENT_CONFIG_CTRL(0, 128),
463 MHI_EVENT_CONFIG_DATA(1, 128),
464 MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
465 MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
466 };
467
468 static const struct mhi_controller_config modem_foxconn_sdx55_config = {
469 .max_channels = 128,
470 .timeout_ms = 20000,
471 .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
472 .ch_cfg = mhi_foxconn_sdx55_channels,
473 .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
474 .event_cfg = mhi_foxconn_sdx55_events,
475 };
476
477 static const struct mhi_controller_config modem_foxconn_sdx72_config = {
478 .max_channels = 128,
479 .timeout_ms = 20000,
480 .ready_timeout_ms = 50000,
481 .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
482 .ch_cfg = mhi_foxconn_sdx55_channels,
483 .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
484 .event_cfg = mhi_foxconn_sdx55_events,
485 };
486
487 static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
488 .name = "foxconn-sdx55",
489 .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
490 .edl_trigger = true,
491 .config = &modem_foxconn_sdx55_config,
492 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
493 .dma_data_width = 32,
494 .mru_default = 32768,
495 .sideband_wake = false,
496 };
497
498 static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
499 .name = "foxconn-t99w175",
500 .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
501 .edl_trigger = true,
502 .config = &modem_foxconn_sdx55_config,
503 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
504 .dma_data_width = 32,
505 .mru_default = 32768,
506 .sideband_wake = false,
507 };
508
509 static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
510 .name = "foxconn-dw5930e",
511 .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
512 .edl_trigger = true,
513 .config = &modem_foxconn_sdx55_config,
514 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
515 .dma_data_width = 32,
516 .mru_default = 32768,
517 .sideband_wake = false,
518 };
519
520 static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
521 .name = "foxconn-t99w368",
522 .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
523 .edl_trigger = true,
524 .config = &modem_foxconn_sdx55_config,
525 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
526 .dma_data_width = 32,
527 .mru_default = 32768,
528 .sideband_wake = false,
529 };
530
531 static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
532 .name = "foxconn-t99w373",
533 .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
534 .edl_trigger = true,
535 .config = &modem_foxconn_sdx55_config,
536 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
537 .dma_data_width = 32,
538 .mru_default = 32768,
539 .sideband_wake = false,
540 };
541
542 static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
543 .name = "foxconn-t99w510",
544 .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
545 .edl_trigger = true,
546 .config = &modem_foxconn_sdx55_config,
547 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
548 .dma_data_width = 32,
549 .mru_default = 32768,
550 .sideband_wake = false,
551 };
552
553 static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
554 .name = "foxconn-dw5932e",
555 .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
556 .edl_trigger = true,
557 .config = &modem_foxconn_sdx55_config,
558 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
559 .dma_data_width = 32,
560 .mru_default = 32768,
561 .sideband_wake = false,
562 };
563
564 static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
565 .name = "foxconn-t99w515",
566 .edl = "qcom/sdx72m/foxconn/edl.mbn",
567 .edl_trigger = true,
568 .config = &modem_foxconn_sdx72_config,
569 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
570 .dma_data_width = 32,
571 .mru_default = 32768,
572 .sideband_wake = false,
573 };
574
575 static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
576 .name = "foxconn-dw5934e",
577 .edl = "qcom/sdx72m/foxconn/edl.mbn",
578 .edl_trigger = true,
579 .config = &modem_foxconn_sdx72_config,
580 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
581 .dma_data_width = 32,
582 .mru_default = 32768,
583 .sideband_wake = false,
584 };
585
586 static const struct mhi_channel_config mhi_mv3x_channels[] = {
587 MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
588 MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
589 /* MBIM Control Channel */
590 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
591 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
592 /* MBIM Data Channel */
593 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
594 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
595 };
596
597 static struct mhi_event_config mhi_mv3x_events[] = {
598 MHI_EVENT_CONFIG_CTRL(0, 256),
599 MHI_EVENT_CONFIG_DATA(1, 256),
600 MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
601 MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
602 };
603
604 static const struct mhi_controller_config modem_mv3x_config = {
605 .max_channels = 128,
606 .timeout_ms = 20000,
607 .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
608 .ch_cfg = mhi_mv3x_channels,
609 .num_events = ARRAY_SIZE(mhi_mv3x_events),
610 .event_cfg = mhi_mv3x_events,
611 };
612
613 static const struct mhi_pci_dev_info mhi_mv31_info = {
614 .name = "cinterion-mv31",
615 .config = &modem_mv3x_config,
616 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
617 .dma_data_width = 32,
618 .mru_default = 32768,
619 };
620
621 static const struct mhi_pci_dev_info mhi_mv32_info = {
622 .name = "cinterion-mv32",
623 .config = &modem_mv3x_config,
624 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
625 .dma_data_width = 32,
626 .mru_default = 32768,
627 };
628
629 static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
630 MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
631 MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
632 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
633 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
634 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
635 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
636 MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
637 MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
638 MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
639 MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
640 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
641 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
642 };
643
644 static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
645 /* first ring is control+data and DIAG ring */
646 MHI_EVENT_CONFIG_CTRL(0, 2048),
647 /* Hardware channels request dedicated hardware event rings */
648 MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
649 MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
650 };
651
652 static const struct mhi_controller_config modem_sierra_em919x_config = {
653 .max_channels = 128,
654 .timeout_ms = 24000,
655 .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
656 .ch_cfg = mhi_sierra_em919x_channels,
657 .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
658 .event_cfg = modem_sierra_em919x_mhi_events,
659 };
660
661 static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
662 .name = "sierra-em919x",
663 .config = &modem_sierra_em919x_config,
664 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
665 .dma_data_width = 32,
666 .sideband_wake = false,
667 };
668
669 static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
670 MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
671 MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
672 MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
673 MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
674 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
675 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
676 };
677
678 static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
679 MHI_EVENT_CONFIG_CTRL(0, 128),
680 MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
681 MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
682 };
683
684 static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
685 .max_channels = 128,
686 .timeout_ms = 20000,
687 .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
688 .ch_cfg = mhi_telit_fn980_hw_v1_channels,
689 .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
690 .event_cfg = mhi_telit_fn980_hw_v1_events,
691 };
692
693 static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
694 .name = "telit-fn980-hwv1",
695 .fw = "qcom/sdx55m/sbl1.mbn",
696 .edl = "qcom/sdx55m/edl.mbn",
697 .config = &modem_telit_fn980_hw_v1_config,
698 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
699 .dma_data_width = 32,
700 .mru_default = 32768,
701 .sideband_wake = false,
702 };
703
704 static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
705 MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
706 MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
707 MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
708 MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
709 MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
710 MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
711 MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
712 MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
713 MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
714 MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
715 MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
716 MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
717 };
718
719 static struct mhi_event_config mhi_telit_fn990_events[] = {
720 MHI_EVENT_CONFIG_CTRL(0, 128),
721 MHI_EVENT_CONFIG_DATA(1, 128),
722 MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
723 MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
724 };
725
726 static const struct mhi_controller_config modem_telit_fn990_config = {
727 .max_channels = 128,
728 .timeout_ms = 20000,
729 .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
730 .ch_cfg = mhi_telit_fn990_channels,
731 .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
732 .event_cfg = mhi_telit_fn990_events,
733 };
734
735 static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
736 .name = "telit-fn990",
737 .config = &modem_telit_fn990_config,
738 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
739 .dma_data_width = 32,
740 .sideband_wake = false,
741 .mru_default = 32768,
742 };
743
744 static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
745 .name = "telit-fe990a",
746 .config = &modem_telit_fn990_config,
747 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
748 .dma_data_width = 32,
749 .sideband_wake = false,
750 .mru_default = 32768,
751 };
752
753 static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
754 .name = "netprisma-lcur57",
755 .edl = "qcom/prog_firehose_sdx24.mbn",
756 .config = &modem_quectel_em1xx_config,
757 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
758 .dma_data_width = 32,
759 .mru_default = 32768,
760 .sideband_wake = true,
761 };
762
763 static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
764 .name = "netprisma-fcun69",
765 .edl = "qcom/prog_firehose_sdx6x.elf",
766 .config = &modem_quectel_em1xx_config,
767 .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
768 .dma_data_width = 32,
769 .mru_default = 32768,
770 .sideband_wake = true,
771 };
772
773 /* Keep the list sorted by PID. A new VID should be added as the last entry */
774 static const struct pci_device_id mhi_pci_id_table[] = {
775 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
776 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
777 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
778 .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
779 /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
780 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
781 .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
782 /* Telit FN980 hardware revision v1 */
783 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
784 .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
785 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
786 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
787 /* Telit FN990 */
788 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
789 .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
790 /* Telit FE990A */
791 { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
792 .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
793 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
794 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
795 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
796 .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
797 /* QDU100, x100-DU */
798 { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
799 .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
800 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
801 .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
802 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
803 .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
804 /* RM520N-GL (sdx6x), eSIM */
805 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
806 .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
807 /* RM520N-GL (sdx6x), Lenovo variant */
808 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
809 .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
810 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
811 .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
812 { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
813 .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
814 /* T99W175 (sdx55), Both for eSIM and Non-eSIM */
815 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
816 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
817 /* DW5930e (sdx55), With eSIM, It's also T99W175 */
818 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
819 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
820 /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
821 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
822 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
823 /* T99W175 (sdx55), Based on Qualcomm new baseline */
824 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
825 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
826 /* T99W175 (sdx55) */
827 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
828 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
829 /* T99W368 (sdx65) */
830 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
831 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info },
832 /* T99W373 (sdx62) */
833 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
834 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info },
835 /* T99W510 (sdx24), variant 1 */
836 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
837 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
838 /* T99W510 (sdx24), variant 2 */
839 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
840 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
841 /* T99W510 (sdx24), variant 3 */
842 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
843 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
844 /* DW5932e-eSIM (sdx62), With eSIM */
845 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
846 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
847 /* DW5932e (sdx62), Non-eSIM */
848 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
849 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
850 /* T99W515 (sdx72) */
851 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
852 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
853 /* DW5934e(sdx72), With eSIM */
854 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
855 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
856 /* DW5934e(sdx72), Non-eSIM */
857 { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
858 .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
859 /* MV31-W (Cinterion) */
860 { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
861 .driver_data = (kernel_ulong_t) &mhi_mv31_info },
862 /* MV31-W (Cinterion), based on new baseline */
863 { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
864 .driver_data = (kernel_ulong_t) &mhi_mv31_info },
865 /* MV32-WA (Cinterion) */
866 { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
867 .driver_data = (kernel_ulong_t) &mhi_mv32_info },
868 /* MV32-WB (Cinterion) */
869 { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
870 .driver_data = (kernel_ulong_t) &mhi_mv32_info },
871 /* T99W175 (sdx55), HP variant */
872 { PCI_DEVICE(0x03f0, 0x0a6c),
873 .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
874 /* NETPRISMA LCUR57 (SDX24) */
875 { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
876 .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
877 /* NETPRISMA FCUN69 (SDX6X) */
878 { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
879 .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
880 { }
881 };
882 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
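/*
 * Adding support for a new modem usually only takes a device info descriptor
 * and a match entry in mhi_pci_id_table above. A minimal sketch, reusing an
 * existing controller config (the vendor/device IDs and name below are
 * hypothetical):
 *
 *   static const struct mhi_pci_dev_info mhi_example_modem_info = {
 *           .name = "example-modem",
 *           .config = &modem_qcom_v1_mhiv_config,
 *           .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
 *           .dma_data_width = 32,
 *           .sideband_wake = false,
 *   };
 *
 *   { PCI_DEVICE(0x1234, 0x5678),
 *           .driver_data = (kernel_ulong_t) &mhi_example_modem_info },
 */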
883
884 enum mhi_pci_device_status {
885 MHI_PCI_DEV_STARTED,
886 MHI_PCI_DEV_SUSPENDED,
887 };
888
889 struct mhi_pci_device {
890 struct mhi_controller mhi_cntrl;
891 struct pci_saved_state *pci_state;
892 struct work_struct recovery_work;
893 struct timer_list health_check_timer;
894 unsigned long status;
895 };
896
897 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
898 void __iomem *addr, u32 *out)
899 {
900 *out = readl(addr);
901 return 0;
902 }
903
904 static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
905 void __iomem *addr, u32 val)
906 {
907 writel(val, addr);
908 }
909
910 static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
911 enum mhi_callback cb)
912 {
913 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
914
915 /* Adjust runtime PM policy based on the reported device state */
916 switch (cb) {
917 case MHI_CB_FATAL_ERROR:
918 case MHI_CB_SYS_ERROR:
919 dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
920 pm_runtime_forbid(&pdev->dev);
921 break;
922 case MHI_CB_EE_MISSION_MODE:
923 pm_runtime_allow(&pdev->dev);
924 break;
925 default:
926 break;
927 }
928 }
929
930 static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
931 {
932 /* no-op */
933 }
934
935 static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
936 {
937 /* no-op */
938 }
939
940 static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
941 {
942 /* no-op */
943 }
944
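/*
 * Check that the device is still reachable on the PCI bus by reading its
 * vendor ID from config space: an all-ones or zero value means the link is
 * down or the device is gone.
 */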
945 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
946 {
947 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
948 u16 vendor = 0;
949
950 if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
951 return false;
952
953 if (vendor == (u16) ~0 || vendor == 0)
954 return false;
955
956 return true;
957 }
958
959 static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
960 unsigned int bar_num, u64 dma_mask)
961 {
962 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
963 int err;
964
965 err = pci_assign_resource(pdev, bar_num);
966 if (err)
967 return err;
968
969 err = pcim_enable_device(pdev);
970 if (err) {
971 dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
972 return err;
973 }
974
975 mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
976 if (IS_ERR(mhi_cntrl->regs)) {
977 err = PTR_ERR(mhi_cntrl->regs);
978 dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
979 return err;
980 }
981 mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
982
983 err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
984 if (err) {
985 dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
986 return err;
987 }
988
989 pci_set_master(pdev);
990
991 return 0;
992 }
993
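/*
 * Allocate MSI/MSI-X vectors and build the IRQ table handed to the MHI core:
 * ideally one vector for BHI plus one per event ring, falling back to a
 * single shared vector when fewer are available.
 */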
994 static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
995 const struct mhi_controller_config *mhi_cntrl_config)
996 {
997 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
998 int nr_vectors, i;
999 int *irq;
1000
1001 /*
1002 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
1003 * No explicit pci_free_irq_vectors required, done by pcim_release.
1004 */
1005 mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
1006
1007 nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
1008 if (nr_vectors < 0) {
1009 dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
1010 nr_vectors);
1011 return nr_vectors;
1012 }
1013
1014 if (nr_vectors < mhi_cntrl->nr_irqs) {
1015 dev_warn(&pdev->dev, "using shared MSI\n");
1016
1017 /* Patch msi vectors, use only one (shared) */
1018 for (i = 0; i < mhi_cntrl_config->num_events; i++)
1019 mhi_cntrl_config->event_cfg[i].irq = 0;
1020 mhi_cntrl->nr_irqs = 1;
1021 }
1022
1023 irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
1024 if (!irq)
1025 return -ENOMEM;
1026
1027 for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
1028 int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
1029
1030 irq[i] = pci_irq_vector(pdev, vector);
1031 }
1032
1033 mhi_cntrl->irq = irq;
1034
1035 return 0;
1036 }
1037
1038 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
1039 {
1040 /* The runtime_get() MHI callback means:
1041 * Do whatever is requested to leave M3.
1042 */
1043 return pm_runtime_get(mhi_cntrl->cntrl_dev);
1044 }
1045
1046 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
1047 {
1048 /* The runtime_put() MHI callback means:
1049 * Device can be moved into the M3 state.
1050 */
1051 pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
1052 pm_runtime_put(mhi_cntrl->cntrl_dev);
1053 }
1054
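/*
 * Recovery work: tear the MHI stack down, restore the saved PCI state and try
 * to power the device back up, falling back to a PCI function reset when the
 * device does not come back.
 */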
1055 static void mhi_pci_recovery_work(struct work_struct *work)
1056 {
1057 struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
1058 recovery_work);
1059 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1060 struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
1061 int err;
1062
1063 dev_warn(&pdev->dev, "device recovery started\n");
1064
1065 del_timer(&mhi_pdev->health_check_timer);
1066 pm_runtime_forbid(&pdev->dev);
1067
1068 /* Clean up MHI state */
1069 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1070 mhi_power_down(mhi_cntrl, false);
1071 mhi_unprepare_after_power_down(mhi_cntrl);
1072 }
1073
1074 pci_set_power_state(pdev, PCI_D0);
1075 pci_load_saved_state(pdev, mhi_pdev->pci_state);
1076 pci_restore_state(pdev);
1077
1078 if (!mhi_pci_is_alive(mhi_cntrl))
1079 goto err_try_reset;
1080
1081 err = mhi_prepare_for_power_up(mhi_cntrl);
1082 if (err)
1083 goto err_try_reset;
1084
1085 err = mhi_sync_power_up(mhi_cntrl);
1086 if (err)
1087 goto err_unprepare;
1088
1089 dev_dbg(&pdev->dev, "Recovery completed\n");
1090
1091 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1092 mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1093 return;
1094
1095 err_unprepare:
1096 mhi_unprepare_after_power_down(mhi_cntrl);
1097 err_try_reset:
1098 err = pci_try_reset_function(pdev);
1099 if (err)
1100 dev_err(&pdev->dev, "Recovery failed: %d\n", err);
1101 }
1102
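/*
 * Periodic health check: re-armed every HEALTH_CHECK_PERIOD while the device
 * is started and not suspended, scheduling the recovery work if the device
 * stops responding.
 */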
1103 static void health_check(struct timer_list *t)
1104 {
1105 struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
1106 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1107
1108 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1109 test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1110 return;
1111
1112 if (!mhi_pci_is_alive(mhi_cntrl)) {
1113 dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
1114 queue_work(system_long_wq, &mhi_pdev->recovery_work);
1115 return;
1116 }
1117
1118 /* reschedule in two seconds */
1119 mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1120 }
1121
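/*
 * Trigger EDL mode: wake the device, locate the channel doorbell region, ring
 * doorbell MHI_EDL_DB with MHI_EDL_COOKIE (upper half first), then issue a
 * SoC reset so the device reboots into its flash-programmer.
 */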
1122 static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl)
1123 {
1124 void __iomem *base = mhi_cntrl->regs;
1125 void __iomem *edl_db;
1126 int ret;
1127 u32 val;
1128
1129 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1130 if (ret) {
1131 dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n");
1132 return ret;
1133 }
1134
1135 pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
1136 mhi_cntrl->runtime_get(mhi_cntrl);
1137
1138 ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
1139 if (ret)
1140 goto err_get_chdb;
1141
1142 edl_db = base + val + (8 * MHI_EDL_DB);
1143
1144 mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE));
1145 mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE));
1146
1147 mhi_soc_reset(mhi_cntrl);
1148
1149 err_get_chdb:
1150 mhi_cntrl->runtime_put(mhi_cntrl);
1151 mhi_device_put(mhi_cntrl->mhi_dev);
1152
1153 return ret;
1154 }
1155
1156 static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1157 {
1158 const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
1159 const struct mhi_controller_config *mhi_cntrl_config;
1160 struct mhi_pci_device *mhi_pdev;
1161 struct mhi_controller *mhi_cntrl;
1162 int err;
1163
1164 dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
1165
1166 /* mhi_pdev.mhi_cntrl must be zero-initialized */
1167 mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
1168 if (!mhi_pdev)
1169 return -ENOMEM;
1170
1171 INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
1172 timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
1173
1174 mhi_cntrl_config = info->config;
1175 mhi_cntrl = &mhi_pdev->mhi_cntrl;
1176
1177 mhi_cntrl->cntrl_dev = &pdev->dev;
1178 mhi_cntrl->iova_start = 0;
1179 mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
1180 mhi_cntrl->fw_image = info->fw;
1181 mhi_cntrl->edl_image = info->edl;
1182
1183 mhi_cntrl->read_reg = mhi_pci_read_reg;
1184 mhi_cntrl->write_reg = mhi_pci_write_reg;
1185 mhi_cntrl->status_cb = mhi_pci_status_cb;
1186 mhi_cntrl->runtime_get = mhi_pci_runtime_get;
1187 mhi_cntrl->runtime_put = mhi_pci_runtime_put;
1188 mhi_cntrl->mru = info->mru_default;
1189 mhi_cntrl->name = info->name;
1190
1191 if (info->edl_trigger)
1192 mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
1193
1194 if (info->sideband_wake) {
1195 mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
1196 mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
1197 mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
1198 }
1199
1200 err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
1201 if (err)
1202 return err;
1203
1204 err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
1205 if (err)
1206 return err;
1207
1208 pci_set_drvdata(pdev, mhi_pdev);
1209
1210 /* Keep the stored PCI config space at hand for restore after a sudden
1211 * PCI error: cache the state locally and discard the PCI core one.
1212 */
1213 pci_save_state(pdev);
1214 mhi_pdev->pci_state = pci_store_saved_state(pdev);
1215 pci_load_saved_state(pdev, NULL);
1216
1217 err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
1218 if (err)
1219 return err;
1220
1221 /* MHI bus does not power up the controller by default */
1222 err = mhi_prepare_for_power_up(mhi_cntrl);
1223 if (err) {
1224 dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1225 goto err_unregister;
1226 }
1227
1228 err = mhi_sync_power_up(mhi_cntrl);
1229 if (err) {
1230 dev_err(&pdev->dev, "failed to power up MHI controller\n");
1231 goto err_unprepare;
1232 }
1233
1234 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1235
1236 /* start health check */
1237 mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1238
1239 /* Only allow runtime-suspend if PME capable (for wakeup) */
1240 if (pci_pme_capable(pdev, PCI_D3hot)) {
1241 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
1242 pm_runtime_use_autosuspend(&pdev->dev);
1243 pm_runtime_mark_last_busy(&pdev->dev);
1244 pm_runtime_put_noidle(&pdev->dev);
1245 }
1246
1247 return 0;
1248
1249 err_unprepare:
1250 mhi_unprepare_after_power_down(mhi_cntrl);
1251 err_unregister:
1252 mhi_unregister_controller(mhi_cntrl);
1253
1254 return err;
1255 }
1256
1257 static void mhi_pci_remove(struct pci_dev *pdev)
1258 {
1259 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1260 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1261
1262 del_timer_sync(&mhi_pdev->health_check_timer);
1263 cancel_work_sync(&mhi_pdev->recovery_work);
1264
1265 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1266 mhi_power_down(mhi_cntrl, true);
1267 mhi_unprepare_after_power_down(mhi_cntrl);
1268 }
1269
1270 /* balancing probe put_noidle */
1271 if (pci_pme_capable(pdev, PCI_D3hot))
1272 pm_runtime_get_noresume(&pdev->dev);
1273
1274 mhi_unregister_controller(mhi_cntrl);
1275 }
1276
1277 static void mhi_pci_shutdown(struct pci_dev *pdev)
1278 {
1279 mhi_pci_remove(pdev);
1280 pci_set_power_state(pdev, PCI_D3hot);
1281 }
1282
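/*
 * PCI reset_prepare hook: shut the MHI stack down and request an internal
 * device reset so the device comes back in a clean state once the PCI reset
 * completes (see mhi_pci_reset_done()).
 */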
1283 static void mhi_pci_reset_prepare(struct pci_dev *pdev)
1284 {
1285 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1286 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1287
1288 dev_info(&pdev->dev, "reset\n");
1289
1290 del_timer(&mhi_pdev->health_check_timer);
1291
1292 /* Clean up MHI state */
1293 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1294 mhi_power_down(mhi_cntrl, false);
1295 mhi_unprepare_after_power_down(mhi_cntrl);
1296 }
1297
1298 /* cause internal device reset */
1299 mhi_soc_reset(mhi_cntrl);
1300
1301 /* Be sure device reset has been executed */
1302 msleep(MHI_POST_RESET_DELAY_MS);
1303 }
1304
1305 static void mhi_pci_reset_done(struct pci_dev *pdev)
1306 {
1307 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1308 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1309 int err;
1310
1311 /* Restore initial known working PCI state */
1312 pci_load_saved_state(pdev, mhi_pdev->pci_state);
1313 pci_restore_state(pdev);
1314
1315 /* Is device status available? */
1316 if (!mhi_pci_is_alive(mhi_cntrl)) {
1317 dev_err(&pdev->dev, "reset failed\n");
1318 return;
1319 }
1320
1321 err = mhi_prepare_for_power_up(mhi_cntrl);
1322 if (err) {
1323 dev_err(&pdev->dev, "failed to prepare MHI controller\n");
1324 return;
1325 }
1326
1327 err = mhi_sync_power_up(mhi_cntrl);
1328 if (err) {
1329 dev_err(&pdev->dev, "failed to power up MHI controller\n");
1330 mhi_unprepare_after_power_down(mhi_cntrl);
1331 return;
1332 }
1333
1334 set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
1335 mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1336 }
1337
1338 static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
1339 pci_channel_state_t state)
1340 {
1341 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1342 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1343
1344 dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
1345
1346 if (state == pci_channel_io_perm_failure)
1347 return PCI_ERS_RESULT_DISCONNECT;
1348
1349 /* Clean up MHI state */
1350 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1351 mhi_power_down(mhi_cntrl, false);
1352 mhi_unprepare_after_power_down(mhi_cntrl);
1353 } else {
1354 /* Nothing to do */
1355 return PCI_ERS_RESULT_RECOVERED;
1356 }
1357
1358 pci_disable_device(pdev);
1359
1360 return PCI_ERS_RESULT_NEED_RESET;
1361 }
1362
1363 static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
1364 {
1365 if (pci_enable_device(pdev)) {
1366 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
1367 return PCI_ERS_RESULT_DISCONNECT;
1368 }
1369
1370 return PCI_ERS_RESULT_RECOVERED;
1371 }
1372
1373 static void mhi_pci_io_resume(struct pci_dev *pdev)
1374 {
1375 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
1376
1377 dev_err(&pdev->dev, "PCI slot reset done\n");
1378
1379 queue_work(system_long_wq, &mhi_pdev->recovery_work);
1380 }
1381
1382 static const struct pci_error_handlers mhi_pci_err_handler = {
1383 .error_detected = mhi_pci_error_detected,
1384 .slot_reset = mhi_pci_slot_reset,
1385 .resume = mhi_pci_io_resume,
1386 .reset_prepare = mhi_pci_reset_prepare,
1387 .reset_done = mhi_pci_reset_done,
1388 };
1389
1390 static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
1391 {
1392 struct pci_dev *pdev = to_pci_dev(dev);
1393 struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1394 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1395 int err;
1396
1397 if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1398 return 0;
1399
1400 del_timer(&mhi_pdev->health_check_timer);
1401 cancel_work_sync(&mhi_pdev->recovery_work);
1402
1403 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1404 mhi_cntrl->ee != MHI_EE_AMSS)
1405 goto pci_suspend; /* Nothing to do at MHI level */
1406
1407 /* Transition to M3 state */
1408 err = mhi_pm_suspend(mhi_cntrl);
1409 if (err) {
1410 dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
1411 clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
1412 return -EBUSY;
1413 }
1414
1415 pci_suspend:
1416 pci_disable_device(pdev);
1417 pci_wake_from_d3(pdev, true);
1418
1419 return 0;
1420 }
1421
1422 static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
1423 {
1424 struct pci_dev *pdev = to_pci_dev(dev);
1425 struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1426 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1427 int err;
1428
1429 if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
1430 return 0;
1431
1432 err = pci_enable_device(pdev);
1433 if (err)
1434 goto err_recovery;
1435
1436 pci_set_master(pdev);
1437 pci_wake_from_d3(pdev, false);
1438
1439 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
1440 mhi_cntrl->ee != MHI_EE_AMSS)
1441 return 0; /* Nothing to do at MHI level */
1442
1443 /* Exit M3, transition to M0 state */
1444 err = mhi_pm_resume(mhi_cntrl);
1445 if (err) {
1446 dev_err(&pdev->dev, "failed to resume device: %d\n", err);
1447 goto err_recovery;
1448 }
1449
1450 /* Resume health check */
1451 mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
1452
1453 /* It can be a remote wakeup (no mhi runtime_get), update access time */
1454 pm_runtime_mark_last_busy(dev);
1455
1456 return 0;
1457
1458 err_recovery:
1459 /* Do not return an error here, to avoid messing up the PCI device state;
1460 * the device likely lost power (d3cold) and simply needs to be reset from
1461 * the recovery procedure. Trigger the recovery asynchronously so it does
1462 * not delay system suspend exit.
1463 */
1464 queue_work(system_long_wq, &mhi_pdev->recovery_work);
1465 pm_runtime_mark_last_busy(dev);
1466
1467 return 0;
1468 }
1469
1470 static int __maybe_unused mhi_pci_suspend(struct device *dev)
1471 {
1472 pm_runtime_disable(dev);
1473 return mhi_pci_runtime_suspend(dev);
1474 }
1475
1476 static int __maybe_unused mhi_pci_resume(struct device *dev)
1477 {
1478 int ret;
1479
1480 /* Depending on the platform, the device may have lost power (d3cold); we
1481 * need to resume it now to check its state and recover when necessary.
1482 */
1483 ret = mhi_pci_runtime_resume(dev);
1484 pm_runtime_enable(dev);
1485
1486 return ret;
1487 }
1488
1489 static int __maybe_unused mhi_pci_freeze(struct device *dev)
1490 {
1491 struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1492 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
1493
1494 /* We want to stop all operations: hibernation does not guarantee that the
1495 * device will be in the same state as before freezing, especially if the
1496 * intermediate restore kernel reinitializes the MHI device with a new
1497 * context.
1498 */
1499 flush_work(&mhi_pdev->recovery_work);
1500 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
1501 mhi_power_down(mhi_cntrl, true);
1502 mhi_unprepare_after_power_down(mhi_cntrl);
1503 }
1504
1505 return 0;
1506 }
1507
1508 static int __maybe_unused mhi_pci_restore(struct device *dev)
1509 {
1510 struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
1511
1512 /* Reinitialize the device */
1513 queue_work(system_long_wq, &mhi_pdev->recovery_work);
1514
1515 return 0;
1516 }
1517
1518 static const struct dev_pm_ops mhi_pci_pm_ops = {
1519 SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
1520 #ifdef CONFIG_PM_SLEEP
1521 .suspend = mhi_pci_suspend,
1522 .resume = mhi_pci_resume,
1523 .freeze = mhi_pci_freeze,
1524 .thaw = mhi_pci_restore,
1525 .poweroff = mhi_pci_freeze,
1526 .restore = mhi_pci_restore,
1527 #endif
1528 };
1529
1530 static struct pci_driver mhi_pci_driver = {
1531 .name = "mhi-pci-generic",
1532 .id_table = mhi_pci_id_table,
1533 .probe = mhi_pci_probe,
1534 .remove = mhi_pci_remove,
1535 .shutdown = mhi_pci_shutdown,
1536 .err_handler = &mhi_pci_err_handler,
1537 .driver.pm = &mhi_pci_pm_ops
1538 };
1539 module_pci_driver(mhi_pci_driver);
1540
1541 MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
1542 MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
1543 MODULE_LICENSE("GPL");
1544