// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MHI PCI driver - MHI over PCI controller driver
 *
 * This module is a generic driver for registering MHI-over-PCI devices,
 * such as PCIe QCOM modems.
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define MHI_PCI_DEFAULT_BAR_NUM 0

#define MHI_POST_RESET_DELAY_MS 2000

#define HEALTH_CHECK_PERIOD (HZ * 2)

/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES	0x1269
#define PCI_VENDOR_ID_QUECTEL	0x1eac
#define PCI_VENDOR_ID_NETPRISMA	0x203e

#define MHI_EDL_DB		91
#define MHI_EDL_COOKIE		0xEDEDEDED

/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @edl_trigger: capable of triggering EDL mode in the device (if supported)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 * @mru_default: default MRU size for MBIM network packets
 * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
 *		   of inband wake support (such as sdx24)
 */
struct mhi_pci_dev_info {
	const struct mhi_controller_config *config;
	const char *name;
	const char *fw;
	const char *edl;
	bool edl_trigger;
	unsigned int bar_num;
	unsigned int dma_data_width;
	unsigned int mru_default;
	bool sideband_wake;
};

#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_TO_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	} \

#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}

#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
		.auto_queue = true, \
	}

#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 0, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_CTRL, \
		.hardware_event = false, \
		.client_managed = false, \
		.offload_channel = false, \
	}

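/*
 * The _HW_ channel variants below differ from the generic ones above: they
 * enable burst doorbell mode and allow the doorbell mode to be switched at
 * runtime, as used by the hardware-accelerated IP_HW* data channels.
 */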
#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_TO_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_ENABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = true, \
	} \

#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_AMSS), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_ENABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = true, \
	}

#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_TO_DEVICE, \
		.ee_mask = BIT(MHI_EE_SBL), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	} \

#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_SBL), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}

#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_TO_DEVICE, \
		.ee_mask = BIT(MHI_EE_FP), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	} \

#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
	{ \
		.num = ch_num, \
		.name = ch_name, \
		.num_elements = el_count, \
		.event_ring = ev_ring, \
		.dir = DMA_FROM_DEVICE, \
		.ee_mask = BIT(MHI_EE_FP), \
		.pollcfg = 0, \
		.doorbell = MHI_DB_BRST_DISABLE, \
		.lpm_notify = false, \
		.offload_channel = false, \
		.doorbell_mode_switch = false, \
	}

#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 5, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_DATA, \
		.hardware_event = false, \
		.client_managed = false, \
		.offload_channel = false, \
	}

#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 0, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_DATA, \
		.hardware_event = false, \
		.client_managed = false, \
		.offload_channel = false, \
	}

#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
	{ \
		.num_elements = el_count, \
		.irq_moderation_ms = 1, \
		.irq = (ev_ring) + 1, \
		.priority = 1, \
		.mode = MHI_DB_BRST_DISABLE, \
		.data_type = MHI_ER_DATA, \
		.hardware_event = true, \
		.client_managed = false, \
		.offload_channel = false, \
		.channel = ch_num, \
	}

static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
	MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
};

static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
	/* first ring is control+data ring */
	MHI_EVENT_CONFIG_CTRL(0, 64),
	/* DIAG dedicated event ring */
	MHI_EVENT_CONFIG_DATA(1, 128),
	/* Software channels dedicated event ring */
	MHI_EVENT_CONFIG_SW_DATA(2, 64),
	MHI_EVENT_CONFIG_SW_DATA(3, 64),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};

static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.ready_timeout_ms = 50000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
	.max_channels = 128,
	.timeout_ms = 8000,
	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
	.ch_cfg = modem_qcom_v1_mhi_channels,
	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
	.event_cfg = modem_qcom_v1_mhi_events,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
	.name = "qcom-sdx75m",
	.fw = "qcom/sdx75m/xbl.elf",
	.edl = "qcom/sdx75m/edl.mbn",
	.edl_trigger = true,
	.config = &modem_qcom_v2_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
	.name = "qcom-sdx65m",
	.fw = "qcom/sdx65m/xbl.elf",
	.edl = "qcom/sdx65m/edl.mbn",
	.edl_trigger = true,
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
	.name = "qcom-sdx55m",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.edl_trigger = true,
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
	.name = "qcom-sdx24",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_qcom_v1_mhiv_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = true,
};

static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	/* The EDL firmware is a flash-programmer exposing firehose protocol */
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_quectel_em1xx_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_quectel_em1xx_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
	.ch_cfg = mhi_quectel_em1xx_channels,
	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
	.event_cfg = mhi_quectel_em1xx_events,
};

static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
	.name = "quectel-em1xx",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};

static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
	.name = "quectel-rm5xx",
	.edl = "qcom/prog_firehose_sdx6x.elf",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};

static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
};

static const struct mhi_controller_config modem_foxconn_sdx55_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_controller_config modem_foxconn_sdx72_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.ready_timeout_ms = 50000,
	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
	.ch_cfg = mhi_foxconn_sdx55_channels,
	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
	.event_cfg = mhi_foxconn_sdx55_events,
};

static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
	.name = "foxconn-sdx55",
	.edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
	.name = "foxconn-t99w175",
	.edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
	.name = "foxconn-dw5930e",
	.edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
	.name = "foxconn-t99w368",
	.edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
	.name = "foxconn-t99w373",
	.edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
	.name = "foxconn-t99w510",
	.edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
	.name = "foxconn-dw5932e",
	.edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx55_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
	.name = "foxconn-t99w515",
	.edl = "qcom/sdx72m/foxconn/edl.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx72_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
	.name = "foxconn-dw5934e",
	.edl = "qcom/sdx72m/foxconn/edl.mbn",
	.edl_trigger = true,
	.config = &modem_foxconn_sdx72_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_mv3x_channels[] = {
	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
	/* MBIM Control Channel */
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
	/* MBIM Data Channel */
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
};

static struct mhi_event_config mhi_mv3x_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 256),
	MHI_EVENT_CONFIG_DATA(1, 256),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
};

static const struct mhi_controller_config modem_mv3x_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
	.ch_cfg = mhi_mv3x_channels,
	.num_events = ARRAY_SIZE(mhi_mv3x_events),
	.event_cfg = mhi_mv3x_events,
};

static const struct mhi_pci_dev_info mhi_mv31_info = {
	.name = "cinterion-mv31",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_mv32_info = {
	.name = "cinterion-mv32",
	.config = &modem_mv3x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
};

static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
};

static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
	/* first ring is control+data and DIAG ring */
	MHI_EVENT_CONFIG_CTRL(0, 2048),
	/* Hardware channels request dedicated hardware event rings */
	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_sierra_em919x_config = {
	.max_channels = 128,
	.timeout_ms = 24000,
	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
	.ch_cfg = mhi_sierra_em919x_channels,
	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
	.event_cfg = modem_sierra_em919x_mhi_events,
};

static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
	.name = "sierra-em919x",
	.config = &modem_sierra_em919x_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};

static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
	.event_cfg = mhi_telit_fn980_hw_v1_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
	.name = "telit-fn980-hwv1",
	.fw = "qcom/sdx55m/sbl1.mbn",
	.edl = "qcom/sdx55m/edl.mbn",
	.config = &modem_telit_fn980_hw_v1_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = false,
};

static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};

static struct mhi_event_config mhi_telit_fn990_events[] = {
	MHI_EVENT_CONFIG_CTRL(0, 128),
	MHI_EVENT_CONFIG_DATA(1, 128),
	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};

static const struct mhi_controller_config modem_telit_fn990_config = {
	.max_channels = 128,
	.timeout_ms = 20000,
	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
	.ch_cfg = mhi_telit_fn990_channels,
	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
	.event_cfg = mhi_telit_fn990_events,
};

static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
	.name = "telit-fn990",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
	.name = "telit-fe990a",
	.config = &modem_telit_fn990_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.sideband_wake = false,
	.mru_default = 32768,
};

static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
	.name = "netprisma-lcur57",
	.edl = "qcom/prog_firehose_sdx24.mbn",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};

static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
	.name = "netprisma-fcun69",
	.edl = "qcom/prog_firehose_sdx6x.elf",
	.config = &modem_quectel_em1xx_config,
	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
	.dma_data_width = 32,
	.mru_default = 32768,
	.sideband_wake = true,
};

/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	/* Telit FE990A */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
		.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* RM520N-GL (sdx6x), eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
	/* RM520N-GL (sdx6x), Lenovo variant */
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
		.driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
	/* DW5930e (sdx55), With eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
	/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
	/* T99W175 (sdx55), Based on Qualcomm new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
	/* T99W175 (sdx55) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
	/* T99W368 (sdx65) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info },
	/* T99W373 (sdx62) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info },
	/* T99W510 (sdx24), variant 1 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
	/* T99W510 (sdx24), variant 2 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
	/* T99W510 (sdx24), variant 3 */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
	/* DW5932e-eSIM (sdx62), With eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
	/* DW5932e (sdx62), Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
	/* T99W515 (sdx72) */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
	/* DW5934e(sdx72), With eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
	/* DW5934e(sdx72), Non-eSIM */
	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
	/* MV31-W (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV31-W (Cinterion), based on new baseline */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
	/* MV32-WA (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* MV32-WB (Cinterion) */
	{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
	/* T99W175 (sdx55), HP variant */
	{ PCI_DEVICE(0x03f0, 0x0a6c),
		.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
	/* NETPRISMA LCUR57 (SDX24) */
	{ PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
		.driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
	/* NETPRISMA FCUN69 (SDX6X) */
	{ PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
		.driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
	{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);

enum mhi_pci_device_status {
	MHI_PCI_DEV_STARTED,
	MHI_PCI_DEV_SUSPENDED,
};

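/**
 * struct mhi_pci_device - per-device driver context
 * @mhi_cntrl: embedded MHI controller instance
 * @pci_state: PCI config space saved at probe time, restored on recovery
 * @recovery_work: worker re-initializing the device after an error
 * @health_check_timer: periodic timer checking that the device is still alive
 * @status: bitmap of MHI_PCI_DEV_* device states
 */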
struct mhi_pci_device {
	struct mhi_controller mhi_cntrl;
	struct pci_saved_state *pci_state;
	struct work_struct recovery_work;
	struct timer_list health_check_timer;
	unsigned long status;
};

static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
			    void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}

static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
}

static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
			      enum mhi_callback cb)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);

	/* Nothing to do for now */
	switch (cb) {
	case MHI_CB_FATAL_ERROR:
	case MHI_CB_SYS_ERROR:
		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
		pm_runtime_forbid(&pdev->dev);
		break;
	case MHI_CB_EE_MISSION_MODE:
		pm_runtime_allow(&pdev->dev);
		break;
	default:
		break;
	}
}

static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
{
	/* no-op */
}

static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
{
	/* no-op */
}

static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
{
	/* no-op */
}

static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	u16 vendor = 0;

	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
		return false;

	if (vendor == (u16) ~0 || vendor == 0)
		return false;

	return true;
}

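/*
 * Claim the PCI resources needed by MHI: enable the device, map the MMIO BAR
 * holding the MHI register space and configure the DMA mask.
 */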
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
			 unsigned int bar_num, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	err = pci_assign_resource(pdev, bar_num);
	if (err)
		return err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
		return err;
	}

	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
		return err;
	}
	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);

	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
		return err;
	}

	pci_set_master(pdev);

	return 0;
}

static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *mhi_cntrl_config)
{
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int nr_vectors, i;
	int *irq;

	/*
	 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
	 * No explicit pci_free_irq_vectors required, done by pcim_release.
	 */
	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
	if (nr_vectors < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
			nr_vectors);
		return nr_vectors;
	}

	if (nr_vectors < mhi_cntrl->nr_irqs) {
		dev_warn(&pdev->dev, "using shared MSI\n");

		/* Patch msi vectors, use only one (shared) */
		for (i = 0; i < mhi_cntrl_config->num_events; i++)
			mhi_cntrl_config->event_cfg[i].irq = 0;
		mhi_cntrl->nr_irqs = 1;
	}

	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

		irq[i] = pci_irq_vector(pdev, vector);
	}

	mhi_cntrl->irq = irq;

	return 0;
}

static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_get() MHI callback means:
	 * Do whatever is requested to leave M3.
	 */
	return pm_runtime_get(mhi_cntrl->cntrl_dev);
}

static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
	/* The runtime_put() MHI callback means:
	 * Device can be moved in M3 state.
	 */
	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
	pm_runtime_put(mhi_cntrl->cntrl_dev);
}

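/*
 * Device recovery: after a detected crash or loss of the device, tear the MHI
 * stack down, restore the PCI state saved at probe time and try to power the
 * device back up. Falls back to a PCI function reset if that fails.
 */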
static void mhi_pci_recovery_work(struct work_struct *work)
{
	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
						       recovery_work);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
	int err;

	dev_warn(&pdev->dev, "device recovery started\n");

	del_timer(&mhi_pdev->health_check_timer);
	pm_runtime_forbid(&pdev->dev);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	if (!mhi_pci_is_alive(mhi_cntrl))
		goto err_try_reset;

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err)
		goto err_try_reset;

	err = mhi_sync_power_up(mhi_cntrl);
	if (err)
		goto err_unprepare;

	dev_dbg(&pdev->dev, "Recovery completed\n");

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
	return;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
	if (pci_reset_function(pdev))
		dev_err(&pdev->dev, "Recovery failed\n");
}

static void health_check(struct timer_list *t)
{
	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return;

	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
		queue_work(system_long_wq, &mhi_pdev->recovery_work);
		return;
	}

	/* reschedule in two seconds */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}

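/*
 * Put the device in Emergency Download (EDL) mode: write the EDL cookie to
 * the EDL doorbell register, then reset the SoC so it reboots into the EDL
 * execution environment.
 */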
static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	void __iomem *edl_db;
	int ret;
	u32 val;

	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n");
		return ret;
	}

	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);

	ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
	if (ret)
		goto err_get_chdb;

	edl_db = base + val + (8 * MHI_EDL_DB);

	mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE));
	mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE));

	mhi_soc_reset(mhi_cntrl);

err_get_chdb:
	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);

	return ret;
}

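/*
 * Probe: set up the MHI controller from the per-device info, claim PCI
 * resources, register the controller with the MHI bus, power it up and arm
 * the health-check timer. Runtime PM is only enabled for PME-capable devices.
 */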
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
	const struct mhi_controller_config *mhi_cntrl_config;
	struct mhi_pci_device *mhi_pdev;
	struct mhi_controller *mhi_cntrl;
	int err;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* mhi_pdev.mhi_cntrl must be zero-initialized */
	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
	if (!mhi_pdev)
		return -ENOMEM;

	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);

	mhi_cntrl_config = info->config;
	mhi_cntrl = &mhi_pdev->mhi_cntrl;

	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->iova_start = 0;
	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
	mhi_cntrl->fw_image = info->fw;
	mhi_cntrl->edl_image = info->edl;

	mhi_cntrl->read_reg = mhi_pci_read_reg;
	mhi_cntrl->write_reg = mhi_pci_write_reg;
	mhi_cntrl->status_cb = mhi_pci_status_cb;
	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
	mhi_cntrl->mru = info->mru_default;
	mhi_cntrl->name = info->name;

	if (info->edl_trigger)
		mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;

	if (info->sideband_wake) {
		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
	}

	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
	if (err)
		return err;

	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	pci_set_drvdata(pdev, mhi_pdev);

	/* Have stored pci confspace at hand for restore in sudden PCI error.
	 * cache the state locally and discard the PCI core one.
	 */
	pci_save_state(pdev);
	mhi_pdev->pci_state = pci_store_saved_state(pdev);
	pci_load_saved_state(pdev, NULL);

	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
	if (err)
		return err;

	/* MHI bus does not power up the controller by default */
	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		goto err_unregister;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		goto err_unprepare;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);

	/* start health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* Only allow runtime-suspend if PME capable (for wakeup) */
	if (pci_pme_capable(pdev, PCI_D3hot)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	return 0;

err_unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
	mhi_unregister_controller(mhi_cntrl);

	return err;
}

static void mhi_pci_remove(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	del_timer_sync(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* balancing probe put_noidle */
	if (pci_pme_capable(pdev, PCI_D3hot))
		pm_runtime_get_noresume(&pdev->dev);

	mhi_unregister_controller(mhi_cntrl);
}

static void mhi_pci_shutdown(struct pci_dev *pdev)
{
	mhi_pci_remove(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

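/*
 * Function-level reset handlers: reset_prepare() powers the MHI stack down
 * and triggers an internal SoC reset, reset_done() restores the saved PCI
 * state and powers MHI back up once the reset has completed.
 */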
static void mhi_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_info(&pdev->dev, "reset\n");

	del_timer(&mhi_pdev->health_check_timer);

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	/* cause internal device reset */
	mhi_soc_reset(mhi_cntrl);

	/* Be sure device reset has been executed */
	msleep(MHI_POST_RESET_DELAY_MS);
}

static void mhi_pci_reset_done(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	/* Restore initial known working PCI state */
	pci_load_saved_state(pdev, mhi_pdev->pci_state);
	pci_restore_state(pdev);

	/* Is device status available ? */
	if (!mhi_pci_is_alive(mhi_cntrl)) {
		dev_err(&pdev->dev, "reset failed\n");
		return;
	}

	err = mhi_prepare_for_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
		return;
	}

	err = mhi_sync_power_up(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to power up MHI controller\n");
		mhi_unprepare_after_power_down(mhi_cntrl);
		return;
	}

	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}

static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clean up MHI state */
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, false);
		mhi_unprepare_after_power_down(mhi_cntrl);
	} else {
		/* Nothing to do */
		return PCI_ERS_RESULT_RECOVERED;
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void mhi_pci_io_resume(struct pci_dev *pdev)
{
	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);

	dev_err(&pdev->dev, "PCI slot reset done\n");

	queue_work(system_long_wq, &mhi_pdev->recovery_work);
}

static const struct pci_error_handlers mhi_pci_err_handler = {
	.error_detected = mhi_pci_error_detected,
	.slot_reset = mhi_pci_slot_reset,
	.resume = mhi_pci_io_resume,
	.reset_prepare = mhi_pci_reset_prepare,
	.reset_done = mhi_pci_reset_done,
};

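/*
 * Runtime/system suspend moves the link to the MHI M3 state (when the device
 * is up and in mission mode) before turning the PCI device off; resume does
 * the reverse and schedules recovery if the device does not come back.
 */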
static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	del_timer(&mhi_pdev->health_check_timer);
	cancel_work_sync(&mhi_pdev->recovery_work);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		goto pci_suspend; /* Nothing to do at MHI level */

	/* Transition to M3 state */
	err = mhi_pm_suspend(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
		return -EBUSY;
	}

pci_suspend:
	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);

	return 0;
}

static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
	int err;

	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
		return 0;

	err = pci_enable_device(pdev);
	if (err)
		goto err_recovery;

	pci_set_master(pdev);
	pci_wake_from_d3(pdev, false);

	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
			mhi_cntrl->ee != MHI_EE_AMSS)
		return 0; /* Nothing to do at MHI level */

	/* Exit M3, transition to M0 state */
	err = mhi_pm_resume(mhi_cntrl);
	if (err) {
		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
		goto err_recovery;
	}

	/* Resume health check */
	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);

	/* It can be a remote wakeup (no mhi runtime_get), update access time */
	pm_runtime_mark_last_busy(dev);

	return 0;

err_recovery:
	/* Do not fail to not mess up our PCI device state, the device likely
	 * lost power (d3cold) and we simply need to reset it from the recovery
	 * procedure, trigger the recovery asynchronously to prevent system
	 * suspend exit delaying.
	 */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);
	pm_runtime_mark_last_busy(dev);

	return 0;
}

static int __maybe_unused mhi_pci_suspend(struct device *dev)
{
	pm_runtime_disable(dev);
	return mhi_pci_runtime_suspend(dev);
}

static int __maybe_unused mhi_pci_resume(struct device *dev)
{
	int ret;

	/* Depending the platform, device may have lost power (d3cold), we need
	 * to resume it now to check its state and recover when necessary.
	 */
	ret = mhi_pci_runtime_resume(dev);
	pm_runtime_enable(dev);

	return ret;
}

static int __maybe_unused mhi_pci_freeze(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;

	/* We want to stop all operations, hibernation does not guarantee that
	 * device will be in the same state as before freezing, especially if
	 * the intermediate restore kernel reinitializes MHI device with new
	 * context.
	 */
	flush_work(&mhi_pdev->recovery_work);
	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
		mhi_power_down(mhi_cntrl, true);
		mhi_unprepare_after_power_down(mhi_cntrl);
	}

	return 0;
}

static int __maybe_unused mhi_pci_restore(struct device *dev)
{
	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);

	/* Reinitialize the device */
	queue_work(system_long_wq, &mhi_pdev->recovery_work);

	return 0;
}

static const struct dev_pm_ops mhi_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = mhi_pci_suspend,
	.resume = mhi_pci_resume,
	.freeze = mhi_pci_freeze,
	.thaw = mhi_pci_restore,
	.poweroff = mhi_pci_freeze,
	.restore = mhi_pci_restore,
#endif
};

static struct pci_driver mhi_pci_driver = {
	.name = "mhi-pci-generic",
	.id_table = mhi_pci_id_table,
	.probe = mhi_pci_probe,
	.remove = mhi_pci_remove,
	.shutdown = mhi_pci_shutdown,
	.err_handler = &mhi_pci_err_handler,
	.driver.pm = &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
MODULE_LICENSE("GPL");