// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci device */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* request pci regions and map IO device memory for the mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocate and initialize the mei device structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to resume from runtime suspend mode
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * TXE maps runtime suspend/resume to its own power gating states,
	 * hence we need to go around the native PCI runtime service which
	 * eventually brings the device into D3cold/hot state.
	 * But the TXE device cannot wake up from D3 unlike from its own
	 * power gating. To get around PCI device native runtime pm,
	 * TXE uses runtime pm domain handlers which take precedence.
	 */
	mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier;
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_txe_irq_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_txe_irq_quick_handler,
					   mei_txe_irq_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);

	dev_dbg(device, "rpm: txe: runtime_idle\n");

	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	/*
	 * Autosuspend is requested explicitly above; returning non-zero
	 * tells the PM core not to proceed with a suspend of its own.
	 */
	return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime suspend\n");

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/* keep the irq on, we are staying in D0 */

	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime resume\n");

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		/* inherit the bus pm ops and override the runtime callbacks */
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_shutdown,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");