// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto end;

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto deregister;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to resume from runtime suspend mode
	 * in order to perform link reset flow upon system suspend.
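	 * Setting DPM_FLAG_NO_DIRECT_COMPLETE below keeps the PM core
	 * from taking the direct-complete shortcut, so the suspend and
	 * resume callbacks run even for a runtime-suspended device.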
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * TXE maps runtime suspend/resume to own power gating states,
	 * hence we need to go around native PCI runtime service which
	 * eventually brings the device into D3cold/hot state.
	 * But the TXE device cannot wake up from D3 unlike from own
	 * power gating. To get around PCI device native runtime pm,
	 * TXE uses runtime pm domain handlers which take precedence.
	 */
	mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
deregister:
	mei_deregister(dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier;
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}


#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);

	dev_dbg(device, "rpm: txe: runtime_idle\n");

	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime suspend\n");

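	/*
	 * Enter TXE power gating only when the write queue is idle;
	 * otherwise report -EAGAIN so runtime PM retries later.
	 */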
	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/* keep irq on we are staying in D0 */

	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime resume\n");

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->parent);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->parent, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_shutdown,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");