/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
	int i;

	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		if (hw->mem_addr[i]) {
			pci_iounmap(pdev, hw->mem_addr[i]);
			hw->mem_addr[i] = NULL;
		}
	}
}

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	int err;
	int i;

	/* enable pci device */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto release_regions;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_txe_hw(dev);

	/* mapping IO device memory */
	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
		if (!hw->mem_addr[i]) {
			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
			err = -ENOMEM;
			goto free_device;
		}
	}

	/* MSI is optional; if it cannot be enabled the legacy
	 * shared interrupt path below is used instead */
	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
				NULL,
				mei_txe_irq_thread_handler,
				IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
				mei_txe_irq_quick_handler,
				mei_txe_irq_thread_handler,
				IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	/*
	 * For non-wakeable HW the runtime pm framework
	 * can't be used on the pci device level.
	 * Use domain runtime pm callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

release_irq:

	mei_cancel_work(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

free_device:
	mei_txe_pci_iounmap(pdev, hw);

	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev = NULL\n");
		return;
	}

	pm_runtime_get_noresume(&pdev->dev);

	hw = to_txe_hw(dev);

	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	pci_set_drvdata(pdev, NULL);

	mei_txe_pci_iounmap(pdev, hw);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}


#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
				NULL,
				mei_txe_irq_thread_handler,
				IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
				mei_txe_irq_quick_handler,
				mei_txe_irq_thread_handler,
				IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/*
	 * If everything is okay we're about to enter the PCI low
	 * power state (D3), therefore we need to disable the
	 * interrupts towards the host.
	 * However, if the device is not wakeable we do not enter
	 * the D-low state and we need to keep the interrupts kicking.
	 */
	if (!ret && pci_dev_run_wake(pdev))
		mei_disable_interrupts(dev);

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);
	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->dev->pm_domain = NULL;
}

static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_remove,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");