/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
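
/*
 * Editorial sketch (not part of the original driver): support for an
 * additional platform is normally a single new entry in mei_me_pci_tbl
 * above, paired with a device ID macro in hw-me-regs.h and a suitable
 * per-generation config. Using the hypothetical MEI_DEV_ID_NEWPCH
 * (not a real ID), the entry would look like:
 *
 *	{MEI_PCI_DEVICE(MEI_DEV_ID_NEWPCH, mei_me_pch_cfg)},
 *
 * inserted before the terminating {0, } entry so the PCI core can match it.
 */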

#ifdef CONFIG_PM_RUNTIME
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM_RUNTIME */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	/* prefer a 64-bit DMA mask, fall back to 32 bits if that fails */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
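
	/*
	 * Interrupt setup: when MSI is enabled the vector is exclusive to
	 * this device, so no quick handler is needed and IRQF_ONESHOT keeps
	 * the interrupt masked until the threaded handler completes. On a
	 * legacy, possibly shared line, mei_me_irq_quick_handler first checks
	 * whether the interrupt came from this device, and the line is
	 * requested with IRQF_SHARED.
	 */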
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For HW that is not wake-capable the runtime PM framework
	 * can't be used on the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
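
/*
 * Note on the runtime PM callbacks below: runtime_idle only requests an
 * autosuspend when no client write is pending, runtime_suspend enters ME
 * power gating via mei_me_pg_set_sync() (or returns -EAGAIN while the
 * device is busy), and runtime_resume leaves power gating via
 * mei_me_pg_unset_sync(). Both transitions run under dev->device_lock.
 */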

#ifdef CONFIG_PM_RUNTIME
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_set_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_unset_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->pdev->dev.pm_domain = NULL;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");