1795536acSTomas Winkler /* 2795536acSTomas Winkler * 3795536acSTomas Winkler * Intel Management Engine Interface (Intel MEI) Linux driver 4795536acSTomas Winkler * Copyright (c) 2013-2014, Intel Corporation. 5795536acSTomas Winkler * 6795536acSTomas Winkler * This program is free software; you can redistribute it and/or modify it 7795536acSTomas Winkler * under the terms and conditions of the GNU General Public License, 8795536acSTomas Winkler * version 2, as published by the Free Software Foundation. 9795536acSTomas Winkler * 10795536acSTomas Winkler * This program is distributed in the hope it will be useful, but WITHOUT 11795536acSTomas Winkler * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12795536acSTomas Winkler * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13795536acSTomas Winkler * more details. 14795536acSTomas Winkler * 15795536acSTomas Winkler */ 16795536acSTomas Winkler 17795536acSTomas Winkler #include <linux/module.h> 18795536acSTomas Winkler #include <linux/kernel.h> 19795536acSTomas Winkler #include <linux/device.h> 20795536acSTomas Winkler #include <linux/fs.h> 21795536acSTomas Winkler #include <linux/errno.h> 22795536acSTomas Winkler #include <linux/types.h> 23795536acSTomas Winkler #include <linux/pci.h> 24795536acSTomas Winkler #include <linux/init.h> 25795536acSTomas Winkler #include <linux/sched.h> 26795536acSTomas Winkler #include <linux/uuid.h> 27795536acSTomas Winkler #include <linux/jiffies.h> 28795536acSTomas Winkler #include <linux/interrupt.h> 29795536acSTomas Winkler #include <linux/workqueue.h> 30*cfe5ab85SAlexander Usyskin #include <linux/pm_runtime.h> 31795536acSTomas Winkler 32795536acSTomas Winkler #include <linux/mei.h> 33795536acSTomas Winkler 34795536acSTomas Winkler 35795536acSTomas Winkler #include "mei_dev.h" 36795536acSTomas Winkler #include "hw-txe.h" 37795536acSTomas Winkler 38a05f8f86STomas Winkler static const struct pci_device_id mei_txe_pci_tbl[] = { 39795536acSTomas Winkler {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */ 40795536acSTomas Winkler {0, } 41795536acSTomas Winkler }; 42795536acSTomas Winkler MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); 43795536acSTomas Winkler 44795536acSTomas Winkler 45795536acSTomas Winkler static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) 46795536acSTomas Winkler { 47795536acSTomas Winkler int i; 48795536acSTomas Winkler for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { 49795536acSTomas Winkler if (hw->mem_addr[i]) { 50795536acSTomas Winkler pci_iounmap(pdev, hw->mem_addr[i]); 51795536acSTomas Winkler hw->mem_addr[i] = NULL; 52795536acSTomas Winkler } 53795536acSTomas Winkler } 54795536acSTomas Winkler } 55795536acSTomas Winkler /** 56795536acSTomas Winkler * mei_probe - Device Initialization Routine 57795536acSTomas Winkler * 58795536acSTomas Winkler * @pdev: PCI device structure 59795536acSTomas Winkler * @ent: entry in mei_txe_pci_tbl 60795536acSTomas Winkler * 61795536acSTomas Winkler * returns 0 on success, <0 on failure. 62795536acSTomas Winkler */ 63795536acSTomas Winkler static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 64795536acSTomas Winkler { 65795536acSTomas Winkler struct mei_device *dev; 66795536acSTomas Winkler struct mei_txe_hw *hw; 67795536acSTomas Winkler int err; 68795536acSTomas Winkler int i; 69795536acSTomas Winkler 70795536acSTomas Winkler /* enable pci dev */ 71795536acSTomas Winkler err = pci_enable_device(pdev); 72795536acSTomas Winkler if (err) { 73795536acSTomas Winkler dev_err(&pdev->dev, "failed to enable pci device.\n"); 74795536acSTomas Winkler goto end; 75795536acSTomas Winkler } 76795536acSTomas Winkler /* set PCI host mastering */ 77795536acSTomas Winkler pci_set_master(pdev); 78795536acSTomas Winkler /* pci request regions for mei driver */ 79795536acSTomas Winkler err = pci_request_regions(pdev, KBUILD_MODNAME); 80795536acSTomas Winkler if (err) { 81795536acSTomas Winkler dev_err(&pdev->dev, "failed to get pci regions.\n"); 82795536acSTomas Winkler goto disable_device; 83795536acSTomas Winkler } 84795536acSTomas Winkler 85795536acSTomas Winkler err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 86795536acSTomas Winkler if (err) { 87795536acSTomas Winkler err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 88795536acSTomas Winkler if (err) { 89795536acSTomas Winkler dev_err(&pdev->dev, "No suitable DMA available.\n"); 90795536acSTomas Winkler goto release_regions; 91795536acSTomas Winkler } 92795536acSTomas Winkler } 93795536acSTomas Winkler 94795536acSTomas Winkler /* allocates and initializes the mei dev structure */ 95795536acSTomas Winkler dev = mei_txe_dev_init(pdev); 96795536acSTomas Winkler if (!dev) { 97795536acSTomas Winkler err = -ENOMEM; 98795536acSTomas Winkler goto release_regions; 99795536acSTomas Winkler } 100795536acSTomas Winkler hw = to_txe_hw(dev); 101795536acSTomas Winkler 102795536acSTomas Winkler /* mapping IO device memory */ 103795536acSTomas Winkler for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { 104795536acSTomas Winkler hw->mem_addr[i] = pci_iomap(pdev, i, 0); 105795536acSTomas Winkler if (!hw->mem_addr[i]) { 106795536acSTomas Winkler dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); 107795536acSTomas Winkler err = -ENOMEM; 108795536acSTomas Winkler goto free_device; 109795536acSTomas Winkler } 110795536acSTomas Winkler } 111795536acSTomas Winkler 112795536acSTomas Winkler 113795536acSTomas Winkler pci_enable_msi(pdev); 114795536acSTomas Winkler 115795536acSTomas Winkler /* clear spurious interrupts */ 116795536acSTomas Winkler mei_clear_interrupts(dev); 117795536acSTomas Winkler 118795536acSTomas Winkler /* request and enable interrupt */ 119795536acSTomas Winkler if (pci_dev_msi_enabled(pdev)) 120795536acSTomas Winkler err = request_threaded_irq(pdev->irq, 121795536acSTomas Winkler NULL, 122795536acSTomas Winkler mei_txe_irq_thread_handler, 123795536acSTomas Winkler IRQF_ONESHOT, KBUILD_MODNAME, dev); 124795536acSTomas Winkler else 125795536acSTomas Winkler err = request_threaded_irq(pdev->irq, 126795536acSTomas Winkler mei_txe_irq_quick_handler, 127795536acSTomas Winkler mei_txe_irq_thread_handler, 128795536acSTomas Winkler IRQF_SHARED, KBUILD_MODNAME, dev); 129795536acSTomas Winkler if (err) { 130795536acSTomas Winkler dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", 131795536acSTomas Winkler pdev->irq); 132795536acSTomas Winkler goto free_device; 133795536acSTomas Winkler } 134795536acSTomas Winkler 135795536acSTomas Winkler if (mei_start(dev)) { 136795536acSTomas Winkler dev_err(&pdev->dev, "init hw failure.\n"); 137795536acSTomas Winkler err = -ENODEV; 138795536acSTomas Winkler goto release_irq; 139795536acSTomas Winkler } 140795536acSTomas Winkler 141*cfe5ab85SAlexander Usyskin pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); 142*cfe5ab85SAlexander Usyskin pm_runtime_use_autosuspend(&pdev->dev); 143*cfe5ab85SAlexander Usyskin 144795536acSTomas Winkler err = mei_register(dev); 145795536acSTomas Winkler if (err) 146795536acSTomas Winkler goto release_irq; 147795536acSTomas Winkler 148795536acSTomas Winkler pci_set_drvdata(pdev, dev); 149795536acSTomas Winkler 150*cfe5ab85SAlexander Usyskin pm_runtime_put_noidle(&pdev->dev); 151*cfe5ab85SAlexander Usyskin 152795536acSTomas Winkler return 0; 153795536acSTomas Winkler 154795536acSTomas Winkler release_irq: 155795536acSTomas Winkler 156795536acSTomas Winkler mei_cancel_work(dev); 157795536acSTomas Winkler 158795536acSTomas Winkler /* disable interrupts */ 159795536acSTomas Winkler mei_disable_interrupts(dev); 160795536acSTomas Winkler 161795536acSTomas Winkler free_irq(pdev->irq, dev); 162795536acSTomas Winkler pci_disable_msi(pdev); 163795536acSTomas Winkler 164795536acSTomas Winkler free_device: 165795536acSTomas Winkler mei_txe_pci_iounmap(pdev, hw); 166795536acSTomas Winkler 167795536acSTomas Winkler kfree(dev); 168795536acSTomas Winkler release_regions: 169795536acSTomas Winkler pci_release_regions(pdev); 170795536acSTomas Winkler disable_device: 171795536acSTomas Winkler pci_disable_device(pdev); 172795536acSTomas Winkler end: 173795536acSTomas Winkler dev_err(&pdev->dev, "initialization failed.\n"); 174795536acSTomas Winkler return err; 175795536acSTomas Winkler } 176795536acSTomas Winkler 177795536acSTomas Winkler /** 178795536acSTomas Winkler * mei_remove - Device Removal Routine 179795536acSTomas Winkler * 180795536acSTomas Winkler * @pdev: PCI device structure 181795536acSTomas Winkler * 182795536acSTomas Winkler * mei_remove is called by the PCI subsystem to alert the driver 183795536acSTomas Winkler * that it should release a PCI device. 184795536acSTomas Winkler */ 185795536acSTomas Winkler static void mei_txe_remove(struct pci_dev *pdev) 186795536acSTomas Winkler { 187795536acSTomas Winkler struct mei_device *dev; 188795536acSTomas Winkler struct mei_txe_hw *hw; 189795536acSTomas Winkler 190795536acSTomas Winkler dev = pci_get_drvdata(pdev); 191795536acSTomas Winkler if (!dev) { 192795536acSTomas Winkler dev_err(&pdev->dev, "mei: dev =NULL\n"); 193795536acSTomas Winkler return; 194795536acSTomas Winkler } 195795536acSTomas Winkler 196*cfe5ab85SAlexander Usyskin pm_runtime_get_noresume(&pdev->dev); 197*cfe5ab85SAlexander Usyskin 198795536acSTomas Winkler hw = to_txe_hw(dev); 199795536acSTomas Winkler 200795536acSTomas Winkler mei_stop(dev); 201795536acSTomas Winkler 202795536acSTomas Winkler /* disable interrupts */ 203795536acSTomas Winkler mei_disable_interrupts(dev); 204795536acSTomas Winkler free_irq(pdev->irq, dev); 205795536acSTomas Winkler pci_disable_msi(pdev); 206795536acSTomas Winkler 207795536acSTomas Winkler pci_set_drvdata(pdev, NULL); 208795536acSTomas Winkler 209795536acSTomas Winkler mei_txe_pci_iounmap(pdev, hw); 210795536acSTomas Winkler 211795536acSTomas Winkler mei_deregister(dev); 212795536acSTomas Winkler 213795536acSTomas Winkler kfree(dev); 214795536acSTomas Winkler 215795536acSTomas Winkler pci_release_regions(pdev); 216795536acSTomas Winkler pci_disable_device(pdev); 217795536acSTomas Winkler } 218795536acSTomas Winkler 219795536acSTomas Winkler 220e0270addSTomas Winkler #ifdef CONFIG_PM_SLEEP 221795536acSTomas Winkler static int mei_txe_pci_suspend(struct device *device) 222795536acSTomas Winkler { 223795536acSTomas Winkler struct pci_dev *pdev = to_pci_dev(device); 224795536acSTomas Winkler struct mei_device *dev = pci_get_drvdata(pdev); 225795536acSTomas Winkler 226795536acSTomas Winkler if (!dev) 227795536acSTomas Winkler return -ENODEV; 228795536acSTomas Winkler 229795536acSTomas Winkler dev_dbg(&pdev->dev, "suspend\n"); 230795536acSTomas Winkler 231795536acSTomas Winkler mei_stop(dev); 232795536acSTomas Winkler 233795536acSTomas Winkler mei_disable_interrupts(dev); 234795536acSTomas Winkler 235795536acSTomas Winkler free_irq(pdev->irq, dev); 236795536acSTomas Winkler pci_disable_msi(pdev); 237795536acSTomas Winkler 238795536acSTomas Winkler return 0; 239795536acSTomas Winkler } 240795536acSTomas Winkler 241795536acSTomas Winkler static int mei_txe_pci_resume(struct device *device) 242795536acSTomas Winkler { 243795536acSTomas Winkler struct pci_dev *pdev = to_pci_dev(device); 244795536acSTomas Winkler struct mei_device *dev; 245795536acSTomas Winkler int err; 246795536acSTomas Winkler 247795536acSTomas Winkler dev = pci_get_drvdata(pdev); 248795536acSTomas Winkler if (!dev) 249795536acSTomas Winkler return -ENODEV; 250795536acSTomas Winkler 251795536acSTomas Winkler pci_enable_msi(pdev); 252795536acSTomas Winkler 253795536acSTomas Winkler mei_clear_interrupts(dev); 254795536acSTomas Winkler 255795536acSTomas Winkler /* request and enable interrupt */ 256795536acSTomas Winkler if (pci_dev_msi_enabled(pdev)) 257795536acSTomas Winkler err = request_threaded_irq(pdev->irq, 258795536acSTomas Winkler NULL, 259795536acSTomas Winkler mei_txe_irq_thread_handler, 260795536acSTomas Winkler IRQF_ONESHOT, KBUILD_MODNAME, dev); 261795536acSTomas Winkler else 262795536acSTomas Winkler err = request_threaded_irq(pdev->irq, 263795536acSTomas Winkler mei_txe_irq_quick_handler, 264795536acSTomas Winkler mei_txe_irq_thread_handler, 265795536acSTomas Winkler IRQF_SHARED, KBUILD_MODNAME, dev); 266795536acSTomas Winkler if (err) { 267795536acSTomas Winkler dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", 268795536acSTomas Winkler pdev->irq); 269795536acSTomas Winkler return err; 270795536acSTomas Winkler } 271795536acSTomas Winkler 272795536acSTomas Winkler err = mei_restart(dev); 273795536acSTomas Winkler 274795536acSTomas Winkler return err; 275795536acSTomas Winkler } 276*cfe5ab85SAlexander Usyskin #endif /* CONFIG_PM_SLEEP */ 277795536acSTomas Winkler 278*cfe5ab85SAlexander Usyskin #ifdef CONFIG_PM_RUNTIME 279*cfe5ab85SAlexander Usyskin static int mei_txe_pm_runtime_idle(struct device *device) 280*cfe5ab85SAlexander Usyskin { 281*cfe5ab85SAlexander Usyskin struct pci_dev *pdev = to_pci_dev(device); 282*cfe5ab85SAlexander Usyskin struct mei_device *dev; 283*cfe5ab85SAlexander Usyskin 284*cfe5ab85SAlexander Usyskin dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); 285*cfe5ab85SAlexander Usyskin 286*cfe5ab85SAlexander Usyskin dev = pci_get_drvdata(pdev); 287*cfe5ab85SAlexander Usyskin if (!dev) 288*cfe5ab85SAlexander Usyskin return -ENODEV; 289*cfe5ab85SAlexander Usyskin if (mei_write_is_idle(dev)) 290*cfe5ab85SAlexander Usyskin pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); 291*cfe5ab85SAlexander Usyskin 292*cfe5ab85SAlexander Usyskin return -EBUSY; 293*cfe5ab85SAlexander Usyskin } 294*cfe5ab85SAlexander Usyskin static int mei_txe_pm_runtime_suspend(struct device *device) 295*cfe5ab85SAlexander Usyskin { 296*cfe5ab85SAlexander Usyskin struct pci_dev *pdev = to_pci_dev(device); 297*cfe5ab85SAlexander Usyskin struct mei_device *dev; 298*cfe5ab85SAlexander Usyskin int ret; 299*cfe5ab85SAlexander Usyskin 300*cfe5ab85SAlexander Usyskin dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); 301*cfe5ab85SAlexander Usyskin 302*cfe5ab85SAlexander Usyskin dev = pci_get_drvdata(pdev); 303*cfe5ab85SAlexander Usyskin if (!dev) 304*cfe5ab85SAlexander Usyskin return -ENODEV; 305*cfe5ab85SAlexander Usyskin 306*cfe5ab85SAlexander Usyskin mutex_lock(&dev->device_lock); 307*cfe5ab85SAlexander Usyskin 308*cfe5ab85SAlexander Usyskin if (mei_write_is_idle(dev)) 309*cfe5ab85SAlexander Usyskin ret = mei_txe_aliveness_set_sync(dev, 0); 310*cfe5ab85SAlexander Usyskin else 311*cfe5ab85SAlexander Usyskin ret = -EAGAIN; 312*cfe5ab85SAlexander Usyskin 313*cfe5ab85SAlexander Usyskin /* 314*cfe5ab85SAlexander Usyskin * If everything is okay we're about to enter PCI low 315*cfe5ab85SAlexander Usyskin * power state (D3) therefor we need to disable the 316*cfe5ab85SAlexander Usyskin * interrupts towards host. 317*cfe5ab85SAlexander Usyskin * However if device is not wakeable we do not enter 318*cfe5ab85SAlexander Usyskin * D-low state and we need to keep the interrupt kicking 319*cfe5ab85SAlexander Usyskin */ 320*cfe5ab85SAlexander Usyskin if (!ret && pci_dev_run_wake(pdev)) 321*cfe5ab85SAlexander Usyskin mei_disable_interrupts(dev); 322*cfe5ab85SAlexander Usyskin 323*cfe5ab85SAlexander Usyskin dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 324*cfe5ab85SAlexander Usyskin 325*cfe5ab85SAlexander Usyskin mutex_unlock(&dev->device_lock); 326*cfe5ab85SAlexander Usyskin return ret; 327*cfe5ab85SAlexander Usyskin } 328*cfe5ab85SAlexander Usyskin 329*cfe5ab85SAlexander Usyskin static int mei_txe_pm_runtime_resume(struct device *device) 330*cfe5ab85SAlexander Usyskin { 331*cfe5ab85SAlexander Usyskin struct pci_dev *pdev = to_pci_dev(device); 332*cfe5ab85SAlexander Usyskin struct mei_device *dev; 333*cfe5ab85SAlexander Usyskin int ret; 334*cfe5ab85SAlexander Usyskin 335*cfe5ab85SAlexander Usyskin dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); 336*cfe5ab85SAlexander Usyskin 337*cfe5ab85SAlexander Usyskin dev = pci_get_drvdata(pdev); 338*cfe5ab85SAlexander Usyskin if (!dev) 339*cfe5ab85SAlexander Usyskin return -ENODEV; 340*cfe5ab85SAlexander Usyskin 341*cfe5ab85SAlexander Usyskin mutex_lock(&dev->device_lock); 342*cfe5ab85SAlexander Usyskin 343*cfe5ab85SAlexander Usyskin mei_enable_interrupts(dev); 344*cfe5ab85SAlexander Usyskin 345*cfe5ab85SAlexander Usyskin ret = mei_txe_aliveness_set_sync(dev, 1); 346*cfe5ab85SAlexander Usyskin 347*cfe5ab85SAlexander Usyskin mutex_unlock(&dev->device_lock); 348*cfe5ab85SAlexander Usyskin 349*cfe5ab85SAlexander Usyskin dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); 350*cfe5ab85SAlexander Usyskin 351*cfe5ab85SAlexander Usyskin return ret; 352*cfe5ab85SAlexander Usyskin } 353*cfe5ab85SAlexander Usyskin #endif /* CONFIG_PM_RUNTIME */ 354*cfe5ab85SAlexander Usyskin 355*cfe5ab85SAlexander Usyskin #ifdef CONFIG_PM 356*cfe5ab85SAlexander Usyskin static const struct dev_pm_ops mei_txe_pm_ops = { 357*cfe5ab85SAlexander Usyskin SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, 358*cfe5ab85SAlexander Usyskin mei_txe_pci_resume) 359*cfe5ab85SAlexander Usyskin SET_RUNTIME_PM_OPS( 360*cfe5ab85SAlexander Usyskin mei_txe_pm_runtime_suspend, 361*cfe5ab85SAlexander Usyskin mei_txe_pm_runtime_resume, 362*cfe5ab85SAlexander Usyskin mei_txe_pm_runtime_idle) 363*cfe5ab85SAlexander Usyskin }; 364795536acSTomas Winkler 365795536acSTomas Winkler #define MEI_TXE_PM_OPS (&mei_txe_pm_ops) 366795536acSTomas Winkler #else 367795536acSTomas Winkler #define MEI_TXE_PM_OPS NULL 368*cfe5ab85SAlexander Usyskin #endif /* CONFIG_PM */ 369*cfe5ab85SAlexander Usyskin 370795536acSTomas Winkler /* 371795536acSTomas Winkler * PCI driver structure 372795536acSTomas Winkler */ 373795536acSTomas Winkler static struct pci_driver mei_txe_driver = { 374795536acSTomas Winkler .name = KBUILD_MODNAME, 375795536acSTomas Winkler .id_table = mei_txe_pci_tbl, 376795536acSTomas Winkler .probe = mei_txe_probe, 377795536acSTomas Winkler .remove = mei_txe_remove, 378795536acSTomas Winkler .shutdown = mei_txe_remove, 379795536acSTomas Winkler .driver.pm = MEI_TXE_PM_OPS, 380795536acSTomas Winkler }; 381795536acSTomas Winkler 382795536acSTomas Winkler module_pci_driver(mei_txe_driver); 383795536acSTomas Winkler 384795536acSTomas Winkler MODULE_AUTHOR("Intel Corporation"); 385795536acSTomas Winkler MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); 386795536acSTomas Winkler MODULE_LICENSE("GPL v2"); 387