// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>


#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci device */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* request pci regions and map the IO device memory for the mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

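	/*
	 * Request a 36-bit DMA mask first, falling back to a 32-bit mask
	 * if the platform cannot provide it.
	 */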
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

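	/*
	 * MSI is preferred; if it cannot be enabled, the shared legacy
	 * line interrupt is requested below instead.
	 */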
	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

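	/*
	 * Arm runtime PM autosuspend: the device is allowed to suspend
	 * after MEI_TXI_RPM_TIMEOUT milliseconds of inactivity.
	 */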
	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to be resumed from runtime suspend in order to perform
	 * the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * TXE maps runtime suspend/resume to its own power gating states,
	 * hence we need to bypass the native PCI runtime PM service, which
	 * would eventually bring the device into the D3cold/hot state.
	 * Unlike from its own power gating, the TXE device cannot wake up
	 * from D3. To get around the native PCI runtime PM, TXE uses
	 * runtime PM domain handlers, which take precedence.
	 */
	mei_txe_set_pm_domain(dev);

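	/*
	 * Drop the runtime PM reference held across probe so the device
	 * is allowed to runtime suspend once it goes idle.
	 */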
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier;
 * it is a simplified version of remove, so we go down
 * faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev = pci_get_drvdata(pdev);

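	/* take back the runtime PM reference dropped in probe */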
	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}


#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);

	dev_dbg(device, "rpm: txe: runtime_idle\n");

	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

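	/*
	 * Always return -EBUSY so the PM core does not suspend the device
	 * from the idle path; suspend is driven by pm_runtime_autosuspend()
	 * above once the write queue is idle.
	 */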
	return -EBUSY;
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime suspend\n");

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/* keep the interrupt enabled, we are staying in D0 */

	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);

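	/*
	 * -EAGAIN only means the write queue was busy; any other
	 * error triggers a device reset.
	 */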
	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "rpm: txe: runtime resume\n");

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

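	/*
	 * Inherit the PCI bus PM ops, override only the runtime callbacks,
	 * and install the result as a device PM domain; the domain
	 * callbacks take precedence over the bus ones.
	 */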
	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_shutdown,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");