1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2025-2026, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 * for CSC platforms.
6 */
7
8 #include <linux/cleanup.h>
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/err.h>
12 #include <linux/errno.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/pci.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/sched.h>
19 #include <linux/types.h>
20
21 #include "client.h"
22 #include "hw-me-regs.h"
23 #include "hw-me.h"
24 #include "mei_dev.h"
25 #include "mei-trace.h"
26
27 #define MEI_CSC_HECI2_OFFSET 0x1000
28
/*
 * mei_csc_read_fws - read a firmware status register.
 * @mdev: mei device
 * @where: register offset within the firmware status register bank
 * @name: register name used for tracing
 * @val: filled with the value read
 *
 * The status registers sit at a fixed 0xC00 offset inside the mapped
 * HECI2 window (assumption from the hardware layout — confirm against the
 * CSC register map). Always succeeds.
 *
 * Return: 0
 */
static int mei_csc_read_fws(const struct mei_device *mdev, int where, const char *name, u32 *val)
{
	struct mei_me_hw *hw = to_me_hw(mdev);
	void __iomem *addr = hw->mem_addr + 0xC00 + where;

	*val = ioread32(addr);
	trace_mei_reg_read(&mdev->dev, name, where, *val);

	return 0;
}
37
/*
 * mei_csc_probe - PCI probe callback.
 *
 * Enables the device, maps BAR 0, sets up 64-bit DMA, allocates the mei
 * device structure, registers the character device, then wires up the
 * interrupt and enables runtime PM.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mei_csc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	const struct mei_cfg *cfg;
	char __iomem *registers;
	struct mei_device *mdev;
	struct mei_me_hw *hw;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	/* Managed (pcim_*) calls below are released automatically on unbind. */
	err = pcim_enable_device(pdev);
	if (err)
		return dev_err_probe(dev, err, "Failed to enable PCI device.\n");

	pci_set_master(pdev);

	registers = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
	if (IS_ERR(registers))
		return dev_err_probe(dev, PTR_ERR(registers), "Failed to get PCI region.\n");

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		return dev_err_probe(dev, err, "No usable DMA configuration.\n");

	/* allocates and initializes the mei dev structure */
	mdev = mei_me_dev_init(dev, cfg, false);
	if (!mdev)
		return -ENOMEM;

	mdev->read_fws_need_resume = true;

	hw = to_me_hw(mdev);

	/*
	 * Both HECI1 and HECI2 are on this device, but only HECI2 is supported.
	 */
	hw->mem_addr = registers + MEI_CSC_HECI2_OFFSET;
	hw->read_fws = mei_csc_read_fws;

	/*
	 * mei_register() assumes ownership of mdev.
	 * No need to release it explicitly in error path.
	 */
	err = mei_register(mdev, dev);
	if (err)
		return err;

	pci_set_drvdata(pdev, mdev);

	/* Exactly one vector; MSI preferred, legacy INTx as fallback. */
	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (err < 0) {
		dev_err_probe(dev, err, "Failed to allocate IRQ.\n");
		goto err_mei_unreg;
	}

	hw->irq = pci_irq_vector(pdev, 0);

	/* request and enable interrupt */
	err = request_threaded_irq(hw->irq,
				   mei_me_irq_quick_handler, mei_me_irq_thread_handler,
				   IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, mdev);
	if (err)
		goto err_free_irq_vectors;

	/*
	 * Continue to char device setup in spite of firmware handshake failure.
	 * In order to provide access to the firmware status registers to the user
	 * space via sysfs. The firmware status registers required to understand
	 * firmware error state and possible recovery flow.
	 */
	if (mei_start(mdev))
		dev_warn(dev, "Failed to initialize HECI hardware.\n");

	pm_runtime_set_autosuspend_delay(dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	/*
	 * MEI requires to resume from runtime suspend mode
	 * in order to perform link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/* Drop the probe-time usage count so runtime suspend can kick in. */
	pm_runtime_allow(dev);
	pm_runtime_put_noidle(dev);

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_mei_unreg:
	mei_deregister(mdev);
	return err;
}
134
/*
 * mei_csc_shutdown - quiesce the device.
 *
 * Pins the device active (no runtime resume races), stops the MEI state
 * machine, then disables and releases the interrupt. Also used as the
 * first half of remove().
 */
static void mei_csc_shutdown(struct pci_dev *pdev)
{
	struct mei_device *mdev = pci_get_drvdata(pdev);
	struct mei_me_hw *hw = to_me_hw(mdev);

	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(mdev);

	/* Stop first, then tear down the IRQ path. */
	mei_disable_interrupts(mdev);
	free_irq(hw->irq, mdev);
	pci_free_irq_vectors(pdev);
}
148
/*
 * mei_csc_remove - PCI remove callback.
 *
 * Reuses the shutdown path to quiesce the hardware, then unregisters the
 * mei device (which owns and releases mdev).
 */
static void mei_csc_remove(struct pci_dev *pdev)
{
	struct mei_device *mdev = pci_get_drvdata(pdev);

	mei_csc_shutdown(pdev);

	mei_deregister(mdev);
}
157
/*
 * mei_csc_pci_prepare - system sleep prepare callback.
 *
 * Force a runtime resume so the link reset flow can run from an active
 * device during system suspend (see DPM_FLAG_NO_DIRECT_COMPLETE in probe).
 */
static int mei_csc_pci_prepare(struct device *dev)
{
	pm_runtime_resume(dev);
	return 0;
}
163
/*
 * mei_csc_pci_suspend - system suspend callback.
 *
 * Stops the MEI state machine and masks interrupts; the IRQ itself stays
 * requested and is serviced again after mei_restart() on resume.
 */
static int mei_csc_pci_suspend(struct device *dev)
{
	struct mei_device *mdev = dev_get_drvdata(dev);

	mei_stop(mdev);

	mei_disable_interrupts(mdev);

	return 0;
}
174
/*
 * mei_csc_pci_resume - system resume callback.
 *
 * Restarts the MEI state machine and re-arms the periodic timer work that
 * was stopped on suspend.
 *
 * Return: 0 on success, negative errno from mei_restart() otherwise.
 */
static int mei_csc_pci_resume(struct device *dev)
{
	struct mei_device *mdev = dev_get_drvdata(dev);
	int err;

	err = mei_restart(mdev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&mdev->timer_work, HZ);

	return 0;
}
189
/*
 * mei_csc_pci_complete - system sleep complete callback.
 *
 * Counterpart of prepare(): allow the device to runtime suspend again now
 * that the system transition is finished.
 */
static void mei_csc_pci_complete(struct device *dev)
{
	pm_runtime_suspend(dev);
}
194
mei_csc_pm_runtime_idle(struct device * dev)195 static int mei_csc_pm_runtime_idle(struct device *dev)
196 {
197 struct mei_device *mdev = dev_get_drvdata(dev);
198
199 return mei_write_is_idle(mdev) ? 0 : -EBUSY;
200 }
201
mei_csc_pm_runtime_suspend(struct device * dev)202 static int mei_csc_pm_runtime_suspend(struct device *dev)
203 {
204 struct mei_device *mdev = dev_get_drvdata(dev);
205 struct mei_me_hw *hw = to_me_hw(mdev);
206
207 guard(mutex)(&mdev->device_lock);
208
209 if (!mei_write_is_idle(mdev))
210 return -EAGAIN;
211
212 hw->pg_state = MEI_PG_ON;
213 return 0;
214 }
215
/*
 * mei_csc_pm_runtime_resume - runtime PM resume callback.
 *
 * Clears the power-gated state under the device lock, then invokes the IRQ
 * thread handler directly (the lock must be dropped first — the handler
 * takes it itself) to flush queues that accumulated while suspended.
 *
 * Return: 0 (a handler failure is logged but not propagated).
 */
static int mei_csc_pm_runtime_resume(struct device *dev)
{
	struct mei_device *mdev = dev_get_drvdata(dev);
	struct mei_me_hw *hw = to_me_hw(mdev);
	irqreturn_t irq_ret;

	scoped_guard(mutex, &mdev->device_lock)
		hw->pg_state = MEI_PG_OFF;

	/* Process all queues that wait for resume */
	irq_ret = mei_me_irq_thread_handler(1, mdev);
	if (irq_ret != IRQ_HANDLED)
		dev_err(dev, "thread handler fail %d\n", irq_ret);

	return 0;
}
232
/*
 * System sleep and runtime PM callbacks. pm_sleep_ptr(), SYSTEM_SLEEP_PM_OPS()
 * and RUNTIME_PM_OPS() compile the handlers out when the corresponding PM
 * support is not configured.
 */
static const struct dev_pm_ops mei_csc_pm_ops = {
	.prepare = pm_sleep_ptr(mei_csc_pci_prepare),
	.complete = pm_sleep_ptr(mei_csc_pci_complete),
	SYSTEM_SLEEP_PM_OPS(mei_csc_pci_suspend, mei_csc_pci_resume)
	RUNTIME_PM_OPS(mei_csc_pm_runtime_suspend,
		       mei_csc_pm_runtime_resume, mei_csc_pm_runtime_idle)
};
240
/* Supported devices: Intel CRI device using the CSC hardware configuration. */
static const struct pci_device_id mei_csc_pci_tbl[] = {
	{ PCI_DEVICE_DATA(INTEL, MEI_CRI, MEI_ME_CSC_CFG) },
	{} /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, mei_csc_pci_tbl);
246
static struct pci_driver mei_csc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_csc_pci_tbl,
	.probe = mei_csc_probe,
	.remove = mei_csc_remove,
	.shutdown = mei_csc_shutdown,
	.driver = {
		.pm = &mei_csc_pm_ops,
		/* Probe may block on firmware handshake; don't hold up boot. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	}
};
module_pci_driver(mei_csc_driver);

MODULE_DESCRIPTION("Intel(R) Management Engine Interface for discrete graphics (CSC)");
MODULE_LICENSE("GPL");
262