// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
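
/*
 * Rough usage sketch (illustrative only, not part of this file): a provider
 * driver allocates and registers itself, while a client device asks for a
 * requester slot and then reads per-CPU tags.  Names such as my_dca_ops,
 * my_priv and client_dev are placeholders.
 *
 *	struct dca_provider *dca;
 *	int err;
 *
 *	dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_priv));
 *	if (!dca)
 *		return -ENOMEM;
 *	err = register_dca_provider(dca, &pdev->dev);
 *
 * A client then calls dca_add_requester(&client_dev->dev), fetches tags
 * with dca3_get_tag(&client_dev->dev, cpu), and finally calls
 * dca_remove_requester(&client_dev->dev) before going away.
 */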

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

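/* Walk up from a PCI device to the root bus of its PCI root complex. */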
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

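/* Allocate and initialize a DCA domain for the given PCI root complex. */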
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

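/* Unlink a domain from dca_domains and free it; callers hold dca_lock. */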
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

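/* Identify Intel I/OAT version 3.0 devices by their PCI vendor/device IDs. */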
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

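/*
 * Tear down every registered provider: notify clients, detach all providers
 * from the (single expected) domain under dca_lock, then remove their sysfs
 * entries with the lock dropped.
 */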
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

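/* Look up the domain matching a PCI root complex; callers hold dca_lock. */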
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

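/*
 * Find the domain for a device's root complex.  If none exists and the
 * device is an I/OAT 3.0 provider while other domains are already present,
 * block any further provider registration.
 */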
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

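/*
 * Find the provider that manages @dev, or the first provider in the first
 * domain when @dev is NULL.  Callers hold dca_lock.
 */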
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block to add to the dca provider chain
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block to remove from the dca provider chain
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);