xref: /linux/drivers/dca/dca-core.c (revision db4e83957f961f9053282409c5062c6baef857a4)
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver provides the common interface through which DCA (Direct Cache
 * Access) clients (requesters) and providers find each other.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>	/* MODULE_*() and EXPORT_SYMBOL_GPL() */

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check that the requester has not already been added */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
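
/*
 * Illustrative sketch (not part of the original file): how a DCA client
 * driver might use the requester API above from its setup/teardown paths.
 * The function names and the surrounding driver are hypothetical; only
 * dca_add_requester() and dca_remove_requester() are real exports here.
 */
static int __maybe_unused example_client_enable_dca(struct device *dev)
{
	int err;

	/* ask whichever provider manages this device's root complex to track it */
	err = dca_add_requester(dev);
	if (err) {
		/* -ENODEV: no provider/domain for this device; -EEXIST: already added */
		return err;
	}

	return 0;
}

static void __maybe_unused example_client_disable_dca(struct device *dev)
{
	/* release the requester slot claimed by dca_add_requester() */
	dca_remove_requester(dev);
}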

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
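
/*
 * Illustrative sketch (not part of the original file): fetching the DCA tag
 * for the current CPU before a client programs it into a (hypothetical)
 * hardware descriptor.  dca3_get_tag() is the per-device (new) API; the
 * legacy dca_get_tag() takes only the cpu.  get_cpu()/put_cpu() come from
 * <linux/smp.h>, as the kernel-doc above assumes.
 */
static u8 __maybe_unused example_lookup_tag(struct device *dev)
{
	int cpu;
	u8 tag;

	cpu = get_cpu();		/* stay on this CPU while asking for its tag */
	tag = dca3_get_tag(dev, cpu);
	put_cpu();

	return tag;
}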

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
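
/*
 * Illustrative sketch (not part of the original file): a provider allocating
 * room for its own bookkeeping after struct dca_provider via @priv_size.
 * "struct example_dca_priv" is a hypothetical private structure; the
 * dca_priv() accessor is the helper declared in <linux/dca.h>.
 */
struct example_dca_priv {
	int requester_count;
};

static struct dca_provider * __maybe_unused
example_alloc_provider(struct dca_ops *ops)
{
	struct dca_provider *dca;
	struct example_dca_priv *priv;

	dca = alloc_dca_provider(ops, sizeof(*priv));
	if (!dca)
		return NULL;

	priv = dca_priv(dca);	/* points just past struct dca_provider */
	priv->requester_count = 0;

	return dca;
}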

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
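
/*
 * Illustrative sketch (not part of the original file): the provider side of
 * the API.  The four callbacks are hypothetical stubs; a real provider (for
 * instance ioatdma's dca support) programs hardware tag tables in them.  The
 * struct dca_ops callback signatures are the ones this file invokes above.
 */
static int example_add_requester(struct dca_provider *dca, struct device *dev)
{
	return 0;	/* would return the requester slot it allocated */
}

static int example_remove_requester(struct dca_provider *dca, struct device *dev)
{
	return 0;	/* would return the slot it freed */
}

static u8 example_get_tag(struct dca_provider *dca, struct device *dev, int cpu)
{
	return 0;	/* would compute the tag for @cpu from hardware state */
}

static int example_dev_managed(struct dca_provider *dca, struct device *dev)
{
	return 0;	/* would report whether @dev sits under this provider */
}

static struct dca_ops example_dca_ops = {
	.add_requester		= example_add_requester,
	.remove_requester	= example_remove_requester,
	.get_tag		= example_get_tag,
	.dev_managed		= example_dev_managed,
};

static int __maybe_unused example_register_provider(struct device *dev)
{
	struct dca_provider *dca;
	int err;

	dca = alloc_dca_provider(&example_dca_ops, 0);
	if (!dca)
		return -ENOMEM;

	err = register_dca_provider(dca, dev);
	if (err) {
		free_dca_provider(dca);
		return err;
	}

	/* on teardown: unregister_dca_provider(dca, dev); free_dca_provider(dca); */
	return 0;
}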

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
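
/*
 * Illustrative sketch (not part of the original file): a client watching for
 * providers coming and going.  DCA_PROVIDER_ADD and DCA_PROVIDER_REMOVE are
 * the events this file pushes through dca_provider_chain; the callback body
 * is hypothetical.
 */
static int example_dca_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		/* a provider appeared: a client could retry dca_add_requester() */
		break;
	case DCA_PROVIDER_REMOVE:
		/* provider going away: stop using tags, drop the requester */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_dca_nb __maybe_unused = {
	.notifier_call = example_dca_notify,
};

/* A client would pair these in its init/exit paths:
 *	dca_register_notify(&example_dca_nb);
 *	...
 *	dca_unregister_notify(&example_dca_nb);
 */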

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);