xref: /linux/drivers/resctrl/mpam_devices.c (revision 8f8d0ac1da7885c0d619636f93e0983239dc145c)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2025 Arm Ltd.
3 
4 #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
5 
6 #include <linux/acpi.h>
7 #include <linux/atomic.h>
8 #include <linux/arm_mpam.h>
9 #include <linux/cacheinfo.h>
10 #include <linux/cpu.h>
11 #include <linux/cpumask.h>
12 #include <linux/device.h>
13 #include <linux/errno.h>
14 #include <linux/gfp.h>
15 #include <linux/list.h>
16 #include <linux/lockdep.h>
17 #include <linux/mutex.h>
18 #include <linux/platform_device.h>
19 #include <linux/printk.h>
20 #include <linux/srcu.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23 
24 #include "mpam_internal.h"
25 
26 /*
27  * mpam_list_lock protects the SRCU lists when writing. Once the
28  * mpam_enabled key is enabled these lists are read-only,
29  * unless the error interrupt disables the driver.
30  */
31 static DEFINE_MUTEX(mpam_list_lock);
32 static LIST_HEAD(mpam_all_msc);
33 
34 struct srcu_struct mpam_srcu;
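/*
 * A sketch of the pattern used throughout this file: writers hold
 * mpam_list_lock around list_add_rcu()/list_del_rcu(), readers walk the
 * lists inside guard(srcu)(&mpam_srcu) using list_for_each_entry_srcu(),
 * and anything removed is queued on mpam_garbage so it is only freed once
 * synchronize_srcu() guarantees no reader can still see it.
 */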
35 
36 /*
37  * Number of MSCs that have been probed. Once all MSCs have been probed MPAM
38  * can be enabled.
39  */
40 static atomic_t mpam_num_msc;
41 
42 static int mpam_cpuhp_state;
43 static DEFINE_MUTEX(mpam_cpuhp_state_lock);
44 
45 /*
46  * mpam is enabled once all devices have been probed from CPU online callbacks,
47  * scheduled via this work_struct. If access to an MSC depends on a CPU that
48  * was not brought online at boot, this can happen surprisingly late.
49  */
50 static DECLARE_WORK(mpam_enable_work, &mpam_enable);
51 
52 /*
53  * All mpam error interrupts indicate a software bug. On receipt, disable the
54  * driver.
55  */
56 static DECLARE_WORK(mpam_broken_work, &mpam_disable);
57 
58 /* The reason mpam was disabled, printed to aid debugging */
59 static char *mpam_disable_reason;
60 
61 /*
62  * An MSC is a physical container for controls and monitors, each identified by
63  * its RIS index. These share a base-address, interrupts and some MMIO
64  * registers. A vMSC is a virtual container for RIS in an MSC that control or
65  * monitor the same thing. Members of a vMSC are all RIS in the same MSC, but
66  * not all RIS in an MSC share a vMSC.
67  *
68  * Components are a group of vMSC that control or monitor the same thing but
69  * are from different MSCs, so have different base-addresses, interrupts etc.
70  * Classes are the set of components of the same type.
71  *
72  * The features of a vMSC are the union of those of the RIS it contains.
73  * The features of a Class or Component are the common subset of those of
74  * the vMSCs they contain.
75  *
76  * e.g. The system cache may have bandwidth controls on multiple interfaces,
77  * for regulating traffic from devices independently of traffic from CPUs.
78  * If these are two RIS in one MSC, they will be treated as controlling
79  * different things, and will not share a vMSC/component/class.
80  *
81  * e.g. The L2 may have one MSC and two RIS, one for cache-controls another
82  * for bandwidth. These two RIS are members of the same vMSC.
83  *
84  * e.g. The set of RIS that make up the L2 are grouped as a component. These
85  * are sometimes termed slices. They should be configured the same, as if there
86  * were only one.
87  *
88  * e.g. The SoC probably has more than one L2, each attached to a distinct set
89  * of CPUs. All the L2 components are grouped as a class.
90  *
91  * When creating an MSC, struct mpam_msc is added to the mpam_all_msc list,
92  * then linked via struct mpam_ris to a vmsc, component and class.
93  * The same MSC may exist under different class->component->vmsc paths, but the
94  * RIS index will be unique.
95  */
96 LIST_HEAD(mpam_classes);
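/*
 * A purely illustrative topology for the L2 example above (the ids and CPU
 * groupings are hypothetical, not taken from any particular platform):
 *
 *   mpam_classes
 *   `-- class (MPAM_CLASS_CACHE, level 2)
 *       |-- component (comp_id 0, the L2 shared by CPUs 0-1)
 *       |   `-- vmsc (backed by one MSC)
 *       |       |-- ris (ris_idx 0, cache-portion controls)
 *       |       `-- ris (ris_idx 1, bandwidth controls)
 *       `-- component (comp_id 1, the L2 shared by CPUs 2-3)
 *           `-- ...
 */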
97 
98 /* List of all objects that can be free()d after synchronize_srcu() */
99 static LLIST_HEAD(mpam_garbage);
100 
101 static inline void init_garbage(struct mpam_garbage *garbage)
102 {
103 	init_llist_node(&garbage->llist);
104 }
105 
106 #define add_to_garbage(x)				\
107 do {							\
108 	__typeof__(x) _x = (x);				\
109 	_x->garbage.to_free = _x;			\
110 	llist_add(&_x->garbage.llist, &mpam_garbage);	\
111 } while (0)
112 
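/*
 * Free everything that has been queued with add_to_garbage(). The objects
 * must already be unreachable from the SRCU-protected lists; the
 * synchronize_srcu() call below ensures no reader still holds a reference
 * before they are freed. devm allocations are handed back to their owning
 * platform device with devm_kfree().
 */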
113 static void mpam_free_garbage(void)
114 {
115 	struct mpam_garbage *iter, *tmp;
116 	struct llist_node *to_free = llist_del_all(&mpam_garbage);
117 
118 	if (!to_free)
119 		return;
120 
121 	synchronize_srcu(&mpam_srcu);
122 
123 	llist_for_each_entry_safe(iter, tmp, to_free, llist) {
124 		if (iter->pdev)
125 			devm_kfree(&iter->pdev->dev, iter->to_free);
126 		else
127 			kfree(iter->to_free);
128 	}
129 }
130 
131 static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
132 {
133 	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
134 
135 	return readl_relaxed(msc->mapped_hwpage + reg);
136 }
137 
138 static inline u32 _mpam_read_partsel_reg(struct mpam_msc *msc, u16 reg)
139 {
140 	lockdep_assert_held_once(&msc->part_sel_lock);
141 	return __mpam_read_reg(msc, reg);
142 }
143 
144 #define mpam_read_partsel_reg(msc, reg) _mpam_read_partsel_reg(msc, MPAMF_##reg)
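/*
 * e.g. mpam_read_partsel_reg(msc, AIDR) expands to
 * _mpam_read_partsel_reg(msc, MPAMF_AIDR): the caller must hold
 * msc->part_sel_lock and be running on a CPU in msc->accessibility.
 */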
145 
146 static struct mpam_class *
147 mpam_class_alloc(u8 level_idx, enum mpam_class_types type)
148 {
149 	struct mpam_class *class;
150 
151 	lockdep_assert_held(&mpam_list_lock);
152 
153 	class = kzalloc(sizeof(*class), GFP_KERNEL);
154 	if (!class)
155 		return ERR_PTR(-ENOMEM);
156 	init_garbage(&class->garbage);
157 
158 	INIT_LIST_HEAD_RCU(&class->components);
159 	/* Affinity is updated when RIS are added */
160 	class->level = level_idx;
161 	class->type = type;
162 	INIT_LIST_HEAD_RCU(&class->classes_list);
163 
164 	list_add_rcu(&class->classes_list, &mpam_classes);
165 
166 	return class;
167 }
168 
169 static void mpam_class_destroy(struct mpam_class *class)
170 {
171 	lockdep_assert_held(&mpam_list_lock);
172 
173 	list_del_rcu(&class->classes_list);
174 	add_to_garbage(class);
175 }
176 
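/*
 * Return the class for this (level_idx, type) pair, allocating one if it
 * doesn't exist yet. Called with mpam_list_lock held.
 */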
177 static struct mpam_class *
178 mpam_class_find(u8 level_idx, enum mpam_class_types type)
179 {
180 	struct mpam_class *class;
181 
182 	lockdep_assert_held(&mpam_list_lock);
183 
184 	list_for_each_entry(class, &mpam_classes, classes_list) {
185 		if (class->type == type && class->level == level_idx)
186 			return class;
187 	}
188 
189 	return mpam_class_alloc(level_idx, type);
190 }
191 
192 static struct mpam_component *
193 mpam_component_alloc(struct mpam_class *class, int id)
194 {
195 	struct mpam_component *comp;
196 
197 	lockdep_assert_held(&mpam_list_lock);
198 
199 	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
200 	if (!comp)
201 		return ERR_PTR(-ENOMEM);
202 	init_garbage(&comp->garbage);
203 
204 	comp->comp_id = id;
205 	INIT_LIST_HEAD_RCU(&comp->vmsc);
206 	/* Affinity is updated when RIS are added */
207 	INIT_LIST_HEAD_RCU(&comp->class_list);
208 	comp->class = class;
209 
210 	list_add_rcu(&comp->class_list, &class->components);
211 
212 	return comp;
213 }
214 
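/*
 * Unlink the component from its class and queue it for freeing. If this
 * leaves the class with no components, the class is destroyed too.
 */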
215 static void mpam_component_destroy(struct mpam_component *comp)
216 {
217 	struct mpam_class *class = comp->class;
218 
219 	lockdep_assert_held(&mpam_list_lock);
220 
221 	list_del_rcu(&comp->class_list);
222 	add_to_garbage(comp);
223 
224 	if (list_empty(&class->components))
225 		mpam_class_destroy(class);
226 }
227 
228 static struct mpam_component *
229 mpam_component_find(struct mpam_class *class, int id)
230 {
231 	struct mpam_component *comp;
232 
233 	lockdep_assert_held(&mpam_list_lock);
234 
235 	list_for_each_entry(comp, &class->components, class_list) {
236 		if (comp->comp_id == id)
237 			return comp;
238 	}
239 
240 	return mpam_component_alloc(class, id);
241 }
242 
243 static struct mpam_vmsc *
244 mpam_vmsc_alloc(struct mpam_component *comp, struct mpam_msc *msc)
245 {
246 	struct mpam_vmsc *vmsc;
247 
248 	lockdep_assert_held(&mpam_list_lock);
249 
250 	vmsc = kzalloc(sizeof(*vmsc), GFP_KERNEL);
251 	if (!vmsc)
252 		return ERR_PTR(-ENOMEM);
253 	init_garbage(&vmsc->garbage);
254 
255 	INIT_LIST_HEAD_RCU(&vmsc->ris);
256 	INIT_LIST_HEAD_RCU(&vmsc->comp_list);
257 	vmsc->comp = comp;
258 	vmsc->msc = msc;
259 
260 	list_add_rcu(&vmsc->comp_list, &comp->vmsc);
261 
262 	return vmsc;
263 }
264 
265 static void mpam_vmsc_destroy(struct mpam_vmsc *vmsc)
266 {
267 	struct mpam_component *comp = vmsc->comp;
268 
269 	lockdep_assert_held(&mpam_list_lock);
270 
271 	list_del_rcu(&vmsc->comp_list);
272 	add_to_garbage(vmsc);
273 
274 	if (list_empty(&comp->vmsc))
275 		mpam_component_destroy(comp);
276 }
277 
278 static struct mpam_vmsc *
279 mpam_vmsc_find(struct mpam_component *comp, struct mpam_msc *msc)
280 {
281 	struct mpam_vmsc *vmsc;
282 
283 	lockdep_assert_held(&mpam_list_lock);
284 
285 	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
286 		if (vmsc->msc->id == msc->id)
287 			return vmsc;
288 	}
289 
290 	return mpam_vmsc_alloc(comp, msc);
291 }
292 
293 /*
294  * The cacheinfo structures are only populated when CPUs are online.
295  * This helper walks the ACPI PPTT to include offline CPUs too.
296  */
297 int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level,
298 				   cpumask_t *affinity)
299 {
300 	return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
301 }
302 
303 /*
304  * cpumask_of_node() only knows about online CPUs. This can't tell us whether
305  * a class is represented on all possible CPUs.
306  */
307 static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity)
308 {
309 	int cpu;
310 
311 	for_each_possible_cpu(cpu) {
312 		if (node_id == cpu_to_node(cpu))
313 			cpumask_set_cpu(cpu, affinity);
314 	}
315 }
316 
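/*
 * Work out which CPUs this RIS covers: the CPUs sharing the cache for cache
 * classes, or the CPUs of the NUMA node for memory classes, in both cases
 * reduced to the CPUs that can access the MSC. MPAM_CLASS_UNKNOWN leaves
 * the affinity mask untouched.
 */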
317 static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity,
318 				 enum mpam_class_types type,
319 				 struct mpam_class *class,
320 				 struct mpam_component *comp)
321 {
322 	int err;
323 
324 	switch (type) {
325 	case MPAM_CLASS_CACHE:
326 		err = mpam_get_cpumask_from_cache_id(comp->comp_id, class->level,
327 						     affinity);
328 		if (err) {
329 			dev_warn_once(&msc->pdev->dev,
330 				      "Failed to determine CPU affinity\n");
331 			return err;
332 		}
333 
334 		if (cpumask_empty(affinity))
335 			dev_warn_once(&msc->pdev->dev, "no CPUs associated with cache node\n");
336 
337 		break;
338 	case MPAM_CLASS_MEMORY:
339 		get_cpumask_from_node_id(comp->comp_id, affinity);
340 		/* affinity may be empty for CPU-less memory nodes */
341 		break;
342 	case MPAM_CLASS_UNKNOWN:
343 		return 0;
344 	}
345 
346 	cpumask_and(affinity, affinity, &msc->accessibility);
347 
348 	return 0;
349 }
350 
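/*
 * Create a RIS and link it into the class->component->vmsc hierarchy,
 * allocating any of those levels that don't exist yet. On failure, levels
 * that were created here but are still empty are destroyed again; anything
 * queued as garbage is freed by the caller.
 */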
351 static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx,
352 				  enum mpam_class_types type, u8 class_id,
353 				  int component_id)
354 {
355 	int err;
356 	struct mpam_vmsc *vmsc;
357 	struct mpam_msc_ris *ris;
358 	struct mpam_class *class;
359 	struct mpam_component *comp;
360 	struct platform_device *pdev = msc->pdev;
361 
362 	lockdep_assert_held(&mpam_list_lock);
363 
364 	if (ris_idx >= MPAM_MSC_MAX_NUM_RIS)
365 		return -EINVAL;
366 
367 	if (test_and_set_bit(ris_idx, &msc->ris_idxs))
368 		return -EBUSY;
369 
370 	ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), GFP_KERNEL);
371 	if (!ris)
372 		return -ENOMEM;
373 	init_garbage(&ris->garbage);
374 	ris->garbage.pdev = pdev;
375 
376 	class = mpam_class_find(class_id, type);
377 	if (IS_ERR(class))
378 		return PTR_ERR(class);
379 
380 	comp = mpam_component_find(class, component_id);
381 	if (IS_ERR(comp)) {
382 		if (list_empty(&class->components))
383 			mpam_class_destroy(class);
384 		return PTR_ERR(comp);
385 	}
386 
387 	vmsc = mpam_vmsc_find(comp, msc);
388 	if (IS_ERR(vmsc)) {
389 		if (list_empty(&comp->vmsc))
390 			mpam_component_destroy(comp);
391 		return PTR_ERR(vmsc);
392 	}
393 
394 	err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp);
395 	if (err) {
396 		if (list_empty(&vmsc->ris))
397 			mpam_vmsc_destroy(vmsc);
398 		return err;
399 	}
400 
401 	ris->ris_idx = ris_idx;
402 	INIT_LIST_HEAD_RCU(&ris->msc_list);
403 	INIT_LIST_HEAD_RCU(&ris->vmsc_list);
404 	ris->vmsc = vmsc;
405 
406 	cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity);
407 	cpumask_or(&class->affinity, &class->affinity, &ris->affinity);
408 	list_add_rcu(&ris->vmsc_list, &vmsc->ris);
409 	list_add_rcu(&ris->msc_list, &msc->ris);
410 
411 	return 0;
412 }
413 
414 static void mpam_ris_destroy(struct mpam_msc_ris *ris)
415 {
416 	struct mpam_vmsc *vmsc = ris->vmsc;
417 	struct mpam_msc *msc = vmsc->msc;
418 	struct mpam_component *comp = vmsc->comp;
419 	struct mpam_class *class = comp->class;
420 
421 	lockdep_assert_held(&mpam_list_lock);
422 
423 	/*
424 	 * It is assumed affinities don't overlap. If they do the class becomes
425 	 * unusable immediately.
426 	 */
427 	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
428 	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
429 	clear_bit(ris->ris_idx, &msc->ris_idxs);
430 	list_del_rcu(&ris->msc_list);
431 	list_del_rcu(&ris->vmsc_list);
432 	add_to_garbage(ris);
433 
434 	if (list_empty(&vmsc->ris))
435 		mpam_vmsc_destroy(vmsc);
436 }
437 
438 int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
439 		    enum mpam_class_types type, u8 class_id, int component_id)
440 {
441 	int err;
442 
443 	mutex_lock(&mpam_list_lock);
444 	err = mpam_ris_create_locked(msc, ris_idx, type, class_id,
445 				     component_id);
446 	mutex_unlock(&mpam_list_lock);
447 	if (err)
448 		mpam_free_garbage();
449 
450 	return err;
451 }
452 
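/*
 * Minimal hardware probe: check MPAMF_AIDR reports an MPAM v1.x interface
 * and mark the MSC as probed.
 */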
453 static int mpam_msc_hw_probe(struct mpam_msc *msc)
454 {
455 	u64 idr;
456 	struct device *dev = &msc->pdev->dev;
457 
458 	lockdep_assert_held(&msc->probe_lock);
459 
460 	idr = __mpam_read_reg(msc, MPAMF_AIDR);
461 	if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) {
462 		dev_err_once(dev, "MSC does not match MPAM architecture v1.x\n");
463 		return -EIO;
464 	}
465 
466 	msc->probed = true;
467 
468 	return 0;
469 }
470 
471 static int mpam_cpu_online(unsigned int cpu)
472 {
473 	return 0;
474 }
475 
476 /* Before mpam is enabled, try to probe new MSC */
477 static int mpam_discovery_cpu_online(unsigned int cpu)
478 {
479 	int err = 0;
480 	struct mpam_msc *msc;
481 	bool new_device_probed = false;
482 
483 	guard(srcu)(&mpam_srcu);
484 	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
485 				 srcu_read_lock_held(&mpam_srcu)) {
486 		if (!cpumask_test_cpu(cpu, &msc->accessibility))
487 			continue;
488 
489 		mutex_lock(&msc->probe_lock);
490 		if (!msc->probed)
491 			err = mpam_msc_hw_probe(msc);
492 		mutex_unlock(&msc->probe_lock);
493 
494 		if (err)
495 			break;
496 		new_device_probed = true;
497 	}
498 
499 	if (new_device_probed && !err)
500 		schedule_work(&mpam_enable_work);
501 	if (err) {
502 		mpam_disable_reason = "error during probing";
503 		schedule_work(&mpam_broken_work);
504 	}
505 
506 	return err;
507 }
508 
509 static int mpam_cpu_offline(unsigned int cpu)
510 {
511 	return 0;
512 }
513 
514 static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online),
515 					  int (*offline)(unsigned int offline),
516 					  char *name)
517 {
518 	mutex_lock(&mpam_cpuhp_state_lock);
519 	if (mpam_cpuhp_state) {
520 		cpuhp_remove_state(mpam_cpuhp_state);
521 		mpam_cpuhp_state = 0;
522 	}
523 
524 	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, name, online,
525 					     offline);
526 	if (mpam_cpuhp_state <= 0) {
527 		pr_err("Failed to register cpuhp callbacks\n");
528 		mpam_cpuhp_state = 0;
529 	}
530 	mutex_unlock(&mpam_cpuhp_state_lock);
531 }
532 
533 /*
534  * An MSC can control traffic from a set of CPUs, but may only be accessible
535  * from a (hopefully wider) set of CPUs. The common reason for this is power
536  * management. If all the CPUs in a cluster are in PSCI:CPU_SUSPEND, the
537  * corresponding cache may also be powered off. By making accesses from
538  * one of those CPUs, we ensure we don't access a cache that's powered off.
539  */
540 static void update_msc_accessibility(struct mpam_msc *msc)
541 {
542 	u32 affinity_id;
543 	int err;
544 
545 	err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity",
546 				       &affinity_id);
547 	if (err)
548 		cpumask_copy(&msc->accessibility, cpu_possible_mask);
549 	else
550 		acpi_pptt_get_cpus_from_container(affinity_id, &msc->accessibility);
551 }
552 
553 /*
554  * There are two ways of reaching a struct mpam_msc_ris. Via the
555  * class->component->vmsc->ris, or via the msc.
556  * When destroying the msc, the other side needs unlinking and cleaning up too.
557  */
558 static void mpam_msc_destroy(struct mpam_msc *msc)
559 {
560 	struct platform_device *pdev = msc->pdev;
561 	struct mpam_msc_ris *ris, *tmp;
562 
563 	lockdep_assert_held(&mpam_list_lock);
564 
565 	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
566 		mpam_ris_destroy(ris);
567 
568 	list_del_rcu(&msc->all_msc_list);
569 	platform_set_drvdata(pdev, NULL);
570 
571 	add_to_garbage(msc);
572 }
573 
574 static void mpam_msc_drv_remove(struct platform_device *pdev)
575 {
576 	struct mpam_msc *msc = platform_get_drvdata(pdev);
577 
578 	mutex_lock(&mpam_list_lock);
579 	mpam_msc_destroy(msc);
580 	mutex_unlock(&mpam_list_lock);
581 
582 	mpam_free_garbage();
583 }
584 
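/*
 * Allocate and initialise a struct mpam_msc for this platform device:
 * work out which CPUs can access it, map its MMIO registers (PCC-based
 * MSCs are not supported here and are rejected), and publish it on the
 * mpam_all_msc list.
 */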
585 static struct mpam_msc *do_mpam_msc_drv_probe(struct platform_device *pdev)
586 {
587 	int err;
588 	u32 tmp;
589 	struct mpam_msc *msc;
590 	struct resource *msc_res;
591 	struct device *dev = &pdev->dev;
592 
593 	lockdep_assert_held(&mpam_list_lock);
594 
595 	msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL);
596 	if (!msc)
597 		return ERR_PTR(-ENOMEM);
598 	init_garbage(&msc->garbage);
599 	msc->garbage.pdev = pdev;
600 
601 	err = devm_mutex_init(dev, &msc->probe_lock);
602 	if (err)
603 		return ERR_PTR(err);
604 
605 	err = devm_mutex_init(dev, &msc->part_sel_lock);
606 	if (err)
607 		return ERR_PTR(err);
608 
609 	msc->id = pdev->id;
610 	msc->pdev = pdev;
611 	INIT_LIST_HEAD_RCU(&msc->all_msc_list);
612 	INIT_LIST_HEAD_RCU(&msc->ris);
613 
614 	update_msc_accessibility(msc);
615 	if (cpumask_empty(&msc->accessibility)) {
616 		dev_err_once(dev, "MSC is not accessible from any CPU!\n");
617 		return ERR_PTR(-EINVAL);
618 	}
619 
620 	if (device_property_read_u32(&pdev->dev, "pcc-channel", &tmp))
621 		msc->iface = MPAM_IFACE_MMIO;
622 	else
623 		msc->iface = MPAM_IFACE_PCC;
624 
625 	if (msc->iface == MPAM_IFACE_MMIO) {
626 		void __iomem *io;
627 
628 		io = devm_platform_get_and_ioremap_resource(pdev, 0,
629 							    &msc_res);
630 		if (IS_ERR(io)) {
631 			dev_err_once(dev, "Failed to map MSC base address\n");
632 			return ERR_CAST(io);
633 		}
634 		msc->mapped_hwpage_sz = resource_size(msc_res);
635 		msc->mapped_hwpage = io;
636 	} else {
637 		return ERR_PTR(-EINVAL);
638 	}
639 
640 	list_add_rcu(&msc->all_msc_list, &mpam_all_msc);
641 	platform_set_drvdata(pdev, msc);
642 
643 	return msc;
644 }
645 
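/* Number of MSCs described by firmware, from acpi_mpam_count_msc() */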
646 static int fw_num_msc;
647 
648 static int mpam_msc_drv_probe(struct platform_device *pdev)
649 {
650 	int err;
651 	struct mpam_msc *msc = NULL;
652 	void *plat_data = pdev->dev.platform_data;
653 
654 	mutex_lock(&mpam_list_lock);
655 	msc = do_mpam_msc_drv_probe(pdev);
656 	mutex_unlock(&mpam_list_lock);
657 
658 	if (IS_ERR(msc))
659 		return PTR_ERR(msc);
660 
661 	/* Create RIS entries described by firmware */
662 	err = acpi_mpam_parse_resources(msc, plat_data);
663 	if (err) {
664 		mpam_msc_drv_remove(pdev);
665 		return err;
666 	}
667 
668 	if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc)
669 		mpam_register_cpuhp_callbacks(mpam_discovery_cpu_online, NULL,
670 					      "mpam:drv_probe");
671 
672 	return 0;
673 }
674 
675 static struct platform_driver mpam_msc_driver = {
676 	.driver = {
677 		.name = "mpam_msc",
678 	},
679 	.probe = mpam_msc_drv_probe,
680 	.remove = mpam_msc_drv_remove,
681 };
682 
683 static void mpam_enable_once(void)
684 {
685 	mpam_register_cpuhp_callbacks(mpam_cpu_online, mpam_cpu_offline,
686 				      "mpam:online");
687 
688 	pr_info("MPAM enabled\n");
689 }
690 
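/*
 * Tear everything down again: unregister the cpuhp callbacks, destroy every
 * MSC (and with it every class, component, vmsc and RIS) and free the
 * garbage. Scheduled via mpam_broken_work when something has gone wrong.
 */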
691 void mpam_disable(struct work_struct *ignored)
692 {
693 	struct mpam_msc *msc, *tmp;
694 
695 	mutex_lock(&mpam_cpuhp_state_lock);
696 	if (mpam_cpuhp_state) {
697 		cpuhp_remove_state(mpam_cpuhp_state);
698 		mpam_cpuhp_state = 0;
699 	}
700 	mutex_unlock(&mpam_cpuhp_state_lock);
701 
702 	mutex_lock(&mpam_list_lock);
703 	list_for_each_entry_safe(msc, tmp, &mpam_all_msc, all_msc_list)
704 		mpam_msc_destroy(msc);
705 	mutex_unlock(&mpam_list_lock);
706 	mpam_free_garbage();
707 
708 	pr_err_once("MPAM disabled due to %s\n", mpam_disable_reason);
709 }
710 
711 /*
712  * Enable mpam once all devices have been probed.
713  * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
714  * Also scheduled when new devices are probed as new CPUs come online.
715  */
716 void mpam_enable(struct work_struct *work)
717 {
718 	static atomic_t once;
719 	struct mpam_msc *msc;
720 	bool all_devices_probed = true;
721 
722 	/* Have we probed all the hw devices? */
723 	guard(srcu)(&mpam_srcu);
724 	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
725 				 srcu_read_lock_held(&mpam_srcu)) {
726 		mutex_lock(&msc->probe_lock);
727 		if (!msc->probed)
728 			all_devices_probed = false;
729 		mutex_unlock(&msc->probe_lock);
730 
731 		if (!all_devices_probed)
732 			break;
733 	}
734 
735 	if (all_devices_probed && !atomic_fetch_inc(&once))
736 		mpam_enable_once();
737 }
738 
739 static int __init mpam_msc_driver_init(void)
740 {
741 	if (!system_supports_mpam())
742 		return -EOPNOTSUPP;
743 
744 	init_srcu_struct(&mpam_srcu);
745 
746 	fw_num_msc = acpi_mpam_count_msc();
747 	if (fw_num_msc <= 0) {
748 		pr_err("No MSC devices found in firmware\n");
749 		return -EINVAL;
750 	}
751 
752 	return platform_driver_register(&mpam_msc_driver);
753 }
754 subsys_initcall(mpam_msc_driver_init);
755