xref: /linux/drivers/resctrl/mpam_devices.c (revision c10ca83a778304f976cbea60bbbb2f1fac003f5c)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2025 Arm Ltd.
3 
4 #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
5 
6 #include <linux/acpi.h>
7 #include <linux/atomic.h>
8 #include <linux/arm_mpam.h>
9 #include <linux/bitfield.h>
10 #include <linux/cacheinfo.h>
11 #include <linux/cpu.h>
12 #include <linux/cpumask.h>
13 #include <linux/device.h>
14 #include <linux/errno.h>
15 #include <linux/gfp.h>
16 #include <linux/list.h>
17 #include <linux/lockdep.h>
18 #include <linux/mutex.h>
19 #include <linux/platform_device.h>
20 #include <linux/printk.h>
21 #include <linux/srcu.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/workqueue.h>
25 
26 #include "mpam_internal.h"
27 
28 /*
29  * mpam_list_lock protects the SRCU lists when writing. Once the
30  * mpam_enabled key is enabled these lists are read-only,
31  * unless the error interrupt disables the driver.
32  */
33 static DEFINE_MUTEX(mpam_list_lock);
34 static LIST_HEAD(mpam_all_msc);
35 
36 struct srcu_struct mpam_srcu;
37 
38 /*
39  * Number of MSCs that have been probed. Once all MSCs have been probed MPAM
40  * can be enabled.
41  */
42 static atomic_t mpam_num_msc;
43 
44 static int mpam_cpuhp_state;
45 static DEFINE_MUTEX(mpam_cpuhp_state_lock);
46 
47 /*
48  * The smallest common values for any CPU or MSC in the system.
49  * Generating traffic outside this range will result in screaming interrupts.
50  */
51 u16 mpam_partid_max;
52 u8 mpam_pmg_max;
53 static bool partid_max_init, partid_max_published;
54 static DEFINE_SPINLOCK(partid_max_lock);
55 
56 /*
57  * mpam is enabled once all devices have been probed from CPU online callbacks,
58  * scheduled via this work_struct. If access to an MSC depends on a CPU that
59  * was not brought online at boot, this can happen surprisingly late.
60  */
61 static DECLARE_WORK(mpam_enable_work, &mpam_enable);
62 
63 /*
64  * All mpam error interrupts indicate a software bug. On receipt, disable the
65  * driver.
66  */
67 static DECLARE_WORK(mpam_broken_work, &mpam_disable);
68 
69 /* The reason mpam was disabled, printed to aid debugging */
70 static char *mpam_disable_reason;
71 
72 /*
73  * An MSC is a physical container for controls and monitors, each identified by
74  * their RIS index. These share a base-address, interrupts and some MMIO
75  * registers. A vMSC is a virtual container for RIS in an MSC that control or
76  * monitor the same thing. Members of a vMSC are all RIS in the same MSC, but
77  * not all RIS in an MSC share a vMSC.
78  *
79  * Components are a group of vMSC that control or monitor the same thing but
80  * are from different MSC, so have different base-addresses, interrupts etc.
81  * Classes are the set of components of the same type.
82  *
83  * The features of a vMSC are the union of those of the RIS it contains.
84  * The features of a Class and Component are the common subset of the vMSC
85  * they contain.
86  *
87  * e.g. The system cache may have bandwidth controls on multiple interfaces,
88  * for regulating traffic from devices independently of traffic from CPUs.
89  * If these are two RIS in one MSC, they will be treated as controlling
90  * different things, and will not share a vMSC/component/class.
91  *
92  * e.g. The L2 may have one MSC and two RIS, one for cache-controls another
93  * for bandwidth. These two RIS are members of the same vMSC.
94  *
95  * e.g. The set of RIS that make up the L2 are grouped as a component. These
96  * are sometimes termed slices. They should be configured the same, as if there
97  * were only one.
98  *
99  * e.g. The SoC probably has more than one L2, each attached to a distinct set
100  * of CPUs. All the L2 components are grouped as a class.
101  *
102  * When creating an MSC, the struct mpam_msc is added to the mpam_all_msc list,
103  * then linked via struct mpam_msc_ris to a vmsc, component and class.
104  * The same MSC may exist under different class->component->vmsc paths, but the
105  * RIS index will be unique.
106  */
107 LIST_HEAD(mpam_classes);
108 
109 /* List of all objects that can be freed after synchronize_srcu() */
110 static LLIST_HEAD(mpam_garbage);
111 
112 static inline void init_garbage(struct mpam_garbage *garbage)
113 {
114 	init_llist_node(&garbage->llist);
115 }
116 
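/*
 * Queue an object to be freed once an SRCU grace period has elapsed. The
 * object's embedded struct mpam_garbage records what to free; callers set
 * garbage.pdev first if the memory was devm allocated against a device.
 * Queued objects are freed by mpam_free_garbage().
 */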
117 #define add_to_garbage(x)				\
118 do {							\
119 	__typeof__(x) _x = (x);				\
120 	_x->garbage.to_free = _x;			\
121 	llist_add(&_x->garbage.llist, &mpam_garbage);	\
122 } while (0)
123 
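/* Free everything on the garbage list once any SRCU readers are done with it. */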
124 static void mpam_free_garbage(void)
125 {
126 	struct mpam_garbage *iter, *tmp;
127 	struct llist_node *to_free = llist_del_all(&mpam_garbage);
128 
129 	if (!to_free)
130 		return;
131 
132 	synchronize_srcu(&mpam_srcu);
133 
134 	llist_for_each_entry_safe(iter, tmp, to_free, llist) {
135 		if (iter->pdev)
136 			devm_kfree(&iter->pdev->dev, iter->to_free);
137 		else
138 			kfree(iter->to_free);
139 	}
140 }
141 
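/*
 * MMIO accessors. The MSC must be accessed from a CPU in its accessibility
 * mask. PARTID-selected (MPAMF_/MPAMCFG_) registers additionally require
 * part_sel_lock, and monitor (MSMON_) registers require the mon_sel lock,
 * which the _partsel/_monsel helpers below assert.
 */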
142 static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
143 {
144 	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
145 
146 	return readl_relaxed(msc->mapped_hwpage + reg);
147 }
148 
149 static inline u32 _mpam_read_partsel_reg(struct mpam_msc *msc, u16 reg)
150 {
151 	lockdep_assert_held_once(&msc->part_sel_lock);
152 	return __mpam_read_reg(msc, reg);
153 }
154 
155 #define mpam_read_partsel_reg(msc, reg) _mpam_read_partsel_reg(msc, MPAMF_##reg)
156 
157 static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val)
158 {
159 	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
160 	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
161 
162 	writel_relaxed(val, msc->mapped_hwpage + reg);
163 }
164 
165 static inline void _mpam_write_partsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
166 {
167 	lockdep_assert_held_once(&msc->part_sel_lock);
168 	__mpam_write_reg(msc, reg, val);
169 }
170 
171 #define mpam_write_partsel_reg(msc, reg, val)  _mpam_write_partsel_reg(msc, MPAMCFG_##reg, val)
172 
173 static inline u32 _mpam_read_monsel_reg(struct mpam_msc *msc, u16 reg)
174 {
175 	mpam_mon_sel_lock_held(msc);
176 	return __mpam_read_reg(msc, reg);
177 }
178 
179 #define mpam_read_monsel_reg(msc, reg) _mpam_read_monsel_reg(msc, MSMON_##reg)
180 
181 static inline void _mpam_write_monsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
182 {
183 	mpam_mon_sel_lock_held(msc);
184 	__mpam_write_reg(msc, reg, val);
185 }
186 
187 #define mpam_write_monsel_reg(msc, reg, val)   _mpam_write_monsel_reg(msc, MSMON_##reg, val)
188 
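/*
 * MPAMF_IDR is 64 bits when the extended fields are implemented. Read the
 * high word only if MPAMF_IDR.EXT says it is present.
 */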
189 static u64 mpam_msc_read_idr(struct mpam_msc *msc)
190 {
191 	u64 idr_high = 0, idr_low;
192 
193 	lockdep_assert_held(&msc->part_sel_lock);
194 
195 	idr_low = mpam_read_partsel_reg(msc, IDR);
196 	if (FIELD_GET(MPAMF_IDR_EXT, idr_low))
197 		idr_high = mpam_read_partsel_reg(msc, IDR + 4);
198 
199 	return (idr_high << 32) | idr_low;
200 }
201 
202 static void __mpam_part_sel_raw(u32 partsel, struct mpam_msc *msc)
203 {
204 	lockdep_assert_held(&msc->part_sel_lock);
205 
206 	mpam_write_partsel_reg(msc, PART_SEL, partsel);
207 }
208 
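/*
 * Select the RIS and PARTID that subsequent MPAMF_/MPAMCFG_ accesses operate
 * on. Callers hold part_sel_lock for the duration of those accesses.
 */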
209 static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc)
210 {
211 	u32 partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) |
212 		      FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid);
213 
214 	__mpam_part_sel_raw(partsel, msc);
215 }
216 
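/*
 * Requestors (e.g. the CPUs) report the PARTID and PMG ranges they can
 * generate. The system-wide limits are the minimum over all requestors and
 * MSCs. Once the limits have been published, a new requestor that would
 * shrink them is refused with -EBUSY. A caller might do (illustrative
 * variable names only):
 *	err = mpam_register_requestor(cpu_partid_max, cpu_pmg_max);
 */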
217 int mpam_register_requestor(u16 partid_max, u8 pmg_max)
218 {
219 	guard(spinlock)(&partid_max_lock);
220 	if (!partid_max_init) {
221 		mpam_partid_max = partid_max;
222 		mpam_pmg_max = pmg_max;
223 		partid_max_init = true;
224 	} else if (!partid_max_published) {
225 		mpam_partid_max = min(mpam_partid_max, partid_max);
226 		mpam_pmg_max = min(mpam_pmg_max, pmg_max);
227 	} else {
228 		/* New requestors can't lower the values */
229 		if (partid_max < mpam_partid_max || pmg_max < mpam_pmg_max)
230 			return -EBUSY;
231 	}
232 
233 	return 0;
234 }
235 EXPORT_SYMBOL(mpam_register_requestor);
236 
237 static struct mpam_class *
238 mpam_class_alloc(u8 level_idx, enum mpam_class_types type)
239 {
240 	struct mpam_class *class;
241 
242 	lockdep_assert_held(&mpam_list_lock);
243 
244 	class = kzalloc(sizeof(*class), GFP_KERNEL);
245 	if (!class)
246 		return ERR_PTR(-ENOMEM);
247 	init_garbage(&class->garbage);
248 
249 	INIT_LIST_HEAD_RCU(&class->components);
250 	/* Affinity is updated when RIS are added */
251 	class->level = level_idx;
252 	class->type = type;
253 	INIT_LIST_HEAD_RCU(&class->classes_list);
254 
255 	list_add_rcu(&class->classes_list, &mpam_classes);
256 
257 	return class;
258 }
259 
260 static void mpam_class_destroy(struct mpam_class *class)
261 {
262 	lockdep_assert_held(&mpam_list_lock);
263 
264 	list_del_rcu(&class->classes_list);
265 	add_to_garbage(class);
266 }
267 
268 static struct mpam_class *
269 mpam_class_find(u8 level_idx, enum mpam_class_types type)
270 {
271 	struct mpam_class *class;
272 
273 	lockdep_assert_held(&mpam_list_lock);
274 
275 	list_for_each_entry(class, &mpam_classes, classes_list) {
276 		if (class->type == type && class->level == level_idx)
277 			return class;
278 	}
279 
280 	return mpam_class_alloc(level_idx, type);
281 }
282 
283 static struct mpam_component *
284 mpam_component_alloc(struct mpam_class *class, int id)
285 {
286 	struct mpam_component *comp;
287 
288 	lockdep_assert_held(&mpam_list_lock);
289 
290 	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
291 	if (!comp)
292 		return ERR_PTR(-ENOMEM);
293 	init_garbage(&comp->garbage);
294 
295 	comp->comp_id = id;
296 	INIT_LIST_HEAD_RCU(&comp->vmsc);
297 	/* Affinity is updated when RIS are added */
298 	INIT_LIST_HEAD_RCU(&comp->class_list);
299 	comp->class = class;
300 
301 	list_add_rcu(&comp->class_list, &class->components);
302 
303 	return comp;
304 }
305 
306 static void mpam_component_destroy(struct mpam_component *comp)
307 {
308 	struct mpam_class *class = comp->class;
309 
310 	lockdep_assert_held(&mpam_list_lock);
311 
312 	list_del_rcu(&comp->class_list);
313 	add_to_garbage(comp);
314 
315 	if (list_empty(&class->components))
316 		mpam_class_destroy(class);
317 }
318 
319 static struct mpam_component *
320 mpam_component_find(struct mpam_class *class, int id)
321 {
322 	struct mpam_component *comp;
323 
324 	lockdep_assert_held(&mpam_list_lock);
325 
326 	list_for_each_entry(comp, &class->components, class_list) {
327 		if (comp->comp_id == id)
328 			return comp;
329 	}
330 
331 	return mpam_component_alloc(class, id);
332 }
333 
334 static struct mpam_vmsc *
335 mpam_vmsc_alloc(struct mpam_component *comp, struct mpam_msc *msc)
336 {
337 	struct mpam_vmsc *vmsc;
338 
339 	lockdep_assert_held(&mpam_list_lock);
340 
341 	vmsc = kzalloc(sizeof(*vmsc), GFP_KERNEL);
342 	if (!vmsc)
343 		return ERR_PTR(-ENOMEM);
344 	init_garbage(&vmsc->garbage);
345 
346 	INIT_LIST_HEAD_RCU(&vmsc->ris);
347 	INIT_LIST_HEAD_RCU(&vmsc->comp_list);
348 	vmsc->comp = comp;
349 	vmsc->msc = msc;
350 
351 	list_add_rcu(&vmsc->comp_list, &comp->vmsc);
352 
353 	return vmsc;
354 }
355 
356 static void mpam_vmsc_destroy(struct mpam_vmsc *vmsc)
357 {
358 	struct mpam_component *comp = vmsc->comp;
359 
360 	lockdep_assert_held(&mpam_list_lock);
361 
362 	list_del_rcu(&vmsc->comp_list);
363 	add_to_garbage(vmsc);
364 
365 	if (list_empty(&comp->vmsc))
366 		mpam_component_destroy(comp);
367 }
368 
369 static struct mpam_vmsc *
370 mpam_vmsc_find(struct mpam_component *comp, struct mpam_msc *msc)
371 {
372 	struct mpam_vmsc *vmsc;
373 
374 	lockdep_assert_held(&mpam_list_lock);
375 
376 	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
377 		if (vmsc->msc->id == msc->id)
378 			return vmsc;
379 	}
380 
381 	return mpam_vmsc_alloc(comp, msc);
382 }
383 
384 /*
385  * The cacheinfo structures are only populated when CPUs are online.
386  * This helper walks the ACPI tables to include offline CPUs too.
387  */
388 int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level,
389 				   cpumask_t *affinity)
390 {
391 	return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
392 }
393 
394 /*
395  * cpumask_of_node() only knows about online CPUs. This can't tell us whether
396  * a class is represented on all possible CPUs.
397  */
398 static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity)
399 {
400 	int cpu;
401 
402 	for_each_possible_cpu(cpu) {
403 		if (node_id == cpu_to_node(cpu))
404 			cpumask_set_cpu(cpu, affinity);
405 	}
406 }
407 
408 static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity,
409 				 enum mpam_class_types type,
410 				 struct mpam_class *class,
411 				 struct mpam_component *comp)
412 {
413 	int err;
414 
415 	switch (type) {
416 	case MPAM_CLASS_CACHE:
417 		err = mpam_get_cpumask_from_cache_id(comp->comp_id, class->level,
418 						     affinity);
419 		if (err) {
420 			dev_warn_once(&msc->pdev->dev,
421 				      "Failed to determine CPU affinity\n");
422 			return err;
423 		}
424 
425 		if (cpumask_empty(affinity))
426 			dev_warn_once(&msc->pdev->dev, "no CPUs associated with cache node\n");
427 
428 		break;
429 	case MPAM_CLASS_MEMORY:
430 		get_cpumask_from_node_id(comp->comp_id, affinity);
431 		/* affinity may be empty for CPU-less memory nodes */
432 		break;
433 	case MPAM_CLASS_UNKNOWN:
434 		return 0;
435 	}
436 
437 	cpumask_and(affinity, affinity, &msc->accessibility);
438 
439 	return 0;
440 }
441 
442 static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx,
443 				  enum mpam_class_types type, u8 class_id,
444 				  int component_id)
445 {
446 	int err;
447 	struct mpam_vmsc *vmsc;
448 	struct mpam_msc_ris *ris;
449 	struct mpam_class *class;
450 	struct mpam_component *comp;
451 	struct platform_device *pdev = msc->pdev;
452 
453 	lockdep_assert_held(&mpam_list_lock);
454 
455 	if (ris_idx >= MPAM_MSC_MAX_NUM_RIS)
456 		return -EINVAL;
457 
458 	if (test_and_set_bit(ris_idx, &msc->ris_idxs))
459 		return -EBUSY;
460 
461 	ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), GFP_KERNEL);
462 	if (!ris)
463 		return -ENOMEM;
464 	init_garbage(&ris->garbage);
465 	ris->garbage.pdev = pdev;
466 
467 	class = mpam_class_find(class_id, type);
468 	if (IS_ERR(class))
469 		return PTR_ERR(class);
470 
471 	comp = mpam_component_find(class, component_id);
472 	if (IS_ERR(comp)) {
473 		if (list_empty(&class->components))
474 			mpam_class_destroy(class);
475 		return PTR_ERR(comp);
476 	}
477 
478 	vmsc = mpam_vmsc_find(comp, msc);
479 	if (IS_ERR(vmsc)) {
480 		if (list_empty(&comp->vmsc))
481 			mpam_component_destroy(comp);
482 		return PTR_ERR(vmsc);
483 	}
484 
485 	err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp);
486 	if (err) {
487 		if (list_empty(&vmsc->ris))
488 			mpam_vmsc_destroy(vmsc);
489 		return err;
490 	}
491 
492 	ris->ris_idx = ris_idx;
493 	INIT_LIST_HEAD_RCU(&ris->msc_list);
494 	INIT_LIST_HEAD_RCU(&ris->vmsc_list);
495 	ris->vmsc = vmsc;
496 
497 	cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity);
498 	cpumask_or(&class->affinity, &class->affinity, &ris->affinity);
499 	list_add_rcu(&ris->vmsc_list, &vmsc->ris);
500 	list_add_rcu(&ris->msc_list, &msc->ris);
501 
502 	return 0;
503 }
504 
505 static void mpam_ris_destroy(struct mpam_msc_ris *ris)
506 {
507 	struct mpam_vmsc *vmsc = ris->vmsc;
508 	struct mpam_msc *msc = vmsc->msc;
509 	struct mpam_component *comp = vmsc->comp;
510 	struct mpam_class *class = comp->class;
511 
512 	lockdep_assert_held(&mpam_list_lock);
513 
514 	/*
515 	 * It is assumed affinities don't overlap. If they do the class becomes
516 	 * unusable immediately.
517 	 */
518 	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
519 	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
520 	clear_bit(ris->ris_idx, &msc->ris_idxs);
521 	list_del_rcu(&ris->msc_list);
522 	list_del_rcu(&ris->vmsc_list);
523 	add_to_garbage(ris);
524 
525 	if (list_empty(&vmsc->ris))
526 		mpam_vmsc_destroy(vmsc);
527 }
528 
529 int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
530 		    enum mpam_class_types type, u8 class_id, int component_id)
531 {
532 	int err;
533 
534 	mutex_lock(&mpam_list_lock);
535 	err = mpam_ris_create_locked(msc, ris_idx, type, class_id,
536 				     component_id);
537 	mutex_unlock(&mpam_list_lock);
538 	if (err)
539 		mpam_free_garbage();
540 
541 	return err;
542 }
543 
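/*
 * Look up a RIS by index, creating a placeholder MPAM_CLASS_UNKNOWN entry if
 * firmware did not describe it. Called while probing the MSC's ID registers.
 */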
544 static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc,
545 						   u8 ris_idx)
546 {
547 	int err;
548 	struct mpam_msc_ris *ris;
549 
550 	lockdep_assert_held(&mpam_list_lock);
551 
552 	if (!test_bit(ris_idx, &msc->ris_idxs)) {
553 		err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN,
554 					     0, 0);
555 		if (err)
556 			return ERR_PTR(err);
557 	}
558 
559 	list_for_each_entry(ris, &msc->ris, msc_list) {
560 		if (ris->ris_idx == ris_idx)
561 			return ris;
562 	}
563 
564 	return ERR_PTR(-ENOENT);
565 }
566 
567 /*
568  * IHI009A.a has this nugget: "If a monitor does not support automatic behaviour
569  * of NRDY, software can use this bit for any purpose" - so hardware might not
570  * implement this - but it isn't RES0.
571  *
572  * Try and see what values stick in this bit. If we can write both values,
573  * it's probably not implemented by hardware.
574  */
575 static bool _mpam_ris_hw_probe_hw_nrdy(struct mpam_msc_ris *ris, u32 mon_reg)
576 {
577 	u32 now;
578 	u64 mon_sel;
579 	bool can_set, can_clear;
580 	struct mpam_msc *msc = ris->vmsc->msc;
581 
582 	if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc)))
583 		return false;
584 
585 	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, 0) |
586 		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
587 	mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);
588 
589 	_mpam_write_monsel_reg(msc, mon_reg, MSMON___NRDY);
590 	now = _mpam_read_monsel_reg(msc, mon_reg);
591 	can_set = now & MSMON___NRDY;
592 
593 	_mpam_write_monsel_reg(msc, mon_reg, 0);
594 	now = _mpam_read_monsel_reg(msc, mon_reg);
595 	can_clear = !(now & MSMON___NRDY);
596 	mpam_mon_sel_unlock(msc);
597 
598 	return (!can_set || !can_clear);
599 }
600 
601 #define mpam_ris_hw_probe_hw_nrdy(_ris, _mon_reg)			\
602 	_mpam_ris_hw_probe_hw_nrdy(_ris, MSMON_##_mon_reg)
603 
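/*
 * Read the per-RIS ID registers to discover which controls and monitors this
 * RIS implements. The caller has already selected this RIS via
 * __mpam_part_sel() and holds probe_lock and part_sel_lock.
 */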
604 static void mpam_ris_hw_probe(struct mpam_msc_ris *ris)
605 {
606 	int err;
607 	struct mpam_msc *msc = ris->vmsc->msc;
608 	struct device *dev = &msc->pdev->dev;
609 	struct mpam_props *props = &ris->props;
610 
611 	lockdep_assert_held(&msc->probe_lock);
612 	lockdep_assert_held(&msc->part_sel_lock);
613 
614 	/* Cache Portion partitioning */
615 	if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) {
616 		u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR);
617 
618 		props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features);
619 		if (props->cpbm_wd)
620 			mpam_set_feature(mpam_feat_cpor_part, props);
621 	}
622 
623 	/* Memory bandwidth partitioning */
624 	if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) {
625 		u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR);
626 
627 		/* portion bitmap resolution */
628 		props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features);
629 		if (props->mbw_pbm_bits &&
630 		    FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features))
631 			mpam_set_feature(mpam_feat_mbw_part, props);
632 
633 		props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features);
634 		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features))
635 			mpam_set_feature(mpam_feat_mbw_max, props);
636 	}
637 
638 	/* Performance Monitoring */
639 	if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) {
640 		u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR);
641 
642 		/*
643 		 * If the firmware "arm,not-ready-us" property is missing, the
644 		 * CSU counters can't be used. Should we wait forever?
645 		 */
646 		err = device_property_read_u32(&msc->pdev->dev,
647 					       "arm,not-ready-us",
648 					       &msc->nrdy_usec);
649 
650 		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) {
651 			u32 csumonidr;
652 
653 			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
654 			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
655 			if (props->num_csu_mon) {
656 				bool hw_managed;
657 
658 				mpam_set_feature(mpam_feat_msmon_csu, props);
659 
660 				/* Is NRDY hardware managed? */
661 				hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, CSU);
662 				if (hw_managed)
663 					mpam_set_feature(mpam_feat_msmon_csu_hw_nrdy, props);
664 			}
665 
666 			/*
667 			 * Accept the missing firmware property if NRDY appears
668 			 * un-implemented.
669 			 */
670 			if (err && mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, props))
671 				dev_err_once(dev, "Counters are not usable because not-ready timeout was not provided by firmware.\n");
672 		}
673 		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
674 			bool hw_managed;
675 			u32 mbwumon_idr = mpam_read_partsel_reg(msc, MBWUMON_IDR);
676 
677 			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumon_idr);
678 			if (props->num_mbwu_mon)
679 				mpam_set_feature(mpam_feat_msmon_mbwu, props);
680 
681 			/* Is NRDY hardware managed? */
682 			hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, MBWU);
683 			if (hw_managed)
684 				mpam_set_feature(mpam_feat_msmon_mbwu_hw_nrdy, props);
685 
686 			/*
687 			 * Don't warn about any missing firmware property for
688 			 * MBWU NRDY - it doesn't make any sense!
689 			 */
690 		}
691 	}
692 }
693 
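/*
 * Check the MSC implements MPAM architecture v1.x, enumerate its RIS and
 * their features, and narrow the MSC and system-wide PARTID/PMG limits to
 * what this MSC supports.
 */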
694 static int mpam_msc_hw_probe(struct mpam_msc *msc)
695 {
696 	u64 idr;
697 	u16 partid_max;
698 	u8 ris_idx, pmg_max;
699 	struct mpam_msc_ris *ris;
700 	struct device *dev = &msc->pdev->dev;
701 
702 	lockdep_assert_held(&msc->probe_lock);
703 
704 	idr = __mpam_read_reg(msc, MPAMF_AIDR);
705 	if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) {
706 		dev_err_once(dev, "MSC does not match MPAM architecture v1.x\n");
707 		return -EIO;
708 	}
709 
710 	/* Grab an IDR value to find out how many RIS there are */
711 	mutex_lock(&msc->part_sel_lock);
712 	idr = mpam_msc_read_idr(msc);
713 	mutex_unlock(&msc->part_sel_lock);
714 
715 	msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr);
716 
717 	/* Use these values so partid/pmg always starts with a valid value */
718 	msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
719 	msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
720 
721 	for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) {
722 		mutex_lock(&msc->part_sel_lock);
723 		__mpam_part_sel(ris_idx, 0, msc);
724 		idr = mpam_msc_read_idr(msc);
725 		mutex_unlock(&msc->part_sel_lock);
726 
727 		partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
728 		pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
729 		msc->partid_max = min(msc->partid_max, partid_max);
730 		msc->pmg_max = min(msc->pmg_max, pmg_max);
731 
732 		mutex_lock(&mpam_list_lock);
733 		ris = mpam_get_or_create_ris(msc, ris_idx);
734 		mutex_unlock(&mpam_list_lock);
735 		if (IS_ERR(ris))
736 			return PTR_ERR(ris);
737 		ris->idr = idr;
738 
739 		mutex_lock(&msc->part_sel_lock);
740 		__mpam_part_sel(ris_idx, 0, msc);
741 		mpam_ris_hw_probe(ris);
742 		mutex_unlock(&msc->part_sel_lock);
743 	}
744 
745 	spin_lock(&partid_max_lock);
746 	mpam_partid_max = min(mpam_partid_max, msc->partid_max);
747 	mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max);
748 	spin_unlock(&partid_max_lock);
749 
750 	msc->probed = true;
751 
752 	return 0;
753 }
754 
755 static int mpam_cpu_online(unsigned int cpu)
756 {
757 	return 0;
758 }
759 
760 /* Before mpam is enabled, try to probe new MSC */
761 static int mpam_discovery_cpu_online(unsigned int cpu)
762 {
763 	int err = 0;
764 	struct mpam_msc *msc;
765 	bool new_device_probed = false;
766 
767 	guard(srcu)(&mpam_srcu);
768 	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
769 				 srcu_read_lock_held(&mpam_srcu)) {
770 		if (!cpumask_test_cpu(cpu, &msc->accessibility))
771 			continue;
772 
773 		mutex_lock(&msc->probe_lock);
774 		if (!msc->probed)
775 			err = mpam_msc_hw_probe(msc);
776 		mutex_unlock(&msc->probe_lock);
777 
778 		if (err)
779 			break;
780 		new_device_probed = true;
781 	}
782 
783 	if (new_device_probed && !err)
784 		schedule_work(&mpam_enable_work);
785 	if (err) {
786 		mpam_disable_reason = "error during probing";
787 		schedule_work(&mpam_broken_work);
788 	}
789 
790 	return err;
791 }
792 
793 static int mpam_cpu_offline(unsigned int cpu)
794 {
795 	return 0;
796 }
797 
798 static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online),
799 					  int (*offline)(unsigned int offline),
800 					  char *name)
801 {
802 	mutex_lock(&mpam_cpuhp_state_lock);
803 	if (mpam_cpuhp_state) {
804 		cpuhp_remove_state(mpam_cpuhp_state);
805 		mpam_cpuhp_state = 0;
806 	}
807 
808 	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, name, online,
809 					     offline);
810 	if (mpam_cpuhp_state <= 0) {
811 		pr_err("Failed to register cpuhp callbacks\n");
812 		mpam_cpuhp_state = 0;
813 	}
814 	mutex_unlock(&mpam_cpuhp_state_lock);
815 }
816 
817 /*
818  * An MSC can control traffic from a set of CPUs, but may only be accessible
819  * from a (hopefully wider) set of CPUs. The common reason for this is power
820  * management. If all the CPUs in a cluster are in PSCI:CPU_SUSPEND, the
821  * corresponding cache may also be powered off. By making accesses from
822  * one of those CPUs, we ensure we don't access a cache that's powered off.
823  */
824 static void update_msc_accessibility(struct mpam_msc *msc)
825 {
826 	u32 affinity_id;
827 	int err;
828 
829 	err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity",
830 				       &affinity_id);
831 	if (err)
832 		cpumask_copy(&msc->accessibility, cpu_possible_mask);
833 	else
834 		acpi_pptt_get_cpus_from_container(affinity_id, &msc->accessibility);
835 }
836 
837 /*
838  * There are two ways of reaching a struct mpam_msc_ris. Via the
839  * class->component->vmsc->ris, or via the msc.
840  * When destroying the msc, the other side needs unlinking and cleaning up too.
841  */
842 static void mpam_msc_destroy(struct mpam_msc *msc)
843 {
844 	struct platform_device *pdev = msc->pdev;
845 	struct mpam_msc_ris *ris, *tmp;
846 
847 	lockdep_assert_held(&mpam_list_lock);
848 
849 	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
850 		mpam_ris_destroy(ris);
851 
852 	list_del_rcu(&msc->all_msc_list);
853 	platform_set_drvdata(pdev, NULL);
854 
855 	add_to_garbage(msc);
856 }
857 
858 static void mpam_msc_drv_remove(struct platform_device *pdev)
859 {
860 	struct mpam_msc *msc = platform_get_drvdata(pdev);
861 
862 	mutex_lock(&mpam_list_lock);
863 	mpam_msc_destroy(msc);
864 	mutex_unlock(&mpam_list_lock);
865 
866 	mpam_free_garbage();
867 }
868 
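/*
 * Allocate and initialise the struct mpam_msc for a platform device,
 * determine which CPUs can access it, map its MMIO registers and add it to
 * the mpam_all_msc list. Called with mpam_list_lock held; the RIS described
 * by firmware are created by the caller.
 */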
869 static struct mpam_msc *do_mpam_msc_drv_probe(struct platform_device *pdev)
870 {
871 	int err;
872 	u32 tmp;
873 	struct mpam_msc *msc;
874 	struct resource *msc_res;
875 	struct device *dev = &pdev->dev;
876 
877 	lockdep_assert_held(&mpam_list_lock);
878 
879 	msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL);
880 	if (!msc)
881 		return ERR_PTR(-ENOMEM);
882 	init_garbage(&msc->garbage);
883 	msc->garbage.pdev = pdev;
884 
885 	err = devm_mutex_init(dev, &msc->probe_lock);
886 	if (err)
887 		return ERR_PTR(err);
888 
889 	err = devm_mutex_init(dev, &msc->part_sel_lock);
890 	if (err)
891 		return ERR_PTR(err);
892 
893 	mpam_mon_sel_lock_init(msc);
894 	msc->id = pdev->id;
895 	msc->pdev = pdev;
896 	INIT_LIST_HEAD_RCU(&msc->all_msc_list);
897 	INIT_LIST_HEAD_RCU(&msc->ris);
898 
899 	update_msc_accessibility(msc);
900 	if (cpumask_empty(&msc->accessibility)) {
901 		dev_err_once(dev, "MSC is not accessible from any CPU!");
902 		return ERR_PTR(-EINVAL);
903 	}
904 
905 	if (device_property_read_u32(&pdev->dev, "pcc-channel", &tmp))
906 		msc->iface = MPAM_IFACE_MMIO;
907 	else
908 		msc->iface = MPAM_IFACE_PCC;
909 
910 	if (msc->iface == MPAM_IFACE_MMIO) {
911 		void __iomem *io;
912 
913 		io = devm_platform_get_and_ioremap_resource(pdev, 0,
914 							    &msc_res);
915 		if (IS_ERR(io)) {
916 			dev_err_once(dev, "Failed to map MSC base address\n");
917 			return ERR_CAST(io);
918 		}
919 		msc->mapped_hwpage_sz = resource_size(msc_res);
920 		msc->mapped_hwpage = io;
921 	} else {
922 		return ERR_PTR(-EINVAL);
923 	}
924 
925 	list_add_rcu(&msc->all_msc_list, &mpam_all_msc);
926 	platform_set_drvdata(pdev, msc);
927 
928 	return msc;
929 }
930 
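/* The number of MSCs described by the firmware tables, set at driver init */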
931 static int fw_num_msc;
932 
933 static int mpam_msc_drv_probe(struct platform_device *pdev)
934 {
935 	int err;
936 	struct mpam_msc *msc = NULL;
937 	void *plat_data = pdev->dev.platform_data;
938 
939 	mutex_lock(&mpam_list_lock);
940 	msc = do_mpam_msc_drv_probe(pdev);
941 	mutex_unlock(&mpam_list_lock);
942 
943 	if (IS_ERR(msc))
944 		return PTR_ERR(msc);
945 
946 	/* Create RIS entries described by firmware */
947 	err = acpi_mpam_parse_resources(msc, plat_data);
948 	if (err) {
949 		mpam_msc_drv_remove(pdev);
950 		return err;
951 	}
952 
953 	if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc)
954 		mpam_register_cpuhp_callbacks(mpam_discovery_cpu_online, NULL,
955 					      "mpam:drv_probe");
956 
957 	return 0;
958 }
959 
960 static struct platform_driver mpam_msc_driver = {
961 	.driver = {
962 		.name = "mpam_msc",
963 	},
964 	.probe = mpam_msc_drv_probe,
965 	.remove = mpam_msc_drv_remove,
966 };
967 
968 /* Any of these features mean the BWA_WD field is valid. */
969 static bool mpam_has_bwa_wd_feature(struct mpam_props *props)
970 {
971 	if (mpam_has_feature(mpam_feat_mbw_min, props))
972 		return true;
973 	if (mpam_has_feature(mpam_feat_mbw_max, props))
974 		return true;
975 	return false;
976 }
977 
978 #define MISMATCHED_HELPER(parent, child, helper, field, alias)		\
979 	helper(parent) &&						\
980 	((helper(child) && (parent)->field != (child)->field) ||	\
981 	 (!helper(child) && !(alias)))
982 
983 #define MISMATCHED_FEAT(parent, child, feat, field, alias)		     \
984 	mpam_has_feature((feat), (parent)) &&				     \
985 	((mpam_has_feature((feat), (child)) && (parent)->field != (child)->field) || \
986 	 (!mpam_has_feature((feat), (child)) && !(alias)))
987 
988 #define CAN_MERGE_FEAT(parent, child, feat, alias)			\
989 	(alias) && !mpam_has_feature((feat), (parent)) &&		\
990 	mpam_has_feature((feat), (child))
991 
992 /*
993  * Combine two props fields.
994  * If this is for controls that alias the same resource, it is safe to just
995  * copy the values over. If two aliasing controls implement the same scheme
996  * a safe value must be picked.
997  * For non-aliasing controls, these control different resources, and the
998  * resulting safe value must be compatible with both. When merging values in
999  * the tree, all the aliasing resources must be handled first.
1000  * On mismatch, parent is modified.
1001  */
1002 static void __props_mismatch(struct mpam_props *parent,
1003 			     struct mpam_props *child, bool alias)
1004 {
1005 	if (CAN_MERGE_FEAT(parent, child, mpam_feat_cpor_part, alias)) {
1006 		parent->cpbm_wd = child->cpbm_wd;
1007 	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_cpor_part,
1008 				   cpbm_wd, alias)) {
1009 		pr_debug("cleared cpor_part\n");
1010 		mpam_clear_feature(mpam_feat_cpor_part, parent);
1011 		parent->cpbm_wd = 0;
1012 	}
1013 
1014 	if (CAN_MERGE_FEAT(parent, child, mpam_feat_mbw_part, alias)) {
1015 		parent->mbw_pbm_bits = child->mbw_pbm_bits;
1016 	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_mbw_part,
1017 				   mbw_pbm_bits, alias)) {
1018 		pr_debug("cleared mbw_part\n");
1019 		mpam_clear_feature(mpam_feat_mbw_part, parent);
1020 		parent->mbw_pbm_bits = 0;
1021 	}
1022 
1023 	/* bwa_wd is a count of bits, fewer bits means less precision */
1024 	if (alias && !mpam_has_bwa_wd_feature(parent) &&
1025 	    mpam_has_bwa_wd_feature(child)) {
1026 		parent->bwa_wd = child->bwa_wd;
1027 	} else if (MISMATCHED_HELPER(parent, child, mpam_has_bwa_wd_feature,
1028 				     bwa_wd, alias)) {
1029 		pr_debug("took the min bwa_wd\n");
1030 		parent->bwa_wd = min(parent->bwa_wd, child->bwa_wd);
1031 	}
1032 
1033 	/* For num properties, take the minimum */
1034 	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_csu, alias)) {
1035 		parent->num_csu_mon = child->num_csu_mon;
1036 	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_csu,
1037 				   num_csu_mon, alias)) {
1038 		pr_debug("took the min num_csu_mon\n");
1039 		parent->num_csu_mon = min(parent->num_csu_mon,
1040 					  child->num_csu_mon);
1041 	}
1042 
1043 	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_mbwu, alias)) {
1044 		parent->num_mbwu_mon = child->num_mbwu_mon;
1045 	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_mbwu,
1046 				   num_mbwu_mon, alias)) {
1047 		pr_debug("took the min num_mbwu_mon\n");
1048 		parent->num_mbwu_mon = min(parent->num_mbwu_mon,
1049 					   child->num_mbwu_mon);
1050 	}
1051 
1052 	if (alias) {
1053 		/* Merge features for aliased resources */
1054 		bitmap_or(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
1055 	} else {
1056 		/* Clear missing features for non aliasing */
1057 		bitmap_and(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
1058 	}
1059 }
1060 
1061 /*
1062  * If a vmsc doesn't match class feature/configuration, do the right thing(tm).
1063  * For 'num' properties we can just take the minimum.
1064  * For properties where the mismatched unused bits would make a difference, we
1065  * nobble the class feature, as we can't configure all the resources.
1066  * e.g. The L3 cache is composed of two resources with 13 and 17 portion
1067  * bitmaps respectively.
1068  */
1069 static void
1070 __class_props_mismatch(struct mpam_class *class, struct mpam_vmsc *vmsc)
1071 {
1072 	struct mpam_props *cprops = &class->props;
1073 	struct mpam_props *vprops = &vmsc->props;
1074 	struct device *dev = &vmsc->msc->pdev->dev;
1075 
1076 	lockdep_assert_held(&mpam_list_lock); /* we modify class */
1077 
1078 	dev_dbg(dev, "Merging features for class:0x%lx &= vmsc:0x%lx\n",
1079 		(long)cprops->features, (long)vprops->features);
1080 
1081 	/* Take the safe value for any common features */
1082 	__props_mismatch(cprops, vprops, false);
1083 }
1084 
1085 static void
1086 __vmsc_props_mismatch(struct mpam_vmsc *vmsc, struct mpam_msc_ris *ris)
1087 {
1088 	struct mpam_props *rprops = &ris->props;
1089 	struct mpam_props *vprops = &vmsc->props;
1090 	struct device *dev = &vmsc->msc->pdev->dev;
1091 
1092 	lockdep_assert_held(&mpam_list_lock); /* we modify vmsc */
1093 
1094 	dev_dbg(dev, "Merging features for vmsc:0x%lx |= ris:0x%lx\n",
1095 		(long)vprops->features, (long)rprops->features);
1096 
1097 	/*
1098 	 * Merge mismatched features - Copy any features that aren't common,
1099 	 * but take the safe value for any common features.
1100 	 */
1101 	__props_mismatch(vprops, rprops, true);
1102 }
1103 
1104 /*
1105  * Copy the first component's first vMSC's properties and features to the
1106  * class. __class_props_mismatch() will remove conflicts.
1107  * It is not possible to have a class with no components, or a component with
1108  * no resources. The vMSC properties have already been built.
1109  */
1110 static void mpam_enable_init_class_features(struct mpam_class *class)
1111 {
1112 	struct mpam_vmsc *vmsc;
1113 	struct mpam_component *comp;
1114 
1115 	comp = list_first_entry(&class->components,
1116 				struct mpam_component, class_list);
1117 	vmsc = list_first_entry(&comp->vmsc,
1118 				struct mpam_vmsc, comp_list);
1119 
1120 	class->props = vmsc->props;
1121 }
1122 
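/*
 * Build each vMSC's features as the union of its RIS, and track the largest
 * not-ready timeout seen for the class.
 */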
1123 static void mpam_enable_merge_vmsc_features(struct mpam_component *comp)
1124 {
1125 	struct mpam_vmsc *vmsc;
1126 	struct mpam_msc_ris *ris;
1127 	struct mpam_class *class = comp->class;
1128 
1129 	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
1130 		list_for_each_entry(ris, &vmsc->ris, vmsc_list) {
1131 			__vmsc_props_mismatch(vmsc, ris);
1132 			class->nrdy_usec = max(class->nrdy_usec,
1133 					       vmsc->msc->nrdy_usec);
1134 		}
1135 	}
1136 }
1137 
1138 static void mpam_enable_merge_class_features(struct mpam_component *comp)
1139 {
1140 	struct mpam_vmsc *vmsc;
1141 	struct mpam_class *class = comp->class;
1142 
1143 	list_for_each_entry(vmsc, &comp->vmsc, comp_list)
1144 		__class_props_mismatch(class, vmsc);
1145 }
1146 
1147 /*
1148  * Merge all the common resource features into class.
1149  * vmsc features are bitwise-or'd together by mpam_enable_merge_vmsc_features()
1150  * as the first step so that mpam_enable_init_class_features() can initialise
1151  * the class with a representative set of features.
1152  * Next the mpam_enable_merge_class_features() bitwise-and's all the vmsc
1153  * features to form the class features.
1154  * Other features are the min/max as appropriate.
1155  *
1156  * To avoid walking the whole tree twice, the class->nrdy_usec property is
1157  * updated when working with the vmsc as it is a max(), and doesn't need
1158  * initialising first.
1159  */
1160 static void mpam_enable_merge_features(struct list_head *all_classes_list)
1161 {
1162 	struct mpam_class *class;
1163 	struct mpam_component *comp;
1164 
1165 	lockdep_assert_held(&mpam_list_lock);
1166 
1167 	list_for_each_entry(class, all_classes_list, classes_list) {
1168 		list_for_each_entry(comp, &class->components, class_list)
1169 			mpam_enable_merge_vmsc_features(comp);
1170 
1171 		mpam_enable_init_class_features(class);
1172 
1173 		list_for_each_entry(comp, &class->components, class_list)
1174 			mpam_enable_merge_class_features(comp);
1175 	}
1176 }
1177 
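/*
 * Runs once all MSCs have been probed: publish the system-wide PARTID/PMG
 * limits, merge the per-RIS features up into the classes, and switch to the
 * runtime cpuhp callbacks.
 */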
1178 static void mpam_enable_once(void)
1179 {
1180 	/*
1181 	 * Once the cpuhp callbacks have been changed, mpam_partid_max can no
1182 	 * longer change.
1183 	 */
1184 	spin_lock(&partid_max_lock);
1185 	partid_max_published = true;
1186 	spin_unlock(&partid_max_lock);
1187 
1188 	mutex_lock(&mpam_list_lock);
1189 	mpam_enable_merge_features(&mpam_classes);
1190 	mutex_unlock(&mpam_list_lock);
1191 
1192 	mpam_register_cpuhp_callbacks(mpam_cpu_online, mpam_cpu_offline,
1193 				      "mpam:online");
1194 
1195 	/* Use printk() to avoid the pr_fmt adding the function name. */
1196 	printk(KERN_INFO "MPAM enabled with %u PARTIDs and %u PMGs\n",
1197 	       mpam_partid_max + 1, mpam_pmg_max + 1);
1198 }
1199 
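/*
 * Scheduled via mpam_broken_work when an error is detected. Unregisters the
 * cpuhp callbacks and tears down all MSCs, together with their RIS, vMSCs,
 * components and classes.
 */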
1200 void mpam_disable(struct work_struct *ignored)
1201 {
1202 	struct mpam_msc *msc, *tmp;
1203 
1204 	mutex_lock(&mpam_cpuhp_state_lock);
1205 	if (mpam_cpuhp_state) {
1206 		cpuhp_remove_state(mpam_cpuhp_state);
1207 		mpam_cpuhp_state = 0;
1208 	}
1209 	mutex_unlock(&mpam_cpuhp_state_lock);
1210 
1211 	mutex_lock(&mpam_list_lock);
1212 	list_for_each_entry_safe(msc, tmp, &mpam_all_msc, all_msc_list)
1213 		mpam_msc_destroy(msc);
1214 	mutex_unlock(&mpam_list_lock);
1215 	mpam_free_garbage();
1216 
1217 	pr_err_once("MPAM disabled due to %s\n", mpam_disable_reason);
1218 }
1219 
1220 /*
1221  * Enable mpam once all devices have been probed.
1222  * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
1223  * Also scheduled when new devices are probed when new CPUs come online.
1224  */
1225 void mpam_enable(struct work_struct *work)
1226 {
1227 	static atomic_t once;
1228 	struct mpam_msc *msc;
1229 	bool all_devices_probed = true;
1230 
1231 	/* Have we probed all the hw devices? */
1232 	guard(srcu)(&mpam_srcu);
1233 	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
1234 				 srcu_read_lock_held(&mpam_srcu)) {
1235 		mutex_lock(&msc->probe_lock);
1236 		if (!msc->probed)
1237 			all_devices_probed = false;
1238 		mutex_unlock(&msc->probe_lock);
1239 
1240 		if (!all_devices_probed)
1241 			break;
1242 	}
1243 
1244 	if (all_devices_probed && !atomic_fetch_inc(&once))
1245 		mpam_enable_once();
1246 }
1247 
1248 static int __init mpam_msc_driver_init(void)
1249 {
1250 	if (!system_supports_mpam())
1251 		return -EOPNOTSUPP;
1252 
1253 	init_srcu_struct(&mpam_srcu);
1254 
1255 	fw_num_msc = acpi_mpam_count_msc();
1256 	if (fw_num_msc <= 0) {
1257 		pr_err("No MSC devices found in firmware\n");
1258 		return -EINVAL;
1259 	}
1260 
1261 	return platform_driver_register(&mpam_msc_driver);
1262 }
1263 
1264 /* Must occur after arm64_mpam_register_cpus() from arch_initcall() */
1265 subsys_initcall(mpam_msc_driver_init);
1266