xref: /linux/fs/resctrl/monitor.c (revision ad5c2ff75e0c53d2588dfc10eb87458e759b6bbe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resource Director Technology(RDT)
4  * - Monitoring code
5  *
6  * Copyright (C) 2017 Intel Corporation
7  *
8  * Author:
9  *    Vikas Shivappa <vikas.shivappa@intel.com>
10  *
11  * This replaces the perf-based cqm.c, but reuses a lot of code and
12  * data structures originally from Peter Zijlstra and Matt Fleming.
13  *
14  * More information about RDT can be found in the Intel (R) x86 Architecture
15  * Software Developer Manual June 2016, volume 3, section 17.17.
16  */
17 
18 #define pr_fmt(fmt)	"resctrl: " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/resctrl.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 
25 #include "internal.h"
26 
27 #define CREATE_TRACE_POINTS
28 
29 #include "monitor_trace.h"
30 
31 /**
32  * struct rmid_entry - dirty tracking for all RMID.
33  * @closid:	The CLOSID for this entry.
34  * @rmid:	The RMID for this entry.
35  * @busy:	The number of domains with cached data using this RMID.
36  * @list:	Member of the rmid_free_lru list when busy == 0.
37  *
38  * Depending on the architecture the correct monitor is accessed using
39  * both @closid and @rmid, or @rmid only.
40  *
41  * Take the rdtgroup_mutex when accessing.
42  */
43 struct rmid_entry {
44 	u32				closid;
45 	u32				rmid;
46 	int				busy;
47 	struct list_head		list;
48 };
49 
50 /*
51  * @rmid_free_lru - A least recently used list of free RMIDs
52  *     These RMIDs are guaranteed to have an occupancy less than the
53  *     threshold occupancy
54  */
55 static LIST_HEAD(rmid_free_lru);
56 
57 /*
58  * @closid_num_dirty_rmid - The number of dirty RMIDs each CLOSID has.
59  *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
60  *     Indexed by CLOSID. Protected by rdtgroup_mutex.
61  */
62 static u32 *closid_num_dirty_rmid;
63 
64 /*
65  * @rmid_limbo_count - count of currently unused but (potentially)
66  *     dirty RMIDs.
67  *     This counts RMIDs that no one is currently using but that
68  *     may have an occupancy value > resctrl_rmid_realloc_threshold. The user
69  *     can change the threshold occupancy value.
70  */
71 static unsigned int rmid_limbo_count;
72 
73 /*
74  * @rmid_ptrs - The entries in the limbo and free lists.
75  */
76 static struct rmid_entry	*rmid_ptrs;
77 
78 /*
79  * This is the threshold cache occupancy in bytes at which we will consider an
80  * RMID available for re-allocation.
81  */
82 unsigned int resctrl_rmid_realloc_threshold;
83 
84 /*
85  * This is the maximum value for the reallocation threshold, in bytes.
86  */
87 unsigned int resctrl_rmid_realloc_limit;
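
/*
 * Illustrative example, not from the original file: the threshold above is
 * tunable from user space through the resctrl filesystem, trading RMID reuse
 * latency against measurement accuracy, e.g.
 *
 *	# echo 65536 > /sys/fs/resctrl/info/L3_MON/max_threshold_occupancy
 *
 * recycles an RMID once its remaining LLC occupancy drops below 64 KiB.
 */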
88 
89 /*
90  * x86 and arm64 differ in their handling of monitoring.
91  * x86's RMID are independent numbers, there is only one source of traffic
92  * with an RMID value of '1'.
93  * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
94  * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
95  * value is no longer unique.
96  * To account for this, resctrl uses an index. On x86 this is just the RMID,
97  * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
98  *
99  * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
100  * must accept an attempt to read every index.
101  */
102 static inline struct rmid_entry *__rmid_entry(u32 idx)
103 {
104 	struct rmid_entry *entry;
105 	u32 closid, rmid;
106 
107 	entry = &rmid_ptrs[idx];
108 	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
109 
110 	WARN_ON_ONCE(entry->closid != closid);
111 	WARN_ON_ONCE(entry->rmid != rmid);
112 
113 	return entry;
114 }
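
/*
 * Illustrative sketch, not part of the original file: whichever scheme the
 * architecture picks, an index must survive a round trip through the two
 * helpers used above. A hypothetical self-check built only on those helpers
 * could look like this:
 */
static __maybe_unused bool example_rmid_idx_roundtrip(u32 idx)
{
	u32 closid, rmid;

	/* Split the index into its architectural parts... */
	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);

	/* ...recombining them must yield the same index. */
	return resctrl_arch_rmid_idx_encode(closid, rmid) == idx;
}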
115 
116 static void limbo_release_entry(struct rmid_entry *entry)
117 {
118 	lockdep_assert_held(&rdtgroup_mutex);
119 
120 	rmid_limbo_count--;
121 	list_add_tail(&entry->list, &rmid_free_lru);
122 
123 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
124 		closid_num_dirty_rmid[entry->closid]--;
125 }
126 
127 /*
128  * Check the RMIDs that are marked as busy for this domain. If the
129  * reported LLC occupancy is below the threshold, clear the busy bit and
130  * decrement the count. If the busy count reaches zero on an RMID, we
131  * free the RMID.
132  */
133 void __check_limbo(struct rdt_mon_domain *d, bool force_free)
134 {
135 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
136 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
137 	struct rmid_entry *entry;
138 	u32 idx, cur_idx = 1;
139 	void *arch_mon_ctx;
140 	bool rmid_dirty;
141 	u64 val = 0;
142 
143 	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
144 	if (IS_ERR(arch_mon_ctx)) {
145 		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
146 				    PTR_ERR(arch_mon_ctx));
147 		return;
148 	}
149 
150 	/*
151 	 * Skip RMID 0 and start from RMID 1, checking all the RMIDs that
152 	 * are marked as busy for occupancy < threshold. If the occupancy
153 	 * is less than the threshold, decrement the busy counter of the
154 	 * RMID and move it to the free list when the counter reaches 0.
155 	 */
156 	for (;;) {
157 		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
158 		if (idx >= idx_limit)
159 			break;
160 
161 		entry = __rmid_entry(idx);
162 		if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
163 					   QOS_L3_OCCUP_EVENT_ID, &val,
164 					   arch_mon_ctx)) {
165 			rmid_dirty = true;
166 		} else {
167 			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
168 
169 			/*
170 			 * x86's CLOSID and RMID are independent numbers, so the entry's
171 			 * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
172 			 * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
173 			 * used to select the configuration. It is thus necessary to track both
174 			 * CLOSID and RMID because there may be dependencies between them
175 			 * on some architectures.
176 			 */
177 			trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
178 		}
179 
180 		if (force_free || !rmid_dirty) {
181 			clear_bit(idx, d->rmid_busy_llc);
182 			if (!--entry->busy)
183 				limbo_release_entry(entry);
184 		}
185 		cur_idx = idx + 1;
186 	}
187 
188 	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
189 }
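
/*
 * Worked lifecycle example (illustrative): free_rmid() routes a dirty RMID
 * through add_rmid_to_limbo(), which sets ->busy to the number of monitor
 * domains. Each run of this handler clears a domain's bit once the RMID's
 * occupancy there falls below resctrl_rmid_realloc_threshold, and the last
 * clear drops ->busy to zero, returning the entry to rmid_free_lru.
 */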
190 
191 bool has_busy_rmid(struct rdt_mon_domain *d)
192 {
193 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
194 
195 	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
196 }
197 
198 static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
199 {
200 	struct rmid_entry *itr;
201 	u32 itr_idx, cmp_idx;
202 
203 	if (list_empty(&rmid_free_lru))
204 		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
205 
206 	list_for_each_entry(itr, &rmid_free_lru, list) {
207 		/*
208 		 * Get the index of this free RMID, and the index it would need
209 		 * to be if it were used with this CLOSID.
210 		 * If the CLOSID is irrelevant on this architecture, the two
211 		 * index values are always the same on every entry and thus the
212 		 * very first entry will be returned.
213 		 */
214 		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
215 		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
216 
217 		if (itr_idx == cmp_idx)
218 			return itr;
219 	}
220 
221 	return ERR_PTR(-ENOSPC);
222 }
223 
224 /**
225  * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
226  *                                  RMID are clean, or the CLOSID that has
227  *                                  the most clean RMID.
228  *
229  * MPAM's equivalent of RMID is per-CLOSID, meaning a freshly allocated CLOSID
230  * may not be able to allocate a clean RMID. To avoid this, the allocator
231  * chooses the CLOSID with the most clean RMIDs.
232  *
233  * When the CLOSID and RMID are independent numbers, the first free CLOSID will
234  * be returned.
235  */
236 int resctrl_find_cleanest_closid(void)
237 {
238 	u32 cleanest_closid = ~0;
239 	int i = 0;
240 
241 	lockdep_assert_held(&rdtgroup_mutex);
242 
243 	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
244 		return -EIO;
245 
246 	for (i = 0; i < closids_supported(); i++) {
247 		int num_dirty;
248 
249 		if (closid_allocated(i))
250 			continue;
251 
252 		num_dirty = closid_num_dirty_rmid[i];
253 		if (num_dirty == 0)
254 			return i;
255 
256 		if (cleanest_closid == ~0)
257 			cleanest_closid = i;
258 
259 		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
260 			cleanest_closid = i;
261 	}
262 
263 	if (cleanest_closid == ~0)
264 		return -ENOSPC;
265 
266 	return cleanest_closid;
267 }
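
/*
 * Worked example (illustrative) of the search above: if the unallocated
 * CLOSIDs 3, 5 and 7 have closid_num_dirty_rmid counts of 4, 0 and 2, the
 * loop returns 5 as soon as it sees the zero count. With counts of 4, 1
 * and 2, all three are scanned and 5 is returned as the CLOSID with the
 * fewest dirty RMIDs.
 */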
268 
269 /*
270  * For MPAM the RMID value is not unique, and has to be considered with
271  * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
272  * allows all domains to be managed by a single free list.
273  * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
274  */
275 int alloc_rmid(u32 closid)
276 {
277 	struct rmid_entry *entry;
278 
279 	lockdep_assert_held(&rdtgroup_mutex);
280 
281 	entry = resctrl_find_free_rmid(closid);
282 	if (IS_ERR(entry))
283 		return PTR_ERR(entry);
284 
285 	list_del(&entry->list);
286 	return entry->rmid;
287 }
288 
289 static void add_rmid_to_limbo(struct rmid_entry *entry)
290 {
291 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
292 	struct rdt_mon_domain *d;
293 	u32 idx;
294 
295 	lockdep_assert_held(&rdtgroup_mutex);
296 
297 	/* Walking r->domains, ensure it can't race with cpuhp */
298 	lockdep_assert_cpus_held();
299 
300 	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
301 
302 	entry->busy = 0;
303 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
304 		/*
305 		 * For the first limbo RMID in the domain,
306 		 * set up the limbo worker.
307 		 */
308 		if (!has_busy_rmid(d))
309 			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
310 						RESCTRL_PICK_ANY_CPU);
311 		set_bit(idx, d->rmid_busy_llc);
312 		entry->busy++;
313 	}
314 
315 	rmid_limbo_count++;
316 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
317 		closid_num_dirty_rmid[entry->closid]++;
318 }
319 
320 void free_rmid(u32 closid, u32 rmid)
321 {
322 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
323 	struct rmid_entry *entry;
324 
325 	lockdep_assert_held(&rdtgroup_mutex);
326 
327 	/*
328 	 * Do not allow the default RMID to be freed. Comparing by index
329 	 * allows architectures that ignore the closid parameter to avoid an
330 	 * unnecessary check.
331 	 */
332 	if (!resctrl_arch_mon_capable() ||
333 	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
334 						RESCTRL_RESERVED_RMID))
335 		return;
336 
337 	entry = __rmid_entry(idx);
338 
339 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
340 		add_rmid_to_limbo(entry);
341 	else
342 		list_add_tail(&entry->list, &rmid_free_lru);
343 }
344 
345 static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
346 				       u32 rmid, enum resctrl_event_id evtid)
347 {
348 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
349 	struct mbm_state *state;
350 
351 	if (!resctrl_is_mbm_event(evtid))
352 		return NULL;
353 
354 	state = d->mbm_states[MBM_STATE_IDX(evtid)];
355 
356 	return state ? &state[idx] : NULL;
357 }
358 
359 /*
360  * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
361  *
362  * Return:
363  * Valid counter ID on success, or -ENOENT on failure.
364  */
365 static int mbm_cntr_get(struct rdt_resource *r, struct rdt_mon_domain *d,
366 			struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
367 {
368 	int cntr_id;
369 
370 	if (!r->mon.mbm_cntr_assignable)
371 		return -ENOENT;
372 
373 	if (!resctrl_is_mbm_event(evtid))
374 		return -ENOENT;
375 
376 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
377 		if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp &&
378 		    d->cntr_cfg[cntr_id].evtid == evtid)
379 			return cntr_id;
380 	}
381 
382 	return -ENOENT;
383 }
384 
385 /*
386  * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
387  * Caller must ensure that the specified event is not assigned already.
388  *
389  * Return:
390  * Valid counter ID on success, or -ENOSPC on failure.
391  */
392 static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_mon_domain *d,
393 			  struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
394 {
395 	int cntr_id;
396 
397 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
398 		if (!d->cntr_cfg[cntr_id].rdtgrp) {
399 			d->cntr_cfg[cntr_id].rdtgrp = rdtgrp;
400 			d->cntr_cfg[cntr_id].evtid = evtid;
401 			return cntr_id;
402 		}
403 	}
404 
405 	return -ENOSPC;
406 }
407 
408 /*
409  * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
410  */
411 static void mbm_cntr_free(struct rdt_mon_domain *d, int cntr_id)
412 {
413 	memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
414 }
415 
416 static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
417 {
418 	int cpu = smp_processor_id();
419 	u32 closid = rdtgrp->closid;
420 	u32 rmid = rdtgrp->mon.rmid;
421 	struct rdt_mon_domain *d;
422 	int cntr_id = -ENOENT;
423 	struct mbm_state *m;
424 	int err, ret;
425 	u64 tval = 0;
426 
427 	if (rr->is_mbm_cntr) {
428 		cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
429 		if (cntr_id < 0) {
430 			rr->err = -ENOENT;
431 			return -EINVAL;
432 		}
433 	}
434 
435 	if (rr->first) {
436 		if (rr->is_mbm_cntr)
437 			resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
438 		else
439 			resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
440 		m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
441 		if (m)
442 			memset(m, 0, sizeof(struct mbm_state));
443 		return 0;
444 	}
445 
446 	if (rr->d) {
447 		/* Reading a single domain, must be on a CPU in that domain. */
448 		if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
449 			return -EINVAL;
450 		if (rr->is_mbm_cntr)
451 			rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
452 							 rr->evtid, &tval);
453 		else
454 			rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
455 							 rr->evtid, &tval, rr->arch_mon_ctx);
456 		if (rr->err)
457 			return rr->err;
458 
459 		rr->val += tval;
460 
461 		return 0;
462 	}
463 
464 	/* Summing domains that share a cache, must be on a CPU for that cache. */
465 	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
466 		return -EINVAL;
467 
468 	/*
469 	 * Legacy files must report the sum of an event across all
470 	 * domains that share the same L3 cache instance.
471 	 * Report success if a read from any domain succeeds, -EINVAL
472 	 * (translated to "Unavailable" for user space) if reading from
473 	 * all domains fail for any reason.
474 	 */
475 	ret = -EINVAL;
476 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
477 		if (d->ci_id != rr->ci->id)
478 			continue;
479 		if (rr->is_mbm_cntr)
480 			err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
481 						     rr->evtid, &tval);
482 		else
483 			err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
484 						     rr->evtid, &tval, rr->arch_mon_ctx);
485 		if (!err) {
486 			rr->val += tval;
487 			ret = 0;
488 		}
489 	}
490 
491 	if (ret)
492 		rr->err = ret;
493 
494 	return ret;
495 }
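
/*
 * Illustrative example of the summing case above: with Sub-NUMA Cluster
 * mode enabled, one L3 cache instance may be split into several monitor
 * domains. A legacy event file for that cache reports the sum across all
 * of its domains, and a single successful domain read is enough to report
 * a value instead of "Unavailable".
 */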
496 
497 static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
498 {
499 	switch (rr->r->rid) {
500 	case RDT_RESOURCE_L3:
501 		return __l3_mon_event_count(rdtgrp, rr);
502 	default:
503 		rr->err = -EINVAL;
504 		return -EINVAL;
505 	}
506 }
507 
508 /*
509  * mbm_bw_count() - Update bw count from values previously read by
510  *		    __mon_event_count().
511  * @rdtgrp:	resctrl group associated with the CLOSID and RMID to identify
512  *		the cached mbm_state.
513  * @rr:		The struct rmid_read populated by __mon_event_count().
514  *
515  * Supporting function to calculate the memory bandwidth in MBps.
516  * The byte count previously read by __mon_event_count() is compared with
517  * the count from the previous invocation. This must be called once per
518  * second to maintain values in MBps.
519  */
520 static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
521 {
522 	u64 cur_bw, bytes, cur_bytes;
523 	u32 closid = rdtgrp->closid;
524 	u32 rmid = rdtgrp->mon.rmid;
525 	struct mbm_state *m;
526 
527 	m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
528 	if (WARN_ON_ONCE(!m))
529 		return;
530 
531 	cur_bytes = rr->val;
532 	bytes = cur_bytes - m->prev_bw_bytes;
533 	m->prev_bw_bytes = cur_bytes;
534 
535 	cur_bw = bytes / SZ_1M;
536 
537 	m->prev_bw = cur_bw;
538 }
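
/*
 * Worked example (illustrative): if two consecutive one-second invocations
 * read rr->val as 10 GiB and then 12 GiB, bytes = 2 GiB and
 * cur_bw = 2 GiB / SZ_1M = 2048 MBps is cached in m->prev_bw for use by
 * the mba_sc feedback loop below.
 */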
539 
540 /*
541  * This is scheduled by mon_event_read() to read the CQM/MBM counters
542  * on a domain.
543  */
544 void mon_event_count(void *info)
545 {
546 	struct rdtgroup *rdtgrp, *entry;
547 	struct rmid_read *rr = info;
548 	struct list_head *head;
549 	int ret;
550 
551 	rdtgrp = rr->rgrp;
552 
553 	ret = __mon_event_count(rdtgrp, rr);
554 
555 	/*
556 	 * For Ctrl groups read data from child monitor groups and
557 	 * add them together. Count events which are read successfully.
558 	 * Discard the rmid_read's reporting errors.
559 	 */
560 	head = &rdtgrp->mon.crdtgrp_list;
561 
562 	if (rdtgrp->type == RDTCTRL_GROUP) {
563 		list_for_each_entry(entry, head, mon.crdtgrp_list) {
564 			if (__mon_event_count(entry, rr) == 0)
565 				ret = 0;
566 		}
567 	}
568 
569 	/*
570 	 * __mon_event_count() calls for newly created monitor groups may
571 	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
572 	 * Discard error if any of the monitor event reads succeeded.
573 	 */
574 	if (ret == 0)
575 		rr->err = 0;
576 }
577 
578 static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
579 							struct rdt_resource *r)
580 {
581 	struct rdt_ctrl_domain *d;
582 
583 	lockdep_assert_cpus_held();
584 
585 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
586 		/* Find the domain that contains this CPU */
587 		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
588 			return d;
589 	}
590 
591 	return NULL;
592 }
593 
594 /*
595  * Feedback loop for MBA software controller (mba_sc)
596  *
597  * mba_sc is a feedback loop where we periodically read MBM counters and
598  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
599  * that:
600  *
601  *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
602  *
603  * This uses the MBM counters to measure the bandwidth and MBA throttle
604  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
605  * fact that resctrl rdtgroups have both monitoring and control.
606  *
607  * The frequency of the checks is 1s and we just tag along the MBM overflow
608  * timer. Having 1s interval makes the calculation of bandwidth simpler.
609  *
610  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
611  * be a need to increase the bandwidth to avoid unnecessarily restricting
612  * the L2 <-> L3 traffic.
613  *
614  * Since MBA controls the L2 external bandwidth, whereas MBM measures the
615  * L3 external bandwidth, the following sequence could lead to such a
616  * situation.
617  *
618  * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
619  * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
620  * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
621  *
622  * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
623  * throttle MSRs already have low percentage values.  To avoid
624  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
625  */
626 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
627 {
628 	u32 closid, rmid, cur_msr_val, new_msr_val;
629 	struct mbm_state *pmbm_data, *cmbm_data;
630 	struct rdt_ctrl_domain *dom_mba;
631 	enum resctrl_event_id evt_id;
632 	struct rdt_resource *r_mba;
633 	struct list_head *head;
634 	struct rdtgroup *entry;
635 	u32 cur_bw, user_bw;
636 
637 	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
638 	evt_id = rgrp->mba_mbps_event;
639 
640 	closid = rgrp->closid;
641 	rmid = rgrp->mon.rmid;
642 	pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
643 	if (WARN_ON_ONCE(!pmbm_data))
644 		return;
645 
646 	dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
647 	if (!dom_mba) {
648 		pr_warn_once("Failure to get domain for MBA update\n");
649 		return;
650 	}
651 
652 	cur_bw = pmbm_data->prev_bw;
653 	user_bw = dom_mba->mbps_val[closid];
654 
655 	/* MBA resource doesn't support CDP */
656 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
657 
658 	/*
659 	 * For Ctrl groups read data from child monitor groups.
660 	 */
661 	head = &rgrp->mon.crdtgrp_list;
662 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
663 		cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
664 		if (WARN_ON_ONCE(!cmbm_data))
665 			return;
666 		cur_bw += cmbm_data->prev_bw;
667 	}
668 
669 	/*
670 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
671 	 * bandwidth step is the bandwidth granularity specified by the
672 	 * hardware.
673 	 * Always increase throttling if current bandwidth is above the
674 	 * target set by user.
675 	 * But avoid thrashing up and down on every poll by checking
676 	 * whether a decrease in throttling is likely to push the group
677 	 * back over target. E.g. if currently throttling to 30% of bandwidth
678 	 * on a system with 10% granularity steps, check whether moving to
679 	 * 40% would go past the limit by multiplying current bandwidth by
680 	 * "(30 + 10) / 30".
681 	 */
682 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
683 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
684 	} else if (cur_msr_val < MAX_MBA_BW &&
685 		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.bw_gran) / cur_msr_val))) {
686 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
687 	} else {
688 		return;
689 	}
690 
691 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
692 }
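
/*
 * Worked example (illustrative) of the step logic above, with bw_gran and
 * min_bw both 10%: a group throttled to cur_msr_val = 30% measures
 * cur_bw = 3000 MBps against user_bw = 4500 MBps. The predicted bandwidth
 * at 40% is 3000 * (30 + 10) / 30 = 4000 MBps, still under target, so
 * throttling is relaxed to 40%. Had user_bw been 3800 MBps, the prediction
 * would overshoot the target and the MSR would be left unchanged.
 */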
693 
694 static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
695 				 struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
696 {
697 	struct rmid_read rr = {0};
698 
699 	rr.r = r;
700 	rr.d = d;
701 	rr.evtid = evtid;
702 	if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
703 		rr.is_mbm_cntr = true;
704 	} else {
705 		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
706 		if (IS_ERR(rr.arch_mon_ctx)) {
707 			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
708 					    PTR_ERR(rr.arch_mon_ctx));
709 			return;
710 		}
711 	}
712 
713 	__mon_event_count(rdtgrp, &rr);
714 
715 	/*
716 	 * If the software controller is enabled, compute the
717 	 * bandwidth for this event id.
718 	 */
719 	if (is_mba_sc(NULL))
720 		mbm_bw_count(rdtgrp, &rr);
721 
722 	if (rr.arch_mon_ctx)
723 		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
724 }
725 
726 static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
727 		       struct rdtgroup *rdtgrp)
728 {
729 	/*
730 	 * This is protected from concurrent reads from user space as both
731 	 * the user and the overflow handler hold the global mutex.
732 	 */
733 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
734 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
735 
736 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
737 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
738 }
739 
740 /*
741  * Handler to scan the limbo list and move to the free list
742  * any RMIDs whose occupancy < threshold_occupancy.
743  */
744 void cqm_handle_limbo(struct work_struct *work)
745 {
746 	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
747 	struct rdt_mon_domain *d;
748 
749 	cpus_read_lock();
750 	mutex_lock(&rdtgroup_mutex);
751 
752 	d = container_of(work, struct rdt_mon_domain, cqm_limbo.work);
753 
754 	__check_limbo(d, false);
755 
756 	if (has_busy_rmid(d)) {
757 		d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
758 							   RESCTRL_PICK_ANY_CPU);
759 		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
760 					 delay);
761 	}
762 
763 	mutex_unlock(&rdtgroup_mutex);
764 	cpus_read_unlock();
765 }
766 
767 /**
768  * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
769  *                             domain.
770  * @dom:           The domain the limbo handler should run for.
771  * @delay_ms:      How far in the future the handler should run.
772  * @exclude_cpu:   Which CPU the handler should not run on,
773  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
774  */
775 void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
776 			     int exclude_cpu)
777 {
778 	unsigned long delay = msecs_to_jiffies(delay_ms);
779 	int cpu;
780 
781 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
782 	dom->cqm_work_cpu = cpu;
783 
784 	if (cpu < nr_cpu_ids)
785 		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
786 }
787 
788 void mbm_handle_overflow(struct work_struct *work)
789 {
790 	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
791 	struct rdtgroup *prgrp, *crgrp;
792 	struct rdt_mon_domain *d;
793 	struct list_head *head;
794 	struct rdt_resource *r;
795 
796 	cpus_read_lock();
797 	mutex_lock(&rdtgroup_mutex);
798 
799 	/*
800 	 * If the filesystem has been unmounted this work no longer needs to
801 	 * run.
802 	 */
803 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
804 		goto out_unlock;
805 
806 	r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
807 	d = container_of(work, struct rdt_mon_domain, mbm_over.work);
808 
809 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
810 		mbm_update(r, d, prgrp);
811 
812 		head = &prgrp->mon.crdtgrp_list;
813 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
814 			mbm_update(r, d, crgrp);
815 
816 		if (is_mba_sc(NULL))
817 			update_mba_bw(prgrp, d);
818 	}
819 
820 	/*
821 	 * Re-check for housekeeping CPUs. This allows the overflow handler to
822 	 * move off a nohz_full CPU quickly.
823 	 */
824 	d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
825 						   RESCTRL_PICK_ANY_CPU);
826 	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
827 
828 out_unlock:
829 	mutex_unlock(&rdtgroup_mutex);
830 	cpus_read_unlock();
831 }
832 
833 /**
834  * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
835  *                                domain.
836  * @dom:           The domain the overflow handler should run for.
837  * @delay_ms:      How far in the future the handler should run.
838  * @exclude_cpu:   Which CPU the handler should not run on,
839  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
840  */
841 void mbm_setup_overflow_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
842 				int exclude_cpu)
843 {
844 	unsigned long delay = msecs_to_jiffies(delay_ms);
845 	int cpu;
846 
847 	/*
848 	 * When a domain comes online there is no guarantee the filesystem is
849 	 * mounted. If not, there is no need to catch counter overflow.
850 	 */
851 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
852 		return;
853 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
854 	dom->mbm_work_cpu = cpu;
855 
856 	if (cpu < nr_cpu_ids)
857 		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
858 }
859 
860 static int dom_data_init(struct rdt_resource *r)
861 {
862 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
863 	u32 num_closid = resctrl_arch_get_num_closid(r);
864 	struct rmid_entry *entry = NULL;
865 	int err = 0, i;
866 	u32 idx;
867 
868 	mutex_lock(&rdtgroup_mutex);
869 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
870 		u32 *tmp;
871 
872 		/*
873 		 * If the architecture hasn't provided a sanitised value here,
874 		 * this may result in larger arrays than necessary. Resctrl will
875 		 * use a smaller system wide value based on the resources in
876 		 * use.
877 		 */
878 		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
879 		if (!tmp) {
880 			err = -ENOMEM;
881 			goto out_unlock;
882 		}
883 
884 		closid_num_dirty_rmid = tmp;
885 	}
886 
887 	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
888 	if (!rmid_ptrs) {
889 		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
890 			kfree(closid_num_dirty_rmid);
891 			closid_num_dirty_rmid = NULL;
892 		}
893 		err = -ENOMEM;
894 		goto out_unlock;
895 	}
896 
897 	for (i = 0; i < idx_limit; i++) {
898 		entry = &rmid_ptrs[i];
899 		INIT_LIST_HEAD(&entry->list);
900 
901 		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
902 		list_add_tail(&entry->list, &rmid_free_lru);
903 	}
904 
905 	/*
906 	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
907 	 * are always allocated. These are used for the rdtgroup_default
908  * control group, which will be set up later in resctrl_init().
909 	 */
910 	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
911 					   RESCTRL_RESERVED_RMID);
912 	entry = __rmid_entry(idx);
913 	list_del(&entry->list);
914 
915 out_unlock:
916 	mutex_unlock(&rdtgroup_mutex);
917 
918 	return err;
919 }
920 
921 static void dom_data_exit(struct rdt_resource *r)
922 {
923 	mutex_lock(&rdtgroup_mutex);
924 
925 	if (!r->mon_capable)
926 		goto out_unlock;
927 
928 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
929 		kfree(closid_num_dirty_rmid);
930 		closid_num_dirty_rmid = NULL;
931 	}
932 
933 	kfree(rmid_ptrs);
934 	rmid_ptrs = NULL;
935 
936 out_unlock:
937 	mutex_unlock(&rdtgroup_mutex);
938 }
939 
940 /*
941  * All available events. Architecture code marks the ones that
942  * are supported by a system using resctrl_enable_mon_event()
943  * to set .enabled.
944  */
945 struct mon_evt mon_event_all[QOS_NUM_EVENTS] = {
946 	[QOS_L3_OCCUP_EVENT_ID] = {
947 		.name	= "llc_occupancy",
948 		.evtid	= QOS_L3_OCCUP_EVENT_ID,
949 		.rid	= RDT_RESOURCE_L3,
950 	},
951 	[QOS_L3_MBM_TOTAL_EVENT_ID] = {
952 		.name	= "mbm_total_bytes",
953 		.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
954 		.rid	= RDT_RESOURCE_L3,
955 	},
956 	[QOS_L3_MBM_LOCAL_EVENT_ID] = {
957 		.name	= "mbm_local_bytes",
958 		.evtid	= QOS_L3_MBM_LOCAL_EVENT_ID,
959 		.rid	= RDT_RESOURCE_L3,
960 	},
961 };
962 
963 void resctrl_enable_mon_event(enum resctrl_event_id eventid)
964 {
965 	if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS))
966 		return;
967 	if (mon_event_all[eventid].enabled) {
968 		pr_warn("Duplicate enable for event %d\n", eventid);
969 		return;
970 	}
971 
972 	mon_event_all[eventid].enabled = true;
973 }
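
/*
 * Illustrative usage (hypothetical caller, not from this file): architecture
 * code probes its hardware and switches on each event it supports, e.g.
 *
 *	if (boot_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
 *		resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID);
 */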
974 
975 bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
976 {
977 	return eventid >= QOS_FIRST_EVENT && eventid < QOS_NUM_EVENTS &&
978 	       mon_event_all[eventid].enabled;
979 }
980 
981 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id evtid)
982 {
983 	return mon_event_all[evtid].evt_cfg;
984 }
985 
986 /**
987  * struct mbm_transaction - Memory transaction an MBM event can be configured with.
988  * @name:	Name of memory transaction (read, write ...).
989  * @val:	The bit (e.g. READS_TO_LOCAL_MEM or READS_TO_REMOTE_MEM) used to
990  *		represent the memory transaction within an event's configuration.
991  */
992 struct mbm_transaction {
993 	char	name[32];
994 	u32	val;
995 };
996 
997 /* Decoded values for each type of memory transaction. */
998 static struct mbm_transaction mbm_transactions[NUM_MBM_TRANSACTIONS] = {
999 	{"local_reads", READS_TO_LOCAL_MEM},
1000 	{"remote_reads", READS_TO_REMOTE_MEM},
1001 	{"local_non_temporal_writes", NON_TEMP_WRITE_TO_LOCAL_MEM},
1002 	{"remote_non_temporal_writes", NON_TEMP_WRITE_TO_REMOTE_MEM},
1003 	{"local_reads_slow_memory", READS_TO_LOCAL_S_MEM},
1004 	{"remote_reads_slow_memory", READS_TO_REMOTE_S_MEM},
1005 	{"dirty_victim_writes_all", DIRTY_VICTIMS_TO_ALL_MEM},
1006 };
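
/*
 * Example (illustrative): an event configured to count every local-memory
 * read, including slow memory, has the configuration value
 * READS_TO_LOCAL_MEM | READS_TO_LOCAL_S_MEM, which event_filter_show()
 * below renders as "local_reads,local_reads_slow_memory".
 */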
1007 
1008 int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
1009 {
1010 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1011 	struct rdt_resource *r;
1012 	bool sep = false;
1013 	int ret = 0, i;
1014 
1015 	mutex_lock(&rdtgroup_mutex);
1016 	rdt_last_cmd_clear();
1017 
1018 	r = resctrl_arch_get_resource(mevt->rid);
1019 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1020 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1021 		ret = -EINVAL;
1022 		goto out_unlock;
1023 	}
1024 
1025 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1026 		if (mevt->evt_cfg & mbm_transactions[i].val) {
1027 			if (sep)
1028 				seq_putc(seq, ',');
1029 			seq_printf(seq, "%s", mbm_transactions[i].name);
1030 			sep = true;
1031 		}
1032 	}
1033 	seq_putc(seq, '\n');
1034 
1035 out_unlock:
1036 	mutex_unlock(&rdtgroup_mutex);
1037 
1038 	return ret;
1039 }
1040 
1041 int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of, struct seq_file *s,
1042 				     void *v)
1043 {
1044 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1045 	int ret = 0;
1046 
1047 	mutex_lock(&rdtgroup_mutex);
1048 	rdt_last_cmd_clear();
1049 
1050 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1051 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1052 		ret = -EINVAL;
1053 		goto out_unlock;
1054 	}
1055 
1056 	seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir);
1057 
1058 out_unlock:
1059 	mutex_unlock(&rdtgroup_mutex);
1060 
1061 	return ret;
1062 }
1063 
1064 ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
1065 					  size_t nbytes, loff_t off)
1066 {
1067 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1068 	bool value;
1069 	int ret;
1070 
1071 	ret = kstrtobool(buf, &value);
1072 	if (ret)
1073 		return ret;
1074 
1075 	mutex_lock(&rdtgroup_mutex);
1076 	rdt_last_cmd_clear();
1077 
1078 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1079 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1080 		ret = -EINVAL;
1081 		goto out_unlock;
1082 	}
1083 
1084 	r->mon.mbm_assign_on_mkdir = value;
1085 
1086 out_unlock:
1087 	mutex_unlock(&rdtgroup_mutex);
1088 
1089 	return ret ?: nbytes;
1090 }
1091 
1092 /*
1093  * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
1094  *			 domain @d. Called when mbm_assign_mode is changed.
1095  */
1096 static void mbm_cntr_free_all(struct rdt_resource *r, struct rdt_mon_domain *d)
1097 {
1098 	memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs);
1099 }
1100 
1101 /*
1102  * resctrl_reset_rmid_all() - Reset all non-architectural state for all the
1103  *			      supported RMIDs.
1104  */
1105 static void resctrl_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d)
1106 {
1107 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
1108 	enum resctrl_event_id evt;
1109 	int idx;
1110 
1111 	for_each_mbm_event_id(evt) {
1112 		if (!resctrl_is_mon_event_enabled(evt))
1113 			continue;
1114 		idx = MBM_STATE_IDX(evt);
1115 		memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit);
1116 	}
1117 }
1118 
1119 /*
1120  * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
1121  * pair in the domain.
1122  *
1123  * Assign the counter if @assign is true else unassign the counter. Reset the
1124  * associated non-architectural state.
1125  */
1126 static void rdtgroup_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
1127 				 enum resctrl_event_id evtid, u32 rmid, u32 closid,
1128 				 u32 cntr_id, bool assign)
1129 {
1130 	struct mbm_state *m;
1131 
1132 	resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign);
1133 
1134 	m = get_mbm_state(d, closid, rmid, evtid);
1135 	if (m)
1136 		memset(m, 0, sizeof(*m));
1137 }
1138 
1139 /*
1140  * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
1141  * pointed to by @mevt and the resctrl group @rdtgrp within the domain @d.
1142  *
1143  * Return:
1144  * 0 on success, < 0 on failure.
1145  */
1146 static int rdtgroup_alloc_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
1147 				      struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1148 {
1149 	int cntr_id;
1150 
1151 	/* No action required if the counter is assigned already. */
1152 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1153 	if (cntr_id >= 0)
1154 		return 0;
1155 
1156 	cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid);
1157 	if (cntr_id < 0) {
1158 		rdt_last_cmd_printf("Failed to allocate counter for %s in domain %d\n",
1159 				    mevt->name, d->hdr.id);
1160 		return cntr_id;
1161 	}
1162 
1163 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true);
1164 
1165 	return 0;
1166 }
1167 
1168 /*
1169  * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
1170  * @mevt to the resctrl group @rdtgrp. Assign counters to all domains if @d is
1171  * NULL; otherwise, assign the counter to the specified domain @d.
1172  *
1173  * If all counters in a domain are already in use, rdtgroup_alloc_assign_cntr()
1174  * will fail. The assignment process will abort at the first failure encountered
1175  * during domain traversal, which may result in the event being only partially
1176  * assigned.
1177  *
1178  * Return:
1179  * 0 on success, < 0 on failure.
1180  */
1181 static int rdtgroup_assign_cntr_event(struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
1182 				      struct mon_evt *mevt)
1183 {
1184 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1185 	int ret = 0;
1186 
1187 	if (!d) {
1188 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1189 			ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1190 			if (ret)
1191 				return ret;
1192 		}
1193 	} else {
1194 		ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 /*
1201  * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
1202  *			     a new group is created.
1203  *
1204  * Each group can accommodate two counters per domain: one for the total
1205  * event and one for the local event. Assignments may fail due to the limited
1206  * number of counters. However, it is not necessary to fail the group creation
1207  * and thus no failure is returned. Users have the option to modify the
1208  * counter assignments after the group has been created.
1209  */
1210 void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
1211 {
1212 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1213 
1214 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) ||
1215 	    !r->mon.mbm_assign_on_mkdir)
1216 		return;
1217 
1218 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1219 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1220 					   &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1221 
1222 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1223 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1224 					   &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1225 }
1226 
1227 /*
1228  * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
1229  * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
1230  */
1231 static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
1232 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1233 {
1234 	int cntr_id;
1235 
1236 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1237 
1238 	/* If there is no cntr_id assigned, nothing to do */
1239 	if (cntr_id < 0)
1240 		return;
1241 
1242 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
1243 
1244 	mbm_cntr_free(d, cntr_id);
1245 }
1246 
1247 /*
1248  * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
1249  * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
1250  * the counters from all the domains if @d is NULL, else unassign from @d.
1251  */
1252 static void rdtgroup_unassign_cntr_event(struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
1253 					 struct mon_evt *mevt)
1254 {
1255 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1256 
1257 	if (!d) {
1258 		list_for_each_entry(d, &r->mon_domains, hdr.list)
1259 			rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1260 	} else {
1261 		rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1262 	}
1263 }
1264 
1265 /*
1266  * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
1267  *			       Called when a group is deleted.
1268  */
1269 void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
1270 {
1271 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1272 
1273 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
1274 		return;
1275 
1276 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1277 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1278 					     &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1279 
1280 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1281 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1282 					     &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1283 }
1284 
1285 static int resctrl_parse_mem_transactions(char *tok, u32 *val)
1286 {
1287 	u32 temp_val = 0;
1288 	char *evt_str;
1289 	bool found;
1290 	int i;
1291 
1292 next_config:
1293 	if (!tok || tok[0] == '\0') {
1294 		*val = temp_val;
1295 		return 0;
1296 	}
1297 
1298 	/* Start processing the strings for each memory transaction type */
1299 	evt_str = strim(strsep(&tok, ","));
1300 	found = false;
1301 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1302 		if (!strcmp(mbm_transactions[i].name, evt_str)) {
1303 			temp_val |= mbm_transactions[i].val;
1304 			found = true;
1305 			break;
1306 		}
1307 	}
1308 
1309 	if (!found) {
1310 		rdt_last_cmd_printf("Invalid memory transaction type %s\n", evt_str);
1311 		return -EINVAL;
1312 	}
1313 
1314 	goto next_config;
1315 }
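
/*
 * Illustrative usage (the exact file location is an assumption): writing
 *
 *	# echo "local_reads,remote_reads" > event_filter
 *
 * from an event's directory under info/L3_MON/ lands in event_filter_write()
 * below and parses to the value READS_TO_LOCAL_MEM | READS_TO_REMOTE_MEM.
 */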
1316 
1317 /*
1318  * rdtgroup_update_cntr_event - Update the counter assignments for the event
1319  *				in a group.
1320  * @r:		Resource whose counter assignments are to be updated.
1321  * @rdtgrp:	Resctrl group.
1322  * @evtid:	MBM monitor event.
1323  */
1324 static void rdtgroup_update_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1325 				       enum resctrl_event_id evtid)
1326 {
1327 	struct rdt_mon_domain *d;
1328 	int cntr_id;
1329 
1330 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1331 		cntr_id = mbm_cntr_get(r, d, rdtgrp, evtid);
1332 		if (cntr_id >= 0)
1333 			rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid,
1334 					     rdtgrp->closid, cntr_id, true);
1335 	}
1336 }
1337 
1338 /*
1339  * resctrl_update_cntr_allrdtgrp - Update the counter assignments for the event
1340  *				   for all the groups.
1341  * @mevt:	MBM monitor event.
1342  */
1343 static void resctrl_update_cntr_allrdtgrp(struct mon_evt *mevt)
1344 {
1345 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1346 	struct rdtgroup *prgrp, *crgrp;
1347 
1348 	/*
1349 	 * Find all the groups where the event is assigned and update the
1350 	 * configuration of existing assignments.
1351 	 */
1352 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1353 		rdtgroup_update_cntr_event(r, prgrp, mevt->evtid);
1354 
1355 		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
1356 			rdtgroup_update_cntr_event(r, crgrp, mevt->evtid);
1357 	}
1358 }
1359 
1360 ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
1361 			   loff_t off)
1362 {
1363 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1364 	struct rdt_resource *r;
1365 	u32 evt_cfg = 0;
1366 	int ret = 0;
1367 
1368 	/* Valid input requires a trailing newline */
1369 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1370 		return -EINVAL;
1371 
1372 	buf[nbytes - 1] = '\0';
1373 
1374 	cpus_read_lock();
1375 	mutex_lock(&rdtgroup_mutex);
1376 
1377 	rdt_last_cmd_clear();
1378 
1379 	r = resctrl_arch_get_resource(mevt->rid);
1380 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1381 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1382 		ret = -EINVAL;
1383 		goto out_unlock;
1384 	}
1385 
1386 	ret = resctrl_parse_mem_transactions(buf, &evt_cfg);
1387 	if (!ret && mevt->evt_cfg != evt_cfg) {
1388 		mevt->evt_cfg = evt_cfg;
1389 		resctrl_update_cntr_allrdtgrp(mevt);
1390 	}
1391 
1392 out_unlock:
1393 	mutex_unlock(&rdtgroup_mutex);
1394 	cpus_read_unlock();
1395 
1396 	return ret ?: nbytes;
1397 }
1398 
1399 int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
1400 				 struct seq_file *s, void *v)
1401 {
1402 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1403 	bool enabled;
1404 
1405 	mutex_lock(&rdtgroup_mutex);
1406 	enabled = resctrl_arch_mbm_cntr_assign_enabled(r);
1407 
1408 	if (r->mon.mbm_cntr_assignable) {
1409 		if (enabled)
1410 			seq_puts(s, "[mbm_event]\n");
1411 		else
1412 			seq_puts(s, "[default]\n");
1413 
1414 		if (!IS_ENABLED(CONFIG_RESCTRL_ASSIGN_FIXED)) {
1415 			if (enabled)
1416 				seq_puts(s, "default\n");
1417 			else
1418 				seq_puts(s, "mbm_event\n");
1419 		}
1420 	} else {
1421 		seq_puts(s, "[default]\n");
1422 	}
1423 
1424 	mutex_unlock(&rdtgroup_mutex);
1425 
1426 	return 0;
1427 }
1428 
1429 ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
1430 				      size_t nbytes, loff_t off)
1431 {
1432 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1433 	struct rdt_mon_domain *d;
1434 	int ret = 0;
1435 	bool enable;
1436 
1437 	/* Valid input requires a trailing newline */
1438 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1439 		return -EINVAL;
1440 
1441 	buf[nbytes - 1] = '\0';
1442 
1443 	cpus_read_lock();
1444 	mutex_lock(&rdtgroup_mutex);
1445 
1446 	rdt_last_cmd_clear();
1447 
1448 	if (!strcmp(buf, "default")) {
1449 		enable = false;
1450 	} else if (!strcmp(buf, "mbm_event")) {
1451 		if (r->mon.mbm_cntr_assignable) {
1452 			enable = true;
1453 		} else {
1454 			ret = -EINVAL;
1455 			rdt_last_cmd_puts("mbm_event mode is not supported\n");
1456 			goto out_unlock;
1457 		}
1458 	} else {
1459 		ret = -EINVAL;
1460 		rdt_last_cmd_puts("Unsupported assign mode\n");
1461 		goto out_unlock;
1462 	}
1463 
1464 	if (enable != resctrl_arch_mbm_cntr_assign_enabled(r)) {
1465 		ret = resctrl_arch_mbm_cntr_assign_set(r, enable);
1466 		if (ret)
1467 			goto out_unlock;
1468 
1469 		/* Update the visibility of BMEC related files */
1470 		resctrl_bmec_files_show(r, NULL, !enable);
1471 
1472 		/*
1473 		 * Initialize the default memory transaction values for
1474 		 * total and local events.
1475 		 */
1476 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1477 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1478 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1479 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1480 									   (READS_TO_LOCAL_MEM |
1481 									    READS_TO_LOCAL_S_MEM |
1482 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1483 		/* Enable auto assignment when switching to "mbm_event" mode */
1484 		if (enable)
1485 			r->mon.mbm_assign_on_mkdir = true;
1486 		/*
1487 		 * Reset all the non-architectural RMID state and assignable counters.
1488 		 */
1489 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1490 			mbm_cntr_free_all(r, d);
1491 			resctrl_reset_rmid_all(r, d);
1492 		}
1493 	}
1494 
1495 out_unlock:
1496 	mutex_unlock(&rdtgroup_mutex);
1497 	cpus_read_unlock();
1498 
1499 	return ret ?: nbytes;
1500 }
1501 
1502 int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of,
1503 			       struct seq_file *s, void *v)
1504 {
1505 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1506 	struct rdt_mon_domain *dom;
1507 	bool sep = false;
1508 
1509 	cpus_read_lock();
1510 	mutex_lock(&rdtgroup_mutex);
1511 
1512 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1513 		if (sep)
1514 			seq_putc(s, ';');
1515 
1516 		seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs);
1517 		sep = true;
1518 	}
1519 	seq_putc(s, '\n');
1520 
1521 	mutex_unlock(&rdtgroup_mutex);
1522 	cpus_read_unlock();
1523 	return 0;
1524 }
1525 
1526 int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of,
1527 				     struct seq_file *s, void *v)
1528 {
1529 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1530 	struct rdt_mon_domain *dom;
1531 	bool sep = false;
1532 	u32 cntrs, i;
1533 	int ret = 0;
1534 
1535 	cpus_read_lock();
1536 	mutex_lock(&rdtgroup_mutex);
1537 
1538 	rdt_last_cmd_clear();
1539 
1540 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1541 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1542 		ret = -EINVAL;
1543 		goto out_unlock;
1544 	}
1545 
1546 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1547 		if (sep)
1548 			seq_putc(s, ';');
1549 
1550 		cntrs = 0;
1551 		for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
1552 			if (!dom->cntr_cfg[i].rdtgrp)
1553 				cntrs++;
1554 		}
1555 
1556 		seq_printf(s, "%d=%u", dom->hdr.id, cntrs);
1557 		sep = true;
1558 	}
1559 	seq_putc(s, '\n');
1560 
1561 out_unlock:
1562 	mutex_unlock(&rdtgroup_mutex);
1563 	cpus_read_unlock();
1564 
1565 	return ret;
1566 }
1567 
1568 int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v)
1569 {
1570 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1571 	struct rdt_mon_domain *d;
1572 	struct rdtgroup *rdtgrp;
1573 	struct mon_evt *mevt;
1574 	int ret = 0;
1575 	bool sep;
1576 
1577 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1578 	if (!rdtgrp) {
1579 		ret = -ENOENT;
1580 		goto out_unlock;
1581 	}
1582 
1583 	rdt_last_cmd_clear();
1584 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1585 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1586 		ret = -EINVAL;
1587 		goto out_unlock;
1588 	}
1589 
1590 	for_each_mon_event(mevt) {
1591 		if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
1592 			continue;
1593 
1594 		sep = false;
1595 		seq_printf(s, "%s:", mevt->name);
1596 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1597 			if (sep)
1598 				seq_putc(s, ';');
1599 
1600 			if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0)
1601 				seq_printf(s, "%d=_", d->hdr.id);
1602 			else
1603 				seq_printf(s, "%d=e", d->hdr.id);
1604 
1605 			sep = true;
1606 		}
1607 		seq_putc(s, '\n');
1608 	}
1609 
1610 out_unlock:
1611 	rdtgroup_kn_unlock(of->kn);
1612 
1613 	return ret;
1614 }
1615 
1616 /*
1617  * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
1618  * event name.
1619  */
1620 static struct mon_evt *mbm_get_mon_event_by_name(struct rdt_resource *r, char *name)
1621 {
1622 	struct mon_evt *mevt;
1623 
1624 	for_each_mon_event(mevt) {
1625 		if (mevt->rid == r->rid && mevt->enabled &&
1626 		    resctrl_is_mbm_event(mevt->evtid) &&
1627 		    !strcmp(mevt->name, name))
1628 			return mevt;
1629 	}
1630 
1631 	return NULL;
1632 }
1633 
1634 static int rdtgroup_modify_assign_state(char *assign, struct rdt_mon_domain *d,
1635 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1636 {
1637 	int ret = 0;
1638 
1639 	if (!assign || strlen(assign) != 1)
1640 		return -EINVAL;
1641 
1642 	switch (*assign) {
1643 	case 'e':
1644 		ret = rdtgroup_assign_cntr_event(d, rdtgrp, mevt);
1645 		break;
1646 	case '_':
1647 		rdtgroup_unassign_cntr_event(d, rdtgrp, mevt);
1648 		break;
1649 	default:
1650 		ret = -EINVAL;
1651 		break;
1652 	}
1653 
1654 	return ret;
1655 }
1656 
1657 static int resctrl_parse_mbm_assignment(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1658 					char *event, char *tok)
1659 {
1660 	struct rdt_mon_domain *d;
1661 	unsigned long dom_id = 0;
1662 	char *dom_str, *id_str;
1663 	struct mon_evt *mevt;
1664 	int ret;
1665 
1666 	mevt = mbm_get_mon_event_by_name(r, event);
1667 	if (!mevt) {
1668 		rdt_last_cmd_printf("Invalid event %s\n", event);
1669 		return -ENOENT;
1670 	}
1671 
1672 next:
1673 	if (!tok || tok[0] == '\0')
1674 		return 0;
1675 
1676 	/* Start processing the strings for each domain */
1677 	dom_str = strim(strsep(&tok, ";"));
1678 
1679 	id_str = strsep(&dom_str, "=");
1680 
1681 	/* Check for domain id '*' which means all domains */
1682 	if (id_str && *id_str == '*') {
1683 		ret = rdtgroup_modify_assign_state(dom_str, NULL, rdtgrp, mevt);
1684 		if (ret)
1685 			rdt_last_cmd_printf("Assign operation '%s:*=%s' failed\n",
1686 					    event, dom_str);
1687 		return ret;
1688 	} else if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1689 		rdt_last_cmd_puts("Missing domain id\n");
1690 		return -EINVAL;
1691 	}
1692 
1693 	/* Verify that the dom_id is valid */
1694 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1695 		if (d->hdr.id == dom_id) {
1696 			ret = rdtgroup_modify_assign_state(dom_str, d, rdtgrp, mevt);
1697 			if (ret) {
1698 				rdt_last_cmd_printf("Assign operation '%s:%ld=%s' failed\n",
1699 						    event, dom_id, dom_str);
1700 				return ret;
1701 			}
1702 			goto next;
1703 		}
1704 	}
1705 
1706 	rdt_last_cmd_printf("Invalid domain id %ld\n", dom_id);
1707 	return -EINVAL;
1708 }
1709 
1710 ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
1711 				 size_t nbytes, loff_t off)
1712 {
1713 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1714 	struct rdtgroup *rdtgrp;
1715 	char *token, *event;
1716 	int ret = 0;
1717 
1718 	/* Valid input requires a trailing newline */
1719 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1720 		return -EINVAL;
1721 
1722 	buf[nbytes - 1] = '\0';
1723 
1724 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1725 	if (!rdtgrp) {
1726 		rdtgroup_kn_unlock(of->kn);
1727 		return -ENOENT;
1728 	}
1729 	rdt_last_cmd_clear();
1730 
1731 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1732 		rdt_last_cmd_puts("mbm_event mode is not enabled\n");
1733 		rdtgroup_kn_unlock(of->kn);
1734 		return -EINVAL;
1735 	}
1736 
1737 	while ((token = strsep(&buf, "\n")) != NULL) {
1738 		/*
1739 		 * The write command uses the following format:
1740 		 * "<Event>:<Domain ID>=<Assignment state>"
1741 		 * Extract the event name first.
1742 		 */
1743 		event = strsep(&token, ":");
1744 
1745 		ret = resctrl_parse_mbm_assignment(r, rdtgrp, event, token);
1746 		if (ret)
1747 			break;
1748 	}
1749 
1750 	rdtgroup_kn_unlock(of->kn);
1751 
1752 	return ret ?: nbytes;
1753 }
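
/*
 * Example (illustrative) writes accepted by mbm_L3_assignments_write(),
 * following the "<Event>:<Domain ID>=<Assignment state>" format parsed
 * above:
 *
 *	mbm_total_bytes:0=e;1=_		assign a counter in domain 0 and
 *					release the one in domain 1
 *	mbm_local_bytes:*=e		assign counters in every domain
 */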
1754 
1755 /**
1756  * resctrl_mon_resource_init() - Initialise global monitoring structures.
1757  *
1758  * Allocate and initialise global monitor resources that do not belong to a
1759  * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
1760  * Called once during boot after the struct rdt_resource's have been configured
1761  * but before the filesystem is mounted.
1762  * Resctrl's cpuhp callbacks may be called before this point to bring a domain
1763  * online.
1764  *
1765  * Returns 0 for success, or -ENOMEM.
1766  */
1767 int resctrl_mon_resource_init(void)
1768 {
1769 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1770 	int ret;
1771 
1772 	if (!r->mon_capable)
1773 		return 0;
1774 
1775 	ret = dom_data_init(r);
1776 	if (ret)
1777 		return ret;
1778 
1779 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
1780 		mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
1781 		resctrl_file_fflags_init("mbm_total_bytes_config",
1782 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1783 	}
1784 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
1785 		mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].configurable = true;
1786 		resctrl_file_fflags_init("mbm_local_bytes_config",
1787 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1788 	}
1789 
1790 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1791 		mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
1792 	else if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1793 		mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
1794 
1795 	if (r->mon.mbm_cntr_assignable) {
1796 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1797 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1798 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1799 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1800 									   (READS_TO_LOCAL_MEM |
1801 									    READS_TO_LOCAL_S_MEM |
1802 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1803 		r->mon.mbm_assign_on_mkdir = true;
1804 		resctrl_file_fflags_init("num_mbm_cntrs",
1805 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1806 		resctrl_file_fflags_init("available_mbm_cntrs",
1807 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1808 		resctrl_file_fflags_init("event_filter", RFTYPE_ASSIGN_CONFIG);
1809 		resctrl_file_fflags_init("mbm_assign_on_mkdir", RFTYPE_MON_INFO |
1810 					 RFTYPE_RES_CACHE);
1811 		resctrl_file_fflags_init("mbm_L3_assignments", RFTYPE_MON_BASE);
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 void resctrl_mon_resource_exit(void)
1818 {
1819 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1820 
1821 	dom_data_exit(r);
1822 }
1823