xref: /linux/fs/resctrl/monitor.c (revision 4bc3ef46ff41d5e7ba557e56e9cd2031527cd7f8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resource Director Technology (RDT)
4  * - Monitoring code
5  *
6  * Copyright (C) 2017 Intel Corporation
7  *
8  * Author:
9  *    Vikas Shivappa <vikas.shivappa@intel.com>
10  *
11  * This replaces cqm.c, which was based on perf, but we reuse a lot of
12  * code and data structures originally from Peter Zijlstra and Matt Fleming.
13  *
14  * More information about RDT can be found in the Intel(R) x86 Architecture
15  * Software Developer Manual June 2016, volume 3, section 17.17.
16  */
17 
18 #define pr_fmt(fmt)	"resctrl: " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/resctrl.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 
25 #include "internal.h"
26 
27 #define CREATE_TRACE_POINTS
28 
29 #include "monitor_trace.h"
30 
31 /**
32  * struct rmid_entry - dirty tracking for all RMID.
33  * @closid:	The CLOSID for this entry.
34  * @rmid:	The RMID for this entry.
35  * @busy:	The number of domains with cached data using this RMID.
36  * @list:	Member of the rmid_free_lru list when busy == 0.
37  *
38  * Depending on the architecture the correct monitor is accessed using
39  * both @closid and @rmid, or @rmid only.
40  *
41  * Take the rdtgroup_mutex when accessing.
42  */
43 struct rmid_entry {
44 	u32				closid;
45 	u32				rmid;
46 	int				busy;
47 	struct list_head		list;
48 };
49 
50 /*
51  * @rmid_free_lru - A least recently used list of free RMIDs.
52  *     These RMIDs are guaranteed to have an occupancy less than the
53  *     threshold occupancy.
54  */
55 static LIST_HEAD(rmid_free_lru);
56 
57 /*
58  * @closid_num_dirty_rmid - The number of dirty RMIDs each CLOSID has.
59  *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
60  *     Indexed by CLOSID. Protected by rdtgroup_mutex.
61  */
62 static u32 *closid_num_dirty_rmid;
63 
64 /*
65  * @rmid_limbo_count - count of currently unused but (potentially)
66  *     dirty RMIDs.
67  *     This counts RMIDs that no one is currently using but that
68  *     may have an occupancy value > resctrl_rmid_realloc_threshold. The
69  *     user can change the threshold occupancy value.
70  */
71 static unsigned int rmid_limbo_count;
72 
73 /*
74  * @rmid_ptrs - Array of struct rmid_entry used by the limbo and free lists.
75  */
76 static struct rmid_entry	*rmid_ptrs;
77 
78 /*
79  * This is the threshold cache occupancy in bytes at which we will consider an
80  * RMID available for re-allocation.
81  */
82 unsigned int resctrl_rmid_realloc_threshold;
83 
84 /*
85  * This is the maximum value for the reallocation threshold, in bytes.
86  */
87 unsigned int resctrl_rmid_realloc_limit;
88 
89 /*
90  * x86 and arm64 differ in their handling of monitoring.
91  * x86's RMID are independent numbers, there is only one source of traffic
92  * with an RMID value of '1'.
93  * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
94  * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
95  * value is no longer unique.
96  * To account for this, resctrl uses an index. On x86 this is just the RMID,
97  * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
98  *
99  * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
100  * must accept an attempt to read every index.
101  */
102 static inline struct rmid_entry *__rmid_entry(u32 idx)
103 {
104 	struct rmid_entry *entry;
105 	u32 closid, rmid;
106 
107 	entry = &rmid_ptrs[idx];
108 	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
109 
110 	WARN_ON_ONCE(entry->closid != closid);
111 	WARN_ON_ONCE(entry->rmid != rmid);
112 
113 	return entry;
114 }
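
/*
 * Sketch of the index scheme described above. This is illustrative only,
 * not the actual arch implementations, and the 8-bit PMG width is a
 * made-up assumption:
 *
 *	(x86-style: the CLOSID is ignored, the index is just the RMID)
 *	u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
 *	{
 *		return rmid;
 *	}
 *
 *	(arm64-style: the CLOSID occupies the upper bits of the index)
 *	u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
 *	{
 *		return (closid << 8) | rmid;
 *	}
 */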
115 
116 static void limbo_release_entry(struct rmid_entry *entry)
117 {
118 	lockdep_assert_held(&rdtgroup_mutex);
119 
120 	rmid_limbo_count--;
121 	list_add_tail(&entry->list, &rmid_free_lru);
122 
123 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
124 		closid_num_dirty_rmid[entry->closid]--;
125 }
126 
127 /*
128  * Check the RMIDs that are marked as busy for this domain. If the
129  * reported LLC occupancy is below the threshold, clear the busy bit and
130  * decrement the count. If the busy count reaches zero on an RMID, free
131  * the RMID.
132  */
133 void __check_limbo(struct rdt_l3_mon_domain *d, bool force_free)
134 {
135 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
136 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
137 	struct rmid_entry *entry;
138 	u32 idx, cur_idx = 1;
139 	void *arch_mon_ctx;
140 	bool rmid_dirty;
141 	u64 val = 0;
142 
143 	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
144 	if (IS_ERR(arch_mon_ctx)) {
145 		pr_warn_ratelimited("Failed to allocate monitor context: %ld\n",
146 				    PTR_ERR(arch_mon_ctx));
147 		return;
148 	}
149 
150 	/*
151 	 * Skip RMID 0; start from RMID 1 and check all the RMIDs that are
152 	 * marked as busy for occupancy < threshold. If the occupancy is less
153 	 * than the threshold, decrement the RMID's busy counter and move it
154 	 * to the free list when the counter reaches 0.
155 	 */
156 	for (;;) {
157 		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
158 		if (idx >= idx_limit)
159 			break;
160 
161 		entry = __rmid_entry(idx);
162 		if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
163 					   QOS_L3_OCCUP_EVENT_ID, &val,
164 					   arch_mon_ctx)) {
165 			rmid_dirty = true;
166 		} else {
167 			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
168 
169 			/*
170 			 * x86's CLOSID and RMID are independent numbers, so the entry's
171 			 * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
172 			 * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
173 			 * used to select the configuration. The tracepoint below therefore
174 			 * reports both the CLOSID and the RMID, as on such architectures
175 			 * the RMID is only meaningful in combination with its CLOSID.
176 			 */
177 			trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
178 		}
179 
180 		if (force_free || !rmid_dirty) {
181 			clear_bit(idx, d->rmid_busy_llc);
182 			if (!--entry->busy)
183 				limbo_release_entry(entry);
184 		}
185 		cur_idx = idx + 1;
186 	}
187 
188 	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
189 }
190 
191 bool has_busy_rmid(struct rdt_l3_mon_domain *d)
192 {
193 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
194 
195 	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
196 }
197 
198 static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
199 {
200 	struct rmid_entry *itr;
201 	u32 itr_idx, cmp_idx;
202 
203 	if (list_empty(&rmid_free_lru))
204 		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
205 
206 	list_for_each_entry(itr, &rmid_free_lru, list) {
207 		/*
208 		 * Get the index of this free RMID, and the index it would need
209 		 * to be if it were used with this CLOSID.
210 		 * If the CLOSID is irrelevant on this architecture, the two
211 		 * index values are always the same on every entry and thus the
212 		 * very first entry will be returned.
213 		 */
214 		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
215 		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
216 
217 		if (itr_idx == cmp_idx)
218 			return itr;
219 	}
220 
221 	return ERR_PTR(-ENOSPC);
222 }
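
/*
 * Illustration of the matching above: with an MPAM-style encoding, a free
 * entry that was last used with CLOSID 2 only matches a request for
 * CLOSID 2, because idx_encode(2, rmid) != idx_encode(3, rmid). On x86
 * the CLOSID is ignored by the encode, so the first free entry matches.
 */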
223 
224 /**
225  * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
226  *                                  RMID are clean, or the CLOSID that has
227  *                                  the most clean RMID.
228  *
229  * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
230  * may not be able to allocate clean RMID. To avoid this the allocator will
231  * choose the CLOSID with the most clean RMID.
232  *
233  * When the CLOSID and RMID are independent numbers, the first free CLOSID will
234  * be returned.
235  */
236 int resctrl_find_cleanest_closid(void)
237 {
238 	u32 cleanest_closid = ~0;
239 	int i = 0;
240 
241 	lockdep_assert_held(&rdtgroup_mutex);
242 
243 	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
244 		return -EIO;
245 
246 	for (i = 0; i < closids_supported(); i++) {
247 		int num_dirty;
248 
249 		if (closid_allocated(i))
250 			continue;
251 
252 		num_dirty = closid_num_dirty_rmid[i];
253 		if (num_dirty == 0)
254 			return i;
255 
256 		if (cleanest_closid == ~0)
257 			cleanest_closid = i;
258 
259 		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
260 			cleanest_closid = i;
261 	}
262 
263 	if (cleanest_closid == ~0)
264 		return -ENOSPC;
265 
266 	return cleanest_closid;
267 }
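
/*
 * Worked example with made-up numbers: if closid_num_dirty_rmid[] is
 * {2, 0, 5} and CLOSID 1 is already allocated, CLOSID 1 is skipped,
 * CLOSID 0 (2 dirty RMIDs) beats CLOSID 2 (5 dirty RMIDs), and 0 is
 * returned.
 */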
268 
269 /*
270  * For MPAM the RMID value is not unique, and has to be considered with
271  * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
272  * allows all domains to be managed by a single free list.
273  * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
274  */
275 int alloc_rmid(u32 closid)
276 {
277 	struct rmid_entry *entry;
278 
279 	lockdep_assert_held(&rdtgroup_mutex);
280 
281 	entry = resctrl_find_free_rmid(closid);
282 	if (IS_ERR(entry))
283 		return PTR_ERR(entry);
284 
285 	list_del(&entry->list);
286 	return entry->rmid;
287 }
288 
289 static void add_rmid_to_limbo(struct rmid_entry *entry)
290 {
291 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
292 	struct rdt_l3_mon_domain *d;
293 	u32 idx;
294 
295 	lockdep_assert_held(&rdtgroup_mutex);
296 
297 	/* Walking r->domains, ensure it can't race with cpuhp */
298 	lockdep_assert_cpus_held();
299 
300 	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
301 
302 	entry->busy = 0;
303 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
304 		/*
305 		 * For the first limbo RMID in the domain,
306 		 * set up the limbo worker.
307 		 */
308 		if (!has_busy_rmid(d))
309 			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
310 						RESCTRL_PICK_ANY_CPU);
311 		set_bit(idx, d->rmid_busy_llc);
312 		entry->busy++;
313 	}
314 
315 	rmid_limbo_count++;
316 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
317 		closid_num_dirty_rmid[entry->closid]++;
318 }
319 
320 void free_rmid(u32 closid, u32 rmid)
321 {
322 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
323 	struct rmid_entry *entry;
324 
325 	lockdep_assert_held(&rdtgroup_mutex);
326 
327 	/*
328 	 * Do not allow the default RMID to be freed. Comparing by index
329 	 * allows architectures that ignore the closid parameter to avoid an
330 	 * unnecessary check.
331 	 */
332 	if (!resctrl_arch_mon_capable() ||
333 	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
334 						RESCTRL_RESERVED_RMID))
335 		return;
336 
337 	entry = __rmid_entry(idx);
338 
339 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
340 		add_rmid_to_limbo(entry);
341 	else
342 		list_add_tail(&entry->list, &rmid_free_lru);
343 }
344 
345 static struct mbm_state *get_mbm_state(struct rdt_l3_mon_domain *d, u32 closid,
346 				       u32 rmid, enum resctrl_event_id evtid)
347 {
348 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
349 	struct mbm_state *state;
350 
351 	if (!resctrl_is_mbm_event(evtid))
352 		return NULL;
353 
354 	state = d->mbm_states[MBM_STATE_IDX(evtid)];
355 
356 	return state ? &state[idx] : NULL;
357 }
358 
359 /*
360  * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
361  *
362  * Return:
363  * Valid counter ID on success, or -ENOENT on failure.
364  */
365 static int mbm_cntr_get(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
366 			struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
367 {
368 	int cntr_id;
369 
370 	if (!r->mon.mbm_cntr_assignable)
371 		return -ENOENT;
372 
373 	if (!resctrl_is_mbm_event(evtid))
374 		return -ENOENT;
375 
376 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
377 		if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp &&
378 		    d->cntr_cfg[cntr_id].evtid == evtid)
379 			return cntr_id;
380 	}
381 
382 	return -ENOENT;
383 }
384 
385 /*
386  * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
387  * Caller must ensure that the specified event is not assigned already.
388  *
389  * Return:
390  * Valid counter ID on success, or -ENOSPC on failure.
391  */
392 static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
393 			  struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
394 {
395 	int cntr_id;
396 
397 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
398 		if (!d->cntr_cfg[cntr_id].rdtgrp) {
399 			d->cntr_cfg[cntr_id].rdtgrp = rdtgrp;
400 			d->cntr_cfg[cntr_id].evtid = evtid;
401 			return cntr_id;
402 		}
403 	}
404 
405 	return -ENOSPC;
406 }
407 
408 /*
409  * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
410  */
411 static void mbm_cntr_free(struct rdt_l3_mon_domain *d, int cntr_id)
412 {
413 	memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
414 }
415 
416 static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
417 {
418 	int cpu = smp_processor_id();
419 	u32 closid = rdtgrp->closid;
420 	u32 rmid = rdtgrp->mon.rmid;
421 	struct rdt_l3_mon_domain *d;
422 	int cntr_id = -ENOENT;
423 	struct mbm_state *m;
424 	u64 tval = 0;
425 
426 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
427 		rr->err = -EIO;
428 		return -EINVAL;
429 	}
430 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
431 
432 	if (rr->is_mbm_cntr) {
433 		cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evtid);
434 		if (cntr_id < 0) {
435 			rr->err = -ENOENT;
436 			return -EINVAL;
437 		}
438 	}
439 
440 	if (rr->first) {
441 		if (rr->is_mbm_cntr)
442 			resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evtid);
443 		else
444 			resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evtid);
445 		m = get_mbm_state(d, closid, rmid, rr->evtid);
446 		if (m)
447 			memset(m, 0, sizeof(struct mbm_state));
448 		return 0;
449 	}
450 
451 	/* Reading a single domain, must be on a CPU in that domain. */
452 	if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
453 		return -EINVAL;
454 	if (rr->is_mbm_cntr)
455 		rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
456 						 rr->evtid, &tval);
457 	else
458 		rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
459 						 rr->evtid, &tval, rr->arch_mon_ctx);
460 	if (rr->err)
461 		return rr->err;
462 
463 	rr->val += tval;
464 
465 	return 0;
466 }
467 
468 static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
469 {
470 	int cpu = smp_processor_id();
471 	u32 closid = rdtgrp->closid;
472 	u32 rmid = rdtgrp->mon.rmid;
473 	struct rdt_l3_mon_domain *d;
474 	u64 tval = 0;
475 	int err, ret;
476 
477 	/*
478 	 * Summing across domains is only done for systems that implement
479 	 * Sub-NUMA Cluster. There is no overlap with systems that support
480 	 * assignable counters.
481 	 */
482 	if (rr->is_mbm_cntr) {
483 		pr_warn_once("Summing domains using assignable counters is not supported\n");
484 		rr->err = -EINVAL;
485 		return -EINVAL;
486 	}
487 
488 	/* Summing domains that share a cache, must be on a CPU for that cache. */
489 	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
490 		return -EINVAL;
491 
492 	/*
493 	 * Legacy files must report the sum of an event across all
494 	 * domains that share the same L3 cache instance.
495 	 * Report success if a read from any domain succeeds, -EINVAL
496 	 * (translated to "Unavailable" for user space) if reading from
497 	 * all domains fails for any reason.
498 	 */
499 	ret = -EINVAL;
500 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
501 		if (d->ci_id != rr->ci->id)
502 			continue;
503 		err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
504 					     rr->evtid, &tval, rr->arch_mon_ctx);
505 		if (!err) {
506 			rr->val += tval;
507 			ret = 0;
508 		}
509 	}
510 
511 	if (ret)
512 		rr->err = ret;
513 
514 	return ret;
515 }
516 
517 static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
518 {
519 	switch (rr->r->rid) {
520 	case RDT_RESOURCE_L3:
521 		if (rr->hdr)
522 			return __l3_mon_event_count(rdtgrp, rr);
523 		else
524 			return __l3_mon_event_count_sum(rdtgrp, rr);
525 	default:
526 		rr->err = -EINVAL;
527 		return -EINVAL;
528 	}
529 }
530 
531 /*
532  * mbm_bw_count() - Update bw count from values previously read by
533  *		    __mon_event_count().
534  * @rdtgrp:	resctrl group associated with the CLOSID and RMID to identify
535  *		the cached mbm_state.
536  * @rr:		The struct rmid_read populated by __mon_event_count().
537  *
538  * Supporting function to calculate the memory bandwidth
539  * and delta bandwidth in MBps. The chunks value previously read by
540  * __mon_event_count() is compared with the chunks value from the previous
541  * invocation. This must be called once per second to maintain values in MBps.
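 *
 * Worked example with illustrative numbers: if rr->val is 536870912 bytes
 * (512 MiB) larger than at the previous invocation one second earlier,
 * then bytes = 536870912 and cur_bw = 536870912 / SZ_1M = 512 MBps.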
542  */
543 static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
544 {
545 	u64 cur_bw, bytes, cur_bytes;
546 	u32 closid = rdtgrp->closid;
547 	u32 rmid = rdtgrp->mon.rmid;
548 	struct rdt_l3_mon_domain *d;
549 	struct mbm_state *m;
550 
551 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
552 		return;
553 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
554 	m = get_mbm_state(d, closid, rmid, rr->evtid);
555 	if (WARN_ON_ONCE(!m))
556 		return;
557 
558 	cur_bytes = rr->val;
559 	bytes = cur_bytes - m->prev_bw_bytes;
560 	m->prev_bw_bytes = cur_bytes;
561 
562 	cur_bw = bytes / SZ_1M;
563 
564 	m->prev_bw = cur_bw;
565 }
566 
567 /*
568  * This is scheduled by mon_event_read() to read the CQM/MBM counters
569  * on a domain.
570  */
571 void mon_event_count(void *info)
572 {
573 	struct rdtgroup *rdtgrp, *entry;
574 	struct rmid_read *rr = info;
575 	struct list_head *head;
576 	int ret;
577 
578 	rdtgrp = rr->rgrp;
579 
580 	ret = __mon_event_count(rdtgrp, rr);
581 
582 	/*
583 	 * For Ctrl groups read data from child monitor groups and
584 	 * add them together. Count events which are read successfully.
585 	 * Discard the rmid_read's reporting errors.
586 	 */
587 	head = &rdtgrp->mon.crdtgrp_list;
588 
589 	if (rdtgrp->type == RDTCTRL_GROUP) {
590 		list_for_each_entry(entry, head, mon.crdtgrp_list) {
591 			if (__mon_event_count(entry, rr) == 0)
592 				ret = 0;
593 		}
594 	}
595 
596 	/*
597 	 * __mon_event_count() calls for newly created monitor groups may
598 	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
599 	 * Discard error if any of the monitor event reads succeeded.
600 	 */
601 	if (ret == 0)
602 		rr->err = 0;
603 }
604 
605 static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
606 							struct rdt_resource *r)
607 {
608 	struct rdt_ctrl_domain *d;
609 
610 	lockdep_assert_cpus_held();
611 
612 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
613 		/* Find the domain that contains this CPU */
614 		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
615 			return d;
616 	}
617 
618 	return NULL;
619 }
620 
621 /*
622  * Feedback loop for MBA software controller (mba_sc)
623  *
624  * mba_sc is a feedback loop where we periodically read MBM counters and
625  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
626  * that:
627  *
628  *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
629  *
630  * This uses the MBM counters to measure the bandwidth and MBA throttle
631  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
632  * fact that resctrl rdtgroups have both monitoring and control.
633  *
634  * The frequency of the checks is 1s and we just tag along the MBM overflow
635  * timer. Having 1s interval makes the calculation of bandwidth simpler.
636  *
637  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
638  * be a need to increase the bandwidth to avoid unnecessarily restricting
639  * the L2 <-> L3 traffic.
640  *
641  * Since MBA controls the L2 external bandwidth whereas MBM measures the
642  * L3 external bandwidth the following sequence could lead to such a
643  * situation.
644  *
645  * Consider an rdtgroup which had high L3 <-> memory traffic in initial
646  * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but
647  * after some time rdtgroup has mostly L2 <-> L3 traffic.
648  *
649  * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
650  * throttle MSRs already have low percentage values.  To avoid
651  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
652  */
653 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_l3_mon_domain *dom_mbm)
654 {
655 	u32 closid, rmid, cur_msr_val, new_msr_val;
656 	struct mbm_state *pmbm_data, *cmbm_data;
657 	struct rdt_ctrl_domain *dom_mba;
658 	enum resctrl_event_id evt_id;
659 	struct rdt_resource *r_mba;
660 	struct list_head *head;
661 	struct rdtgroup *entry;
662 	u32 cur_bw, user_bw;
663 
664 	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
665 	evt_id = rgrp->mba_mbps_event;
666 
667 	closid = rgrp->closid;
668 	rmid = rgrp->mon.rmid;
669 	pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
670 	if (WARN_ON_ONCE(!pmbm_data))
671 		return;
672 
673 	dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
674 	if (!dom_mba) {
675 		pr_warn_once("Failure to get domain for MBA update\n");
676 		return;
677 	}
678 
679 	cur_bw = pmbm_data->prev_bw;
680 	user_bw = dom_mba->mbps_val[closid];
681 
682 	/* MBA resource doesn't support CDP */
683 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
684 
685 	/*
686 	 * For Ctrl groups read data from child monitor groups.
687 	 */
688 	head = &rgrp->mon.crdtgrp_list;
689 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
690 		cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
691 		if (WARN_ON_ONCE(!cmbm_data))
692 			return;
693 		cur_bw += cmbm_data->prev_bw;
694 	}
695 
696 	/*
697 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
698 	 * bandwidth step is the bandwidth granularity specified by the
699 	 * hardware.
700 	 * Always increase throttling if current bandwidth is above the
701 	 * target set by user.
702 	 * But avoid thrashing up and down on every poll by checking
703 	 * whether a decrease in throttling is likely to push the group
704 	 * back over target. E.g. if currently throttling to 30% of bandwidth
705 	 * on a system with 10% granularity steps, check whether moving to
706 	 * 40% would go past the limit by multiplying current bandwidth by
707 	 * "(30 + 10) / 30".
708 	 */
709 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
710 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
711 	} else if (cur_msr_val < MAX_MBA_BW &&
712 		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.bw_gran) / cur_msr_val))) {
713 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
714 	} else {
715 		return;
716 	}
717 
718 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
719 }
720 
721 static void mbm_update_one_event(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
722 				 struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
723 {
724 	struct rmid_read rr = {0};
725 
726 	rr.r = r;
727 	rr.hdr = &d->hdr;
728 	rr.evtid = evtid;
729 	if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
730 		rr.is_mbm_cntr = true;
731 	} else {
732 		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
733 		if (IS_ERR(rr.arch_mon_ctx)) {
734 			pr_warn_ratelimited("Failed to allocate monitor context: %ld\n",
735 					    PTR_ERR(rr.arch_mon_ctx));
736 			return;
737 		}
738 	}
739 
740 	__mon_event_count(rdtgrp, &rr);
741 
742 	/*
743 	 * If the software controller is enabled, compute the
744 	 * bandwidth for this event id.
745 	 */
746 	if (is_mba_sc(NULL))
747 		mbm_bw_count(rdtgrp, &rr);
748 
749 	if (rr.arch_mon_ctx)
750 		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
751 }
752 
753 static void mbm_update(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
754 		       struct rdtgroup *rdtgrp)
755 {
756 	/*
757 	 * This is protected from concurrent reads from user as both
758 	 * the user and overflow handler hold the global mutex.
759 	 */
760 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
761 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
762 
763 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
764 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
765 }
766 
767 /*
768  * Handler to scan the limbo list and move to the free list any RMIDs
769  * whose occupancy < threshold_occupancy.
770  */
771 void cqm_handle_limbo(struct work_struct *work)
772 {
773 	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
774 	struct rdt_l3_mon_domain *d;
775 
776 	cpus_read_lock();
777 	mutex_lock(&rdtgroup_mutex);
778 
779 	d = container_of(work, struct rdt_l3_mon_domain, cqm_limbo.work);
780 
781 	__check_limbo(d, false);
782 
783 	if (has_busy_rmid(d)) {
784 		d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
785 							   RESCTRL_PICK_ANY_CPU);
786 		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
787 					 delay);
788 	}
789 
790 	mutex_unlock(&rdtgroup_mutex);
791 	cpus_read_unlock();
792 }
793 
794 /**
795  * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
796  *                             domain.
797  * @dom:           The domain the limbo handler should run for.
798  * @delay_ms:      How far in the future the handler should run.
799  * @exclude_cpu:   Which CPU the handler should not run on,
800  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
801  */
802 void cqm_setup_limbo_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
803 			     int exclude_cpu)
804 {
805 	unsigned long delay = msecs_to_jiffies(delay_ms);
806 	int cpu;
807 
808 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
809 	dom->cqm_work_cpu = cpu;
810 
811 	if (cpu < nr_cpu_ids)
812 		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
813 }
814 
815 void mbm_handle_overflow(struct work_struct *work)
816 {
817 	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
818 	struct rdtgroup *prgrp, *crgrp;
819 	struct rdt_l3_mon_domain *d;
820 	struct list_head *head;
821 	struct rdt_resource *r;
822 
823 	cpus_read_lock();
824 	mutex_lock(&rdtgroup_mutex);
825 
826 	/*
827 	 * If the filesystem has been unmounted this work no longer needs to
828 	 * run.
829 	 */
830 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
831 		goto out_unlock;
832 
833 	r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
834 	d = container_of(work, struct rdt_l3_mon_domain, mbm_over.work);
835 
836 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
837 		mbm_update(r, d, prgrp);
838 
839 		head = &prgrp->mon.crdtgrp_list;
840 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
841 			mbm_update(r, d, crgrp);
842 
843 		if (is_mba_sc(NULL))
844 			update_mba_bw(prgrp, d);
845 	}
846 
847 	/*
848 	 * Re-check for housekeeping CPUs. This allows the overflow handler to
849 	 * move off a nohz_full CPU quickly.
850 	 */
851 	d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
852 						   RESCTRL_PICK_ANY_CPU);
853 	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
854 
855 out_unlock:
856 	mutex_unlock(&rdtgroup_mutex);
857 	cpus_read_unlock();
858 }
859 
860 /**
861  * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
862  *                                domain.
863  * @dom:           The domain the overflow handler should run for.
864  * @delay_ms:      How far in the future the handler should run.
865  * @exclude_cpu:   Which CPU the handler should not run on,
866  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
867  */
868 void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
869 				int exclude_cpu)
870 {
871 	unsigned long delay = msecs_to_jiffies(delay_ms);
872 	int cpu;
873 
874 	/*
875 	 * When a domain comes online there is no guarantee the filesystem is
876 	 * mounted. If not, there is no need to catch counter overflow.
877 	 */
878 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
879 		return;
880 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
881 	dom->mbm_work_cpu = cpu;
882 
883 	if (cpu < nr_cpu_ids)
884 		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
885 }
886 
887 static int dom_data_init(struct rdt_resource *r)
888 {
889 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
890 	u32 num_closid = resctrl_arch_get_num_closid(r);
891 	struct rmid_entry *entry = NULL;
892 	int err = 0, i;
893 	u32 idx;
894 
895 	mutex_lock(&rdtgroup_mutex);
896 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
897 		u32 *tmp;
898 
899 		/*
900 		 * If the architecture hasn't provided a sanitised value here,
901 		 * this may result in larger arrays than necessary. Resctrl will
902 		 * use a smaller system wide value based on the resources in
903 		 * use.
904 		 */
905 		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
906 		if (!tmp) {
907 			err = -ENOMEM;
908 			goto out_unlock;
909 		}
910 
911 		closid_num_dirty_rmid = tmp;
912 	}
913 
914 	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
915 	if (!rmid_ptrs) {
916 		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
917 			kfree(closid_num_dirty_rmid);
918 			closid_num_dirty_rmid = NULL;
919 		}
920 		err = -ENOMEM;
921 		goto out_unlock;
922 	}
923 
924 	for (i = 0; i < idx_limit; i++) {
925 		entry = &rmid_ptrs[i];
926 		INIT_LIST_HEAD(&entry->list);
927 
928 		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
929 		list_add_tail(&entry->list, &rmid_free_lru);
930 	}
931 
932 	/*
933 	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
934 	 * are always allocated. These are used for the rdtgroup_default
935  * control group, which will be set up later in resctrl_init().
936 	 */
937 	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
938 					   RESCTRL_RESERVED_RMID);
939 	entry = __rmid_entry(idx);
940 	list_del(&entry->list);
941 
942 out_unlock:
943 	mutex_unlock(&rdtgroup_mutex);
944 
945 	return err;
946 }
947 
948 static void dom_data_exit(struct rdt_resource *r)
949 {
950 	mutex_lock(&rdtgroup_mutex);
951 
952 	if (!r->mon_capable)
953 		goto out_unlock;
954 
955 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
956 		kfree(closid_num_dirty_rmid);
957 		closid_num_dirty_rmid = NULL;
958 	}
959 
960 	kfree(rmid_ptrs);
961 	rmid_ptrs = NULL;
962 
963 out_unlock:
964 	mutex_unlock(&rdtgroup_mutex);
965 }
966 
967 /*
968  * All available events. Architecture code calls
969  * resctrl_enable_mon_event() to set .enabled for each event
970  * the system supports.
971  */
972 struct mon_evt mon_event_all[QOS_NUM_EVENTS] = {
973 	[QOS_L3_OCCUP_EVENT_ID] = {
974 		.name	= "llc_occupancy",
975 		.evtid	= QOS_L3_OCCUP_EVENT_ID,
976 		.rid	= RDT_RESOURCE_L3,
977 	},
978 	[QOS_L3_MBM_TOTAL_EVENT_ID] = {
979 		.name	= "mbm_total_bytes",
980 		.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
981 		.rid	= RDT_RESOURCE_L3,
982 	},
983 	[QOS_L3_MBM_LOCAL_EVENT_ID] = {
984 		.name	= "mbm_local_bytes",
985 		.evtid	= QOS_L3_MBM_LOCAL_EVENT_ID,
986 		.rid	= RDT_RESOURCE_L3,
987 	},
988 };
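
/*
 * For example, architecture code that detects support for LLC occupancy
 * monitoring calls resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID) so
 * that mon_event_all[QOS_L3_OCCUP_EVENT_ID].enabled becomes true.
 */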
989 
990 void resctrl_enable_mon_event(enum resctrl_event_id eventid)
991 {
992 	if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS))
993 		return;
994 	if (mon_event_all[eventid].enabled) {
995 		pr_warn("Duplicate enable for event %d\n", eventid);
996 		return;
997 	}
998 
999 	mon_event_all[eventid].enabled = true;
1000 }
1001 
1002 bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
1003 {
1004 	return eventid >= QOS_FIRST_EVENT && eventid < QOS_NUM_EVENTS &&
1005 	       mon_event_all[eventid].enabled;
1006 }
1007 
1008 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id evtid)
1009 {
1010 	return mon_event_all[evtid].evt_cfg;
1011 }
1012 
1013 /**
1014  * struct mbm_transaction - Memory transaction an MBM event can be configured with.
1015  * @name:	Name of memory transaction (read, write ...).
1016  * @val:	The bit (e.g. READS_TO_LOCAL_MEM or READS_TO_REMOTE_MEM) used to
1017  *		represent the memory transaction within an event's configuration.
1018  */
1019 struct mbm_transaction {
1020 	char	name[32];
1021 	u32	val;
1022 };
1023 
1024 /* Decoded values for each type of memory transaction. */
1025 static struct mbm_transaction mbm_transactions[NUM_MBM_TRANSACTIONS] = {
1026 	{"local_reads", READS_TO_LOCAL_MEM},
1027 	{"remote_reads", READS_TO_REMOTE_MEM},
1028 	{"local_non_temporal_writes", NON_TEMP_WRITE_TO_LOCAL_MEM},
1029 	{"remote_non_temporal_writes", NON_TEMP_WRITE_TO_REMOTE_MEM},
1030 	{"local_reads_slow_memory", READS_TO_LOCAL_S_MEM},
1031 	{"remote_reads_slow_memory", READS_TO_REMOTE_S_MEM},
1032 	{"dirty_victim_writes_all", DIRTY_VICTIMS_TO_ALL_MEM},
1033 };
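
/*
 * For example (hypothetical write): "local_reads,remote_reads" parsed by
 * resctrl_parse_mem_transactions() below yields
 * READS_TO_LOCAL_MEM | READS_TO_REMOTE_MEM in *val.
 */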
1034 
1035 int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
1036 {
1037 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1038 	struct rdt_resource *r;
1039 	bool sep = false;
1040 	int ret = 0, i;
1041 
1042 	mutex_lock(&rdtgroup_mutex);
1043 	rdt_last_cmd_clear();
1044 
1045 	r = resctrl_arch_get_resource(mevt->rid);
1046 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1047 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1048 		ret = -EINVAL;
1049 		goto out_unlock;
1050 	}
1051 
1052 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1053 		if (mevt->evt_cfg & mbm_transactions[i].val) {
1054 			if (sep)
1055 				seq_putc(seq, ',');
1056 			seq_printf(seq, "%s", mbm_transactions[i].name);
1057 			sep = true;
1058 		}
1059 	}
1060 	seq_putc(seq, '\n');
1061 
1062 out_unlock:
1063 	mutex_unlock(&rdtgroup_mutex);
1064 
1065 	return ret;
1066 }
1067 
1068 int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of, struct seq_file *s,
1069 				     void *v)
1070 {
1071 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1072 	int ret = 0;
1073 
1074 	mutex_lock(&rdtgroup_mutex);
1075 	rdt_last_cmd_clear();
1076 
1077 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1078 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1079 		ret = -EINVAL;
1080 		goto out_unlock;
1081 	}
1082 
1083 	seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir);
1084 
1085 out_unlock:
1086 	mutex_unlock(&rdtgroup_mutex);
1087 
1088 	return ret;
1089 }
1090 
1091 ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
1092 					  size_t nbytes, loff_t off)
1093 {
1094 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1095 	bool value;
1096 	int ret;
1097 
1098 	ret = kstrtobool(buf, &value);
1099 	if (ret)
1100 		return ret;
1101 
1102 	mutex_lock(&rdtgroup_mutex);
1103 	rdt_last_cmd_clear();
1104 
1105 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1106 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1107 		ret = -EINVAL;
1108 		goto out_unlock;
1109 	}
1110 
1111 	r->mon.mbm_assign_on_mkdir = value;
1112 
1113 out_unlock:
1114 	mutex_unlock(&rdtgroup_mutex);
1115 
1116 	return ret ?: nbytes;
1117 }
1118 
1119 /*
1120  * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
1121  *			 domain @d. Called when mbm_assign_mode is changed.
1122  */
1123 static void mbm_cntr_free_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1124 {
1125 	memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs);
1126 }
1127 
1128 /*
1129  * resctrl_reset_rmid_all() - Reset all non-architectural state for all the
1130  *			      supported RMIDs.
1131  */
1132 static void resctrl_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1133 {
1134 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
1135 	enum resctrl_event_id evt;
1136 	int idx;
1137 
1138 	for_each_mbm_event_id(evt) {
1139 		if (!resctrl_is_mon_event_enabled(evt))
1140 			continue;
1141 		idx = MBM_STATE_IDX(evt);
1142 		memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit);
1143 	}
1144 }
1145 
1146 /*
1147  * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
1148  * pair in the domain.
1149  *
1150  * Assign the counter if @assign is true else unassign the counter. Reset the
1151  * associated non-architectural state.
1152  */
1153 static void rdtgroup_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1154 				 enum resctrl_event_id evtid, u32 rmid, u32 closid,
1155 				 u32 cntr_id, bool assign)
1156 {
1157 	struct mbm_state *m;
1158 
1159 	resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign);
1160 
1161 	m = get_mbm_state(d, closid, rmid, evtid);
1162 	if (m)
1163 		memset(m, 0, sizeof(*m));
1164 }
1165 
1166 /*
1167  * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
1168  * pointed to by @mevt and the resctrl group @rdtgrp within the domain @d.
1169  *
1170  * Return:
1171  * 0 on success, < 0 on failure.
1172  */
1173 static int rdtgroup_alloc_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1174 				      struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1175 {
1176 	int cntr_id;
1177 
1178 	/* No action required if the counter is assigned already. */
1179 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1180 	if (cntr_id >= 0)
1181 		return 0;
1182 
1183 	cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid);
1184 	if (cntr_id < 0) {
1185 		rdt_last_cmd_printf("Failed to allocate counter for %s in domain %d\n",
1186 				    mevt->name, d->hdr.id);
1187 		return cntr_id;
1188 	}
1189 
1190 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true);
1191 
1192 	return 0;
1193 }
1194 
1195 /*
1196  * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
1197  * @mevt to the resctrl group @rdtgrp. Assign counters to all domains if @d is
1198  * NULL; otherwise, assign the counter to the specified domain @d.
1199  *
1200  * If all counters in a domain are already in use, rdtgroup_alloc_assign_cntr()
1201  * will fail. The assignment process will abort at the first failure encountered
1202  * during domain traversal, which may result in the event being only partially
1203  * assigned.
1204  *
1205  * Return:
1206  * 0 on success, < 0 on failure.
1207  */
1208 static int rdtgroup_assign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1209 				      struct mon_evt *mevt)
1210 {
1211 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1212 	int ret = 0;
1213 
1214 	if (!d) {
1215 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1216 			ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1217 			if (ret)
1218 				return ret;
1219 		}
1220 	} else {
1221 		ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1222 	}
1223 
1224 	return ret;
1225 }
1226 
1227 /*
1228  * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
1229  *			     a new group is created.
1230  *
1231  * Each group can accommodate two counters per domain: one for the total
1232  * event and one for the local event. Assignments may fail due to the limited
1233  * number of counters. However, it is not necessary to fail the group creation
1234  * and thus no failure is returned. Users have the option to modify the
1235  * counter assignments after the group has been created.
1236  */
1237 void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
1238 {
1239 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1240 
1241 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) ||
1242 	    !r->mon.mbm_assign_on_mkdir)
1243 		return;
1244 
1245 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1246 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1247 					   &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1248 
1249 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1250 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1251 					   &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1252 }
1253 
1254 /*
1255  * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
1256  * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
1257  */
1258 static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1259 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1260 {
1261 	int cntr_id;
1262 
1263 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1264 
1265 	/* If there is no cntr_id assigned, nothing to do */
1266 	if (cntr_id < 0)
1267 		return;
1268 
1269 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
1270 
1271 	mbm_cntr_free(d, cntr_id);
1272 }
1273 
1274 /*
1275  * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
1276  * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
1277  * the counters from all the domains if @d is NULL else unassign from @d.
1278  */
1279 static void rdtgroup_unassign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1280 					 struct mon_evt *mevt)
1281 {
1282 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1283 
1284 	if (!d) {
1285 		list_for_each_entry(d, &r->mon_domains, hdr.list)
1286 			rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1287 	} else {
1288 		rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1289 	}
1290 }
1291 
1292 /*
1293  * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
1294  *			       Called when a group is deleted.
1295  */
1296 void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
1297 {
1298 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1299 
1300 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
1301 		return;
1302 
1303 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1304 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1305 					     &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1306 
1307 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1308 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1309 					     &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1310 }
1311 
1312 static int resctrl_parse_mem_transactions(char *tok, u32 *val)
1313 {
1314 	u32 temp_val = 0;
1315 	char *evt_str;
1316 	bool found;
1317 	int i;
1318 
1319 next_config:
1320 	if (!tok || tok[0] == '\0') {
1321 		*val = temp_val;
1322 		return 0;
1323 	}
1324 
1325 	/* Start processing the strings for each memory transaction type */
1326 	evt_str = strim(strsep(&tok, ","));
1327 	found = false;
1328 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1329 		if (!strcmp(mbm_transactions[i].name, evt_str)) {
1330 			temp_val |= mbm_transactions[i].val;
1331 			found = true;
1332 			break;
1333 		}
1334 	}
1335 
1336 	if (!found) {
1337 		rdt_last_cmd_printf("Invalid memory transaction type %s\n", evt_str);
1338 		return -EINVAL;
1339 	}
1340 
1341 	goto next_config;
1342 }
1343 
1344 /*
1345  * rdtgroup_update_cntr_event - Update the counter assignments for the event
1346  *				in a group.
1347  * @r:		Resource whose counter assignments need updating.
1348  * @rdtgrp:	Resctrl group.
1349  * @evtid:	MBM monitor event.
1350  */
1351 static void rdtgroup_update_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1352 				       enum resctrl_event_id evtid)
1353 {
1354 	struct rdt_l3_mon_domain *d;
1355 	int cntr_id;
1356 
1357 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1358 		cntr_id = mbm_cntr_get(r, d, rdtgrp, evtid);
1359 		if (cntr_id >= 0)
1360 			rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid,
1361 					     rdtgrp->closid, cntr_id, true);
1362 	}
1363 }
1364 
1365 /*
1366  * resctrl_update_cntr_allrdtgrp - Update the counter assignments for the event
1367  *				   for all the groups.
1368  * @mevt:	MBM monitor event.
1369  */
1370 static void resctrl_update_cntr_allrdtgrp(struct mon_evt *mevt)
1371 {
1372 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1373 	struct rdtgroup *prgrp, *crgrp;
1374 
1375 	/*
1376 	 * Find all the groups where the event is assigned and update the
1377 	 * configuration of existing assignments.
1378 	 */
1379 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1380 		rdtgroup_update_cntr_event(r, prgrp, mevt->evtid);
1381 
1382 		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
1383 			rdtgroup_update_cntr_event(r, crgrp, mevt->evtid);
1384 	}
1385 }
1386 
1387 ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
1388 			   loff_t off)
1389 {
1390 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1391 	struct rdt_resource *r;
1392 	u32 evt_cfg = 0;
1393 	int ret = 0;
1394 
1395 	/* Valid input requires a trailing newline */
1396 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1397 		return -EINVAL;
1398 
1399 	buf[nbytes - 1] = '\0';
1400 
1401 	cpus_read_lock();
1402 	mutex_lock(&rdtgroup_mutex);
1403 
1404 	rdt_last_cmd_clear();
1405 
1406 	r = resctrl_arch_get_resource(mevt->rid);
1407 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1408 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1409 		ret = -EINVAL;
1410 		goto out_unlock;
1411 	}
1412 
1413 	ret = resctrl_parse_mem_transactions(buf, &evt_cfg);
1414 	if (!ret && mevt->evt_cfg != evt_cfg) {
1415 		mevt->evt_cfg = evt_cfg;
1416 		resctrl_update_cntr_allrdtgrp(mevt);
1417 	}
1418 
1419 out_unlock:
1420 	mutex_unlock(&rdtgroup_mutex);
1421 	cpus_read_unlock();
1422 
1423 	return ret ?: nbytes;
1424 }
1425 
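/*
 * Example output when assignable counters are supported, "mbm_event" is
 * the active mode and CONFIG_RESCTRL_ASSIGN_FIXED is not set:
 *
 *	[mbm_event]
 *	default
 */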
1426 int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
1427 				 struct seq_file *s, void *v)
1428 {
1429 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1430 	bool enabled;
1431 
1432 	mutex_lock(&rdtgroup_mutex);
1433 	enabled = resctrl_arch_mbm_cntr_assign_enabled(r);
1434 
1435 	if (r->mon.mbm_cntr_assignable) {
1436 		if (enabled)
1437 			seq_puts(s, "[mbm_event]\n");
1438 		else
1439 			seq_puts(s, "[default]\n");
1440 
1441 		if (!IS_ENABLED(CONFIG_RESCTRL_ASSIGN_FIXED)) {
1442 			if (enabled)
1443 				seq_puts(s, "default\n");
1444 			else
1445 				seq_puts(s, "mbm_event\n");
1446 		}
1447 	} else {
1448 		seq_puts(s, "[default]\n");
1449 	}
1450 
1451 	mutex_unlock(&rdtgroup_mutex);
1452 
1453 	return 0;
1454 }
1455 
1456 ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
1457 				      size_t nbytes, loff_t off)
1458 {
1459 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1460 	struct rdt_l3_mon_domain *d;
1461 	int ret = 0;
1462 	bool enable;
1463 
1464 	/* Valid input requires a trailing newline */
1465 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1466 		return -EINVAL;
1467 
1468 	buf[nbytes - 1] = '\0';
1469 
1470 	cpus_read_lock();
1471 	mutex_lock(&rdtgroup_mutex);
1472 
1473 	rdt_last_cmd_clear();
1474 
1475 	if (!strcmp(buf, "default")) {
1476 		enable = false;
1477 	} else if (!strcmp(buf, "mbm_event")) {
1478 		if (r->mon.mbm_cntr_assignable) {
1479 			enable = true;
1480 		} else {
1481 			ret = -EINVAL;
1482 			rdt_last_cmd_puts("mbm_event mode is not supported\n");
1483 			goto out_unlock;
1484 		}
1485 	} else {
1486 		ret = -EINVAL;
1487 		rdt_last_cmd_puts("Unsupported assign mode\n");
1488 		goto out_unlock;
1489 	}
1490 
1491 	if (enable != resctrl_arch_mbm_cntr_assign_enabled(r)) {
1492 		ret = resctrl_arch_mbm_cntr_assign_set(r, enable);
1493 		if (ret)
1494 			goto out_unlock;
1495 
1496 		/* Update the visibility of BMEC related files */
1497 		resctrl_bmec_files_show(r, NULL, !enable);
1498 
1499 		/*
1500 		 * Initialize the default memory transaction values for
1501 		 * total and local events.
1502 		 */
1503 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1504 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1505 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1506 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1507 									   (READS_TO_LOCAL_MEM |
1508 									    READS_TO_LOCAL_S_MEM |
1509 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1510 		/* Enable auto assignment when switching to "mbm_event" mode */
1511 		if (enable)
1512 			r->mon.mbm_assign_on_mkdir = true;
1513 		/*
1514 		 * Reset all the non-architectural RMID state and assignable counters.
1515 		 */
1516 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1517 			mbm_cntr_free_all(r, d);
1518 			resctrl_reset_rmid_all(r, d);
1519 		}
1520 	}
1521 
1522 out_unlock:
1523 	mutex_unlock(&rdtgroup_mutex);
1524 	cpus_read_unlock();
1525 
1526 	return ret ?: nbytes;
1527 }
1528 
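/*
 * Example output with made-up ids/counts: "0=32;1=32", one
 * "<domain id>=<number of counters>" pair per L3 monitor domain.
 */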
1529 int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of,
1530 			       struct seq_file *s, void *v)
1531 {
1532 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1533 	struct rdt_l3_mon_domain *dom;
1534 	bool sep = false;
1535 
1536 	cpus_read_lock();
1537 	mutex_lock(&rdtgroup_mutex);
1538 
1539 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1540 		if (sep)
1541 			seq_putc(s, ';');
1542 
1543 		seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs);
1544 		sep = true;
1545 	}
1546 	seq_putc(s, '\n');
1547 
1548 	mutex_unlock(&rdtgroup_mutex);
1549 	cpus_read_unlock();
1550 	return 0;
1551 }
1552 
1553 int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of,
1554 				     struct seq_file *s, void *v)
1555 {
1556 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1557 	struct rdt_l3_mon_domain *dom;
1558 	bool sep = false;
1559 	u32 cntrs, i;
1560 	int ret = 0;
1561 
1562 	cpus_read_lock();
1563 	mutex_lock(&rdtgroup_mutex);
1564 
1565 	rdt_last_cmd_clear();
1566 
1567 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1568 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1569 		ret = -EINVAL;
1570 		goto out_unlock;
1571 	}
1572 
1573 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1574 		if (sep)
1575 			seq_putc(s, ';');
1576 
1577 		cntrs = 0;
1578 		for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
1579 			if (!dom->cntr_cfg[i].rdtgrp)
1580 				cntrs++;
1581 		}
1582 
1583 		seq_printf(s, "%d=%u", dom->hdr.id, cntrs);
1584 		sep = true;
1585 	}
1586 	seq_putc(s, '\n');
1587 
1588 out_unlock:
1589 	mutex_unlock(&rdtgroup_mutex);
1590 	cpus_read_unlock();
1591 
1592 	return ret;
1593 }
1594 
1595 int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v)
1596 {
1597 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1598 	struct rdt_l3_mon_domain *d;
1599 	struct rdtgroup *rdtgrp;
1600 	struct mon_evt *mevt;
1601 	int ret = 0;
1602 	bool sep;
1603 
1604 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1605 	if (!rdtgrp) {
1606 		ret = -ENOENT;
1607 		goto out_unlock;
1608 	}
1609 
1610 	rdt_last_cmd_clear();
1611 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1612 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1613 		ret = -EINVAL;
1614 		goto out_unlock;
1615 	}
1616 
1617 	for_each_mon_event(mevt) {
1618 		if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
1619 			continue;
1620 
1621 		sep = false;
1622 		seq_printf(s, "%s:", mevt->name);
1623 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1624 			if (sep)
1625 				seq_putc(s, ';');
1626 
1627 			if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0)
1628 				seq_printf(s, "%d=_", d->hdr.id);
1629 			else
1630 				seq_printf(s, "%d=e", d->hdr.id);
1631 
1632 			sep = true;
1633 		}
1634 		seq_putc(s, '\n');
1635 	}
1636 
1637 out_unlock:
1638 	rdtgroup_kn_unlock(of->kn);
1639 
1640 	return ret;
1641 }
1642 
1643 /*
1644  * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
1645  * event name.
1646  */
1647 static struct mon_evt *mbm_get_mon_event_by_name(struct rdt_resource *r, char *name)
1648 {
1649 	struct mon_evt *mevt;
1650 
1651 	for_each_mon_event(mevt) {
1652 		if (mevt->rid == r->rid && mevt->enabled &&
1653 		    resctrl_is_mbm_event(mevt->evtid) &&
1654 		    !strcmp(mevt->name, name))
1655 			return mevt;
1656 	}
1657 
1658 	return NULL;
1659 }
1660 
1661 static int rdtgroup_modify_assign_state(char *assign, struct rdt_l3_mon_domain *d,
1662 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1663 {
1664 	int ret = 0;
1665 
1666 	if (!assign || strlen(assign) != 1)
1667 		return -EINVAL;
1668 
1669 	switch (*assign) {
1670 	case 'e':
1671 		ret = rdtgroup_assign_cntr_event(d, rdtgrp, mevt);
1672 		break;
1673 	case '_':
1674 		rdtgroup_unassign_cntr_event(d, rdtgrp, mevt);
1675 		break;
1676 	default:
1677 		ret = -EINVAL;
1678 		break;
1679 	}
1680 
1681 	return ret;
1682 }
1683 
1684 static int resctrl_parse_mbm_assignment(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1685 					char *event, char *tok)
1686 {
1687 	struct rdt_l3_mon_domain *d;
1688 	unsigned long dom_id = 0;
1689 	char *dom_str, *id_str;
1690 	struct mon_evt *mevt;
1691 	int ret;
1692 
1693 	mevt = mbm_get_mon_event_by_name(r, event);
1694 	if (!mevt) {
1695 		rdt_last_cmd_printf("Invalid event %s\n", event);
1696 		return -ENOENT;
1697 	}
1698 
1699 next:
1700 	if (!tok || tok[0] == '\0')
1701 		return 0;
1702 
1703 	/* Start processing the strings for each domain */
1704 	dom_str = strim(strsep(&tok, ";"));
1705 
1706 	id_str = strsep(&dom_str, "=");
1707 
1708 	/* Check for domain id '*' which means all domains */
1709 	if (id_str && *id_str == '*') {
1710 		ret = rdtgroup_modify_assign_state(dom_str, NULL, rdtgrp, mevt);
1711 		if (ret)
1712 			rdt_last_cmd_printf("Assign operation '%s:*=%s' failed\n",
1713 					    event, dom_str);
1714 		return ret;
1715 	} else if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1716 		rdt_last_cmd_puts("Missing domain id\n");
1717 		return -EINVAL;
1718 	}
1719 
1720 	/* Verify if the dom_id is valid */
1721 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1722 		if (d->hdr.id == dom_id) {
1723 			ret = rdtgroup_modify_assign_state(dom_str, d, rdtgrp, mevt);
1724 			if (ret) {
1725 				rdt_last_cmd_printf("Assign operation '%s:%ld=%s' failed\n",
1726 						    event, dom_id, dom_str);
1727 				return ret;
1728 			}
1729 			goto next;
1730 		}
1731 	}
1732 
1733 	rdt_last_cmd_printf("Invalid domain id %ld\n", dom_id);
1734 	return -EINVAL;
1735 }
1736 
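/*
 * Example writes (illustrative domain ids): "mbm_total_bytes:0=e;1=_"
 * assigns a counter for mbm_total_bytes in domain 0 and unassigns it in
 * domain 1; "mbm_local_bytes:*=e" assigns one in every domain.
 */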
1737 ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
1738 				 size_t nbytes, loff_t off)
1739 {
1740 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1741 	struct rdtgroup *rdtgrp;
1742 	char *token, *event;
1743 	int ret = 0;
1744 
1745 	/* Valid input requires a trailing newline */
1746 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1747 		return -EINVAL;
1748 
1749 	buf[nbytes - 1] = '\0';
1750 
1751 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1752 	if (!rdtgrp) {
1753 		rdtgroup_kn_unlock(of->kn);
1754 		return -ENOENT;
1755 	}
1756 	rdt_last_cmd_clear();
1757 
1758 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1759 		rdt_last_cmd_puts("mbm_event mode is not enabled\n");
1760 		rdtgroup_kn_unlock(of->kn);
1761 		return -EINVAL;
1762 	}
1763 
1764 	while ((token = strsep(&buf, "\n")) != NULL) {
1765 		/*
1766 		 * Write commands use the format:
1767 		 * "<Event>:<Domain ID>=<Assignment state>"
1768 		 * Extract the event name first.
1769 		 */
1770 		event = strsep(&token, ":");
1771 
1772 		ret = resctrl_parse_mbm_assignment(r, rdtgrp, event, token);
1773 		if (ret)
1774 			break;
1775 	}
1776 
1777 	rdtgroup_kn_unlock(of->kn);
1778 
1779 	return ret ?: nbytes;
1780 }
1781 
1782 /**
1783  * resctrl_mon_resource_init() - Initialise global monitoring structures.
1784  *
1785  * Allocate and initialise global monitor resources that do not belong to a
1786  * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
1787  * Called once during boot after the struct rdt_resource's have been configured
1788  * but before the filesystem is mounted.
1789  * Resctrl's cpuhp callbacks may be called before this point to bring a domain
1790  * online.
1791  *
1792  * Return: 0 for success, or -ENOMEM.
1793  */
1794 int resctrl_mon_resource_init(void)
1795 {
1796 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1797 	int ret;
1798 
1799 	if (!r->mon_capable)
1800 		return 0;
1801 
1802 	ret = dom_data_init(r);
1803 	if (ret)
1804 		return ret;
1805 
1806 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
1807 		mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
1808 		resctrl_file_fflags_init("mbm_total_bytes_config",
1809 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1810 	}
1811 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
1812 		mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].configurable = true;
1813 		resctrl_file_fflags_init("mbm_local_bytes_config",
1814 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1815 	}
1816 
1817 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1818 		mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
1819 	else if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1820 		mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
1821 
1822 	if (r->mon.mbm_cntr_assignable) {
1823 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1824 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1825 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1826 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1827 									   (READS_TO_LOCAL_MEM |
1828 									    READS_TO_LOCAL_S_MEM |
1829 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1830 		r->mon.mbm_assign_on_mkdir = true;
1831 		resctrl_file_fflags_init("num_mbm_cntrs",
1832 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1833 		resctrl_file_fflags_init("available_mbm_cntrs",
1834 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1835 		resctrl_file_fflags_init("event_filter", RFTYPE_ASSIGN_CONFIG);
1836 		resctrl_file_fflags_init("mbm_assign_on_mkdir", RFTYPE_MON_INFO |
1837 					 RFTYPE_RES_CACHE);
1838 		resctrl_file_fflags_init("mbm_L3_assignments", RFTYPE_MON_BASE);
1839 	}
1840 
1841 	return 0;
1842 }
1843 
1844 void resctrl_mon_resource_exit(void)
1845 {
1846 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1847 
1848 	dom_data_exit(r);
1849 }
1850