xref: /linux/fs/resctrl/monitor.c (revision ab0308aee3819a3eccde42f9eb5bb01d6733be38)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resource Director Technology(RDT)
4  * - Monitoring code
5  *
6  * Copyright (C) 2017 Intel Corporation
7  *
8  * Author:
9  *    Vikas Shivappa <vikas.shivappa@intel.com>
10  *
11  * This replaces the perf-based cqm.c, but we reuse a lot of code and
12  * data structures originally from Peter Zijlstra and Matt Fleming.
13  *
14  * More information about RDT can be found in the Intel (R) x86 Architecture
15  * Software Developer Manual June 2016, volume 3, section 17.17.
16  */
17 
18 #define pr_fmt(fmt)	"resctrl: " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/resctrl.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 
25 #include "internal.h"
26 
27 #define CREATE_TRACE_POINTS
28 
29 #include "monitor_trace.h"
30 
31 /**
32  * struct rmid_entry - dirty tracking for all RMID.
33  * @closid:	The CLOSID for this entry.
34  * @rmid:	The RMID for this entry.
35  * @busy:	The number of domains with cached data using this RMID.
36  * @list:	Member of the rmid_free_lru list when busy == 0.
37  *
38  * Depending on the architecture the correct monitor is accessed using
39  * both @closid and @rmid, or @rmid only.
40  *
41  * Take the rdtgroup_mutex when accessing.
42  */
43 struct rmid_entry {
44 	u32				closid;
45 	u32				rmid;
46 	int				busy;
47 	struct list_head		list;
48 };
49 
50 /*
51  * @rmid_free_lru - A least recently used list of free RMIDs
52  *     These RMIDs are guaranteed to have an occupancy less than the
53  *     threshold occupancy
54  */
55 static LIST_HEAD(rmid_free_lru);
56 
57 /*
58  * @closid_num_dirty_rmid - The number of dirty RMIDs each CLOSID has.
59  *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
60  *     Indexed by CLOSID. Protected by rdtgroup_mutex.
61  */
62 static u32 *closid_num_dirty_rmid;
63 
64 /*
65  * @rmid_limbo_count - count of currently unused but (potentially)
66  *     dirty RMIDs.
67  *     This counts RMIDs that no one is currently using but that
68  *     may have an occupancy value > resctrl_rmid_realloc_threshold. Users can
69  *     change the threshold occupancy value.
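 *     Userspace can adjust the threshold through the
 *     info/L3_MON/max_threshold_occupancy resctrl file.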
70  */
71 static unsigned int rmid_limbo_count;
72 
73 /*
74  * @rmid_ptrs - Array of entries used by the limbo and free lists.
75  */
76 static struct rmid_entry	*rmid_ptrs;
77 
78 /*
79  * This is the threshold cache occupancy in bytes at which we will consider an
80  * RMID available for re-allocation.
81  */
82 unsigned int resctrl_rmid_realloc_threshold;
83 
84 /*
85  * This is the maximum value for the reallocation threshold, in bytes.
86  */
87 unsigned int resctrl_rmid_realloc_limit;
88 
89 /*
90  * x86 and arm64 differ in their handling of monitoring.
91  * x86's RMID are independent numbers, there is only one source of traffic
92  * with an RMID value of '1'.
93  * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
94  * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
95  * value is no longer unique.
96  * To account for this, resctrl uses an index. On x86 this is just the RMID,
97  * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
98  *
99  * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
100  * must accept an attempt to read every index.
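 *
 * For example, on x86 RMID 5 always maps to index 5, while on arm64 PMG 5
 * maps to a different index for each PARTID it is paired with.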
101  */
102 static inline struct rmid_entry *__rmid_entry(u32 idx)
103 {
104 	struct rmid_entry *entry;
105 	u32 closid, rmid;
106 
107 	entry = &rmid_ptrs[idx];
108 	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
109 
110 	WARN_ON_ONCE(entry->closid != closid);
111 	WARN_ON_ONCE(entry->rmid != rmid);
112 
113 	return entry;
114 }
115 
116 static void limbo_release_entry(struct rmid_entry *entry)
117 {
118 	lockdep_assert_held(&rdtgroup_mutex);
119 
120 	rmid_limbo_count--;
121 	list_add_tail(&entry->list, &rmid_free_lru);
122 
123 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
124 		closid_num_dirty_rmid[entry->closid]--;
125 }
126 
127 /*
128  * Check the RMIDs that are marked as busy for this domain. If the
129  * reported LLC occupancy is below the threshold, clear the busy bit and
130  * decrement the count. If the busy count gets to zero on an RMID, we
131  * free the RMID.
132  */
133 void __check_limbo(struct rdt_l3_mon_domain *d, bool force_free)
134 {
135 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
136 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
137 	struct rmid_entry *entry;
138 	u32 idx, cur_idx = 1;
139 	void *arch_mon_ctx;
140 	bool rmid_dirty;
141 	u64 val = 0;
142 
143 	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
144 	if (IS_ERR(arch_mon_ctx)) {
145 		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
146 				    PTR_ERR(arch_mon_ctx));
147 		return;
148 	}
149 
150 	/*
151 	 * Skip RMID 0, start from RMID 1, and check all the RMIDs that are
152 	 * marked as busy for occupancy < threshold. If the occupancy is less
153 	 * than the threshold, decrement the busy counter of the RMID and move
154 	 * it to the free list when the counter reaches 0.
155 	 */
156 	for (;;) {
157 		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
158 		if (idx >= idx_limit)
159 			break;
160 
161 		entry = __rmid_entry(idx);
162 		if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
163 					   QOS_L3_OCCUP_EVENT_ID, &val,
164 					   arch_mon_ctx)) {
165 			rmid_dirty = true;
166 		} else {
167 			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
168 
169 			/*
170 			 * x86's CLOSID and RMID are independent numbers, so the entry's
171 			 * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
172 			 * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
173 			 * used to select the configuration. It is thus necessary to track both
174 			 * CLOSID and RMID because there may be dependencies between them
175 			 * on some architectures.
176 			 */
177 			trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
178 		}
179 
180 		if (force_free || !rmid_dirty) {
181 			clear_bit(idx, d->rmid_busy_llc);
182 			if (!--entry->busy)
183 				limbo_release_entry(entry);
184 		}
185 		cur_idx = idx + 1;
186 	}
187 
188 	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
189 }
190 
191 bool has_busy_rmid(struct rdt_l3_mon_domain *d)
192 {
193 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
194 
195 	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
196 }
197 
198 static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
199 {
200 	struct rmid_entry *itr;
201 	u32 itr_idx, cmp_idx;
202 
203 	if (list_empty(&rmid_free_lru))
204 		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
205 
206 	list_for_each_entry(itr, &rmid_free_lru, list) {
207 		/*
208 		 * Get the index of this free RMID, and the index it would need
209 		 * to be if it were used with this CLOSID.
210 		 * If the CLOSID is irrelevant on this architecture, the two
211 		 * index values are always the same on every entry and thus the
212 		 * very first entry will be returned.
213 		 */
214 		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
215 		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
216 
217 		if (itr_idx == cmp_idx)
218 			return itr;
219 	}
220 
221 	return ERR_PTR(-ENOSPC);
222 }
223 
224 /**
225  * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
226  *                                  RMID are clean, or the CLOSID that has
227  *                                  the most clean RMID.
228  *
229  * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
230  * may not be able to allocate clean RMID. To avoid this the allocator will
231  * choose the CLOSID with the most clean RMID.
232  *
233  * When the CLOSID and RMID are independent numbers, the first free CLOSID will
234  * be returned.
235  */
236 int resctrl_find_cleanest_closid(void)
237 {
238 	u32 cleanest_closid = ~0;
239 	int i = 0;
240 
241 	lockdep_assert_held(&rdtgroup_mutex);
242 
243 	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
244 		return -EIO;
245 
246 	for (i = 0; i < closids_supported(); i++) {
247 		int num_dirty;
248 
249 		if (closid_allocated(i))
250 			continue;
251 
252 		num_dirty = closid_num_dirty_rmid[i];
253 		if (num_dirty == 0)
254 			return i;
255 
256 		if (cleanest_closid == ~0)
257 			cleanest_closid = i;
258 
259 		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
260 			cleanest_closid = i;
261 	}
262 
263 	if (cleanest_closid == ~0)
264 		return -ENOSPC;
265 
266 	return cleanest_closid;
267 }
268 
269 /*
270  * For MPAM the RMID value is not unique, and has to be considered with
271  * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
272  * allows all domains to be managed by a single free list.
273  * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
274  */
275 int alloc_rmid(u32 closid)
276 {
277 	struct rmid_entry *entry;
278 
279 	lockdep_assert_held(&rdtgroup_mutex);
280 
281 	entry = resctrl_find_free_rmid(closid);
282 	if (IS_ERR(entry))
283 		return PTR_ERR(entry);
284 
285 	list_del(&entry->list);
286 	return entry->rmid;
287 }
288 
289 static void add_rmid_to_limbo(struct rmid_entry *entry)
290 {
291 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
292 	struct rdt_l3_mon_domain *d;
293 	u32 idx;
294 
295 	lockdep_assert_held(&rdtgroup_mutex);
296 
297 	/* Walking r->domains, ensure it can't race with cpuhp */
298 	lockdep_assert_cpus_held();
299 
300 	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
301 
302 	entry->busy = 0;
303 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
304 		/*
305 		 * For the first limbo RMID in the domain,
306 		 * set up the limbo worker.
307 		 */
308 		if (!has_busy_rmid(d))
309 			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
310 						RESCTRL_PICK_ANY_CPU);
311 		set_bit(idx, d->rmid_busy_llc);
312 		entry->busy++;
313 	}
314 
315 	rmid_limbo_count++;
316 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
317 		closid_num_dirty_rmid[entry->closid]++;
318 }
319 
320 void free_rmid(u32 closid, u32 rmid)
321 {
322 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
323 	struct rmid_entry *entry;
324 
325 	lockdep_assert_held(&rdtgroup_mutex);
326 
327 	/*
328 	 * Do not allow the default RMID to be freed. Comparing by index
329 	 * allows architectures that ignore the closid parameter to avoid an
330 	 * unnecessary check.
331 	 */
332 	if (!resctrl_arch_mon_capable() ||
333 	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
334 						RESCTRL_RESERVED_RMID))
335 		return;
336 
337 	entry = __rmid_entry(idx);
338 
339 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
340 		add_rmid_to_limbo(entry);
341 	else
342 		list_add_tail(&entry->list, &rmid_free_lru);
343 }
344 
345 static struct mbm_state *get_mbm_state(struct rdt_l3_mon_domain *d, u32 closid,
346 				       u32 rmid, enum resctrl_event_id evtid)
347 {
348 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
349 	struct mbm_state *state;
350 
351 	if (!resctrl_is_mbm_event(evtid))
352 		return NULL;
353 
354 	state = d->mbm_states[MBM_STATE_IDX(evtid)];
355 
356 	return state ? &state[idx] : NULL;
357 }
358 
359 /*
360  * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
361  *
362  * Return:
363  * Valid counter ID on success, or -ENOENT on failure.
364  */
365 static int mbm_cntr_get(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
366 			struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
367 {
368 	int cntr_id;
369 
370 	if (!r->mon.mbm_cntr_assignable)
371 		return -ENOENT;
372 
373 	if (!resctrl_is_mbm_event(evtid))
374 		return -ENOENT;
375 
376 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
377 		if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp &&
378 		    d->cntr_cfg[cntr_id].evtid == evtid)
379 			return cntr_id;
380 	}
381 
382 	return -ENOENT;
383 }
384 
385 /*
386  * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
387  * Caller must ensure that the specified event is not assigned already.
388  *
389  * Return:
390  * Valid counter ID on success, or -ENOSPC on failure.
391  */
392 static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
393 			  struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
394 {
395 	int cntr_id;
396 
397 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
398 		if (!d->cntr_cfg[cntr_id].rdtgrp) {
399 			d->cntr_cfg[cntr_id].rdtgrp = rdtgrp;
400 			d->cntr_cfg[cntr_id].evtid = evtid;
401 			return cntr_id;
402 		}
403 	}
404 
405 	return -ENOSPC;
406 }
407 
408 /*
409  * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
410  */
411 static void mbm_cntr_free(struct rdt_l3_mon_domain *d, int cntr_id)
412 {
413 	memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
414 }
415 
416 static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
417 {
418 	int cpu = smp_processor_id();
419 	u32 closid = rdtgrp->closid;
420 	u32 rmid = rdtgrp->mon.rmid;
421 	struct rdt_l3_mon_domain *d;
422 	int cntr_id = -ENOENT;
423 	struct mbm_state *m;
424 	u64 tval = 0;
425 
426 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
427 		rr->err = -EIO;
428 		return -EINVAL;
429 	}
430 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
431 
432 	if (rr->is_mbm_cntr) {
433 		cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evt->evtid);
434 		if (cntr_id < 0) {
435 			rr->err = -ENOENT;
436 			return -EINVAL;
437 		}
438 	}
439 
440 	if (rr->first) {
441 		if (rr->is_mbm_cntr)
442 			resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evt->evtid);
443 		else
444 			resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evt->evtid);
445 		m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
446 		if (m)
447 			memset(m, 0, sizeof(struct mbm_state));
448 		return 0;
449 	}
450 
451 	/* Reading a single domain, must be on a CPU in that domain. */
452 	if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
453 		return -EINVAL;
454 	if (rr->is_mbm_cntr)
455 		rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
456 						 rr->evt->evtid, &tval);
457 	else
458 		rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
459 						 rr->evt->evtid, &tval, rr->arch_mon_ctx);
460 	if (rr->err)
461 		return rr->err;
462 
463 	rr->val += tval;
464 
465 	return 0;
466 }
467 
468 static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
469 {
470 	int cpu = smp_processor_id();
471 	u32 closid = rdtgrp->closid;
472 	u32 rmid = rdtgrp->mon.rmid;
473 	struct rdt_l3_mon_domain *d;
474 	u64 tval = 0;
475 	int err, ret;
476 
477 	/*
478 	 * Summing across domains is only done for systems that implement
479 	 * Sub-NUMA Cluster. There is no overlap with systems that support
480 	 * assignable counters.
481 	 */
482 	if (rr->is_mbm_cntr) {
483 		pr_warn_once("Summing domains using assignable counters is not supported\n");
484 		rr->err = -EINVAL;
485 		return -EINVAL;
486 	}
487 
488 	/* Summing domains that share a cache, must be on a CPU for that cache. */
489 	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
490 		return -EINVAL;
491 
492 	/*
493 	 * Legacy files must report the sum of an event across all
494 	 * domains that share the same L3 cache instance.
495 	 * Report success if a read from any domain succeeds, -EINVAL
496 	 * (translated to "Unavailable" for user space) if reading from
497 	 * all domains fail for any reason.
498 	 */
499 	ret = -EINVAL;
500 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
501 		if (d->ci_id != rr->ci->id)
502 			continue;
503 		err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
504 					     rr->evt->evtid, &tval, rr->arch_mon_ctx);
505 		if (!err) {
506 			rr->val += tval;
507 			ret = 0;
508 		}
509 	}
510 
511 	if (ret)
512 		rr->err = ret;
513 
514 	return ret;
515 }
516 
517 static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
518 {
519 	switch (rr->r->rid) {
520 	case RDT_RESOURCE_L3:
521 		WARN_ON_ONCE(rr->evt->any_cpu);
522 		if (rr->hdr)
523 			return __l3_mon_event_count(rdtgrp, rr);
524 		else
525 			return __l3_mon_event_count_sum(rdtgrp, rr);
526 	default:
527 		rr->err = -EINVAL;
528 		return -EINVAL;
529 	}
530 }
531 
532 /*
533  * mbm_bw_count() - Update bw count from values previously read by
534  *		    __mon_event_count().
535  * @rdtgrp:	resctrl group associated with the CLOSID and RMID to identify
536  *		the cached mbm_state.
537  * @rr:		The struct rmid_read populated by __mon_event_count().
538  *
539  * Supporting function to calculate the memory bandwidth in MBps. The
540  * chunks value previously read by __mon_event_count() is compared with
541  * the chunks value from the previous
542  * invocation. This must be called once per second to maintain values in MBps.
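 * For example, if the value read grew by 512 MiB since the previous call,
 * prev_bw is updated to 512 MBps.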
543  */
544 static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
545 {
546 	u64 cur_bw, bytes, cur_bytes;
547 	u32 closid = rdtgrp->closid;
548 	u32 rmid = rdtgrp->mon.rmid;
549 	struct rdt_l3_mon_domain *d;
550 	struct mbm_state *m;
551 
552 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
553 		return;
554 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
555 	m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
556 	if (WARN_ON_ONCE(!m))
557 		return;
558 
559 	cur_bytes = rr->val;
560 	bytes = cur_bytes - m->prev_bw_bytes;
561 	m->prev_bw_bytes = cur_bytes;
562 
563 	cur_bw = bytes / SZ_1M;
564 
565 	m->prev_bw = cur_bw;
566 }
567 
568 /*
569  * This is scheduled by mon_event_read() to read the CQM/MBM counters
570  * on a domain.
571  */
572 void mon_event_count(void *info)
573 {
574 	struct rdtgroup *rdtgrp, *entry;
575 	struct rmid_read *rr = info;
576 	struct list_head *head;
577 	int ret;
578 
579 	rdtgrp = rr->rgrp;
580 
581 	ret = __mon_event_count(rdtgrp, rr);
582 
583 	/*
584 	 * For Ctrl groups read data from child monitor groups and
585 	 * add them together. Count events which are read successfully.
586 	 * Discard any errors reported via the struct rmid_read.
587 	 */
588 	head = &rdtgrp->mon.crdtgrp_list;
589 
590 	if (rdtgrp->type == RDTCTRL_GROUP) {
591 		list_for_each_entry(entry, head, mon.crdtgrp_list) {
592 			if (__mon_event_count(entry, rr) == 0)
593 				ret = 0;
594 		}
595 	}
596 
597 	/*
598 	 * __mon_event_count() calls for newly created monitor groups may
599 	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
600 	 * Discard error if any of the monitor event reads succeeded.
601 	 */
602 	if (ret == 0)
603 		rr->err = 0;
604 }
605 
606 static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
607 							struct rdt_resource *r)
608 {
609 	struct rdt_ctrl_domain *d;
610 
611 	lockdep_assert_cpus_held();
612 
613 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
614 		/* Find the domain that contains this CPU */
615 		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
616 			return d;
617 	}
618 
619 	return NULL;
620 }
621 
622 /*
623  * Feedback loop for MBA software controller (mba_sc)
624  *
625  * mba_sc is a feedback loop where we periodically read MBM counters and
626  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
627  * that:
628  *
629  *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
630  *
631  * This uses the MBM counters to measure the bandwidth and MBA throttle
632  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
633  * fact that resctrl rdtgroups have both monitoring and control.
634  *
635  * The checks run once per second, piggybacking on the MBM overflow
636  * timer. The fixed 1s interval makes the bandwidth calculation simpler.
637  *
638  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
639  * be a need to increase the bandwidth to avoid unnecessarily restricting
640  * the L2 <-> L3 traffic.
641  *
642  * Since MBA controls the L2 external bandwidth whereas MBM measures the
643  * L3 external bandwidth, the following sequence could lead to such a
644  * situation.
645  *
646  * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
647  * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
648  * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
649  *
650  * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
651  * throttle MSRs already have low percentage values.  To avoid
652  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
653  */
654 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_l3_mon_domain *dom_mbm)
655 {
656 	u32 closid, rmid, cur_msr_val, new_msr_val;
657 	struct mbm_state *pmbm_data, *cmbm_data;
658 	struct rdt_ctrl_domain *dom_mba;
659 	enum resctrl_event_id evt_id;
660 	struct rdt_resource *r_mba;
661 	struct list_head *head;
662 	struct rdtgroup *entry;
663 	u32 cur_bw, user_bw;
664 
665 	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
666 	evt_id = rgrp->mba_mbps_event;
667 
668 	closid = rgrp->closid;
669 	rmid = rgrp->mon.rmid;
670 	pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
671 	if (WARN_ON_ONCE(!pmbm_data))
672 		return;
673 
674 	dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
675 	if (!dom_mba) {
676 		pr_warn_once("Failure to get domain for MBA update\n");
677 		return;
678 	}
679 
680 	cur_bw = pmbm_data->prev_bw;
681 	user_bw = dom_mba->mbps_val[closid];
682 
683 	/* MBA resource doesn't support CDP */
684 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
685 
686 	/*
687 	 * For Ctrl groups read data from child monitor groups.
688 	 */
689 	head = &rgrp->mon.crdtgrp_list;
690 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
691 		cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
692 		if (WARN_ON_ONCE(!cmbm_data))
693 			return;
694 		cur_bw += cmbm_data->prev_bw;
695 	}
696 
697 	/*
698 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
699 	 * bandwidth step is the bandwidth granularity specified by the
700 	 * hardware.
701 	 * Always increase throttling if current bandwidth is above the
702 	 * target set by user.
703 	 * But avoid thrashing up and down on every poll by checking
704 	 * whether a decrease in throttling is likely to push the group
705 	 * back over target. E.g. if currently throttling to 30% of bandwidth
706 	 * on a system with 10% granularity steps, check whether moving to
707 	 * 40% would go past the limit by multiplying current bandwidth by
708 	 * "(30 + 10) / 30".
709 	 */
710 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
711 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
712 	} else if (cur_msr_val < MAX_MBA_BW &&
713 		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
714 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
715 	} else {
716 		return;
717 	}
718 
719 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
720 }
721 
722 static void mbm_update_one_event(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
723 				 struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
724 {
725 	struct rmid_read rr = {0};
726 
727 	rr.r = r;
728 	rr.hdr = &d->hdr;
729 	rr.evt = &mon_event_all[evtid];
730 	if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
731 		rr.is_mbm_cntr = true;
732 	} else {
733 		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, evtid);
734 		if (IS_ERR(rr.arch_mon_ctx)) {
735 			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
736 					    PTR_ERR(rr.arch_mon_ctx));
737 			return;
738 		}
739 	}
740 
741 	__mon_event_count(rdtgrp, &rr);
742 
743 	/*
744 	 * If the software controller is enabled, compute the
745 	 * bandwidth for this event id.
746 	 */
747 	if (is_mba_sc(NULL))
748 		mbm_bw_count(rdtgrp, &rr);
749 
750 	if (rr.arch_mon_ctx)
751 		resctrl_arch_mon_ctx_free(rr.r, evtid, rr.arch_mon_ctx);
752 }
753 
754 static void mbm_update(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
755 		       struct rdtgroup *rdtgrp)
756 {
757 	/*
758 	 * This is protected from concurrent reads from user as both
759 	 * the user and overflow handler hold the global mutex.
760 	 */
761 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
762 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
763 
764 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
765 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
766 }
767 
768 /*
769  * Handler to scan the limbo list and move RMIDs whose occupancy is below
770  * threshold_occupancy to the free list.
771  */
772 void cqm_handle_limbo(struct work_struct *work)
773 {
774 	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
775 	struct rdt_l3_mon_domain *d;
776 
777 	cpus_read_lock();
778 	mutex_lock(&rdtgroup_mutex);
779 
780 	d = container_of(work, struct rdt_l3_mon_domain, cqm_limbo.work);
781 
782 	__check_limbo(d, false);
783 
784 	if (has_busy_rmid(d)) {
785 		d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
786 							   RESCTRL_PICK_ANY_CPU);
787 		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
788 					 delay);
789 	}
790 
791 	mutex_unlock(&rdtgroup_mutex);
792 	cpus_read_unlock();
793 }
794 
795 /**
796  * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
797  *                             domain.
798  * @dom:           The domain the limbo handler should run for.
799  * @delay_ms:      How far in the future the handler should run.
800  * @exclude_cpu:   Which CPU the handler should not run on,
801  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
802  */
803 void cqm_setup_limbo_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
804 			     int exclude_cpu)
805 {
806 	unsigned long delay = msecs_to_jiffies(delay_ms);
807 	int cpu;
808 
809 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
810 	dom->cqm_work_cpu = cpu;
811 
812 	if (cpu < nr_cpu_ids)
813 		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
814 }
815 
816 void mbm_handle_overflow(struct work_struct *work)
817 {
818 	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
819 	struct rdtgroup *prgrp, *crgrp;
820 	struct rdt_l3_mon_domain *d;
821 	struct list_head *head;
822 	struct rdt_resource *r;
823 
824 	cpus_read_lock();
825 	mutex_lock(&rdtgroup_mutex);
826 
827 	/*
828 	 * If the filesystem has been unmounted this work no longer needs to
829 	 * run.
830 	 */
831 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
832 		goto out_unlock;
833 
834 	r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
835 	d = container_of(work, struct rdt_l3_mon_domain, mbm_over.work);
836 
837 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
838 		mbm_update(r, d, prgrp);
839 
840 		head = &prgrp->mon.crdtgrp_list;
841 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
842 			mbm_update(r, d, crgrp);
843 
844 		if (is_mba_sc(NULL))
845 			update_mba_bw(prgrp, d);
846 	}
847 
848 	/*
849 	 * Re-check for housekeeping CPUs. This allows the overflow handler to
850 	 * move off a nohz_full CPU quickly.
851 	 */
852 	d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
853 						   RESCTRL_PICK_ANY_CPU);
854 	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
855 
856 out_unlock:
857 	mutex_unlock(&rdtgroup_mutex);
858 	cpus_read_unlock();
859 }
860 
861 /**
862  * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
863  *                                domain.
864  * @dom:           The domain the overflow handler should run for.
865  * @delay_ms:      How far in the future the handler should run.
866  * @exclude_cpu:   Which CPU the handler should not run on,
867  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
868  */
869 void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
870 				int exclude_cpu)
871 {
872 	unsigned long delay = msecs_to_jiffies(delay_ms);
873 	int cpu;
874 
875 	/*
876 	 * When a domain comes online there is no guarantee the filesystem is
877 	 * mounted. If not, there is no need to catch counter overflow.
878 	 */
879 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
880 		return;
881 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
882 	dom->mbm_work_cpu = cpu;
883 
884 	if (cpu < nr_cpu_ids)
885 		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
886 }
887 
888 static int dom_data_init(struct rdt_resource *r)
889 {
890 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
891 	u32 num_closid = resctrl_arch_get_num_closid(r);
892 	struct rmid_entry *entry = NULL;
893 	int err = 0, i;
894 	u32 idx;
895 
896 	mutex_lock(&rdtgroup_mutex);
897 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
898 		u32 *tmp;
899 
900 		/*
901 		 * If the architecture hasn't provided a sanitised value here,
902 		 * this may result in larger arrays than necessary. Resctrl will
903 		 * use a smaller system wide value based on the resources in
904 		 * use.
905 		 */
906 		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
907 		if (!tmp) {
908 			err = -ENOMEM;
909 			goto out_unlock;
910 		}
911 
912 		closid_num_dirty_rmid = tmp;
913 	}
914 
915 	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
916 	if (!rmid_ptrs) {
917 		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
918 			kfree(closid_num_dirty_rmid);
919 			closid_num_dirty_rmid = NULL;
920 		}
921 		err = -ENOMEM;
922 		goto out_unlock;
923 	}
924 
925 	for (i = 0; i < idx_limit; i++) {
926 		entry = &rmid_ptrs[i];
927 		INIT_LIST_HEAD(&entry->list);
928 
929 		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
930 		list_add_tail(&entry->list, &rmid_free_lru);
931 	}
932 
933 	/*
934 	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
935 	 * are always allocated. These are used for the rdtgroup_default
936 	 * control group, which will be set up later in resctrl_init().
937 	 */
938 	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
939 					   RESCTRL_RESERVED_RMID);
940 	entry = __rmid_entry(idx);
941 	list_del(&entry->list);
942 
943 out_unlock:
944 	mutex_unlock(&rdtgroup_mutex);
945 
946 	return err;
947 }
948 
949 static void dom_data_exit(struct rdt_resource *r)
950 {
951 	mutex_lock(&rdtgroup_mutex);
952 
953 	if (!r->mon_capable)
954 		goto out_unlock;
955 
956 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
957 		kfree(closid_num_dirty_rmid);
958 		closid_num_dirty_rmid = NULL;
959 	}
960 
961 	kfree(rmid_ptrs);
962 	rmid_ptrs = NULL;
963 
964 out_unlock:
965 	mutex_unlock(&rdtgroup_mutex);
966 }
967 
968 /*
969  * All available events. Architecture code marks the ones that
970  * are supported by a system using resctrl_enable_mon_event()
971  * to set .enabled.
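 * For example: resctrl_enable_mon_event(QOS_L3_OCCUP_EVENT_ID, false).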
972  */
973 struct mon_evt mon_event_all[QOS_NUM_EVENTS] = {
974 	[QOS_L3_OCCUP_EVENT_ID] = {
975 		.name	= "llc_occupancy",
976 		.evtid	= QOS_L3_OCCUP_EVENT_ID,
977 		.rid	= RDT_RESOURCE_L3,
978 	},
979 	[QOS_L3_MBM_TOTAL_EVENT_ID] = {
980 		.name	= "mbm_total_bytes",
981 		.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
982 		.rid	= RDT_RESOURCE_L3,
983 	},
984 	[QOS_L3_MBM_LOCAL_EVENT_ID] = {
985 		.name	= "mbm_local_bytes",
986 		.evtid	= QOS_L3_MBM_LOCAL_EVENT_ID,
987 		.rid	= RDT_RESOURCE_L3,
988 	},
989 };
990 
991 void resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu)
992 {
993 	if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS))
994 		return;
995 	if (mon_event_all[eventid].enabled) {
996 		pr_warn("Duplicate enable for event %d\n", eventid);
997 		return;
998 	}
999 
1000 	mon_event_all[eventid].any_cpu = any_cpu;
1001 	mon_event_all[eventid].enabled = true;
1002 }
1003 
1004 bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
1005 {
1006 	return eventid >= QOS_FIRST_EVENT && eventid < QOS_NUM_EVENTS &&
1007 	       mon_event_all[eventid].enabled;
1008 }
1009 
1010 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id evtid)
1011 {
1012 	return mon_event_all[evtid].evt_cfg;
1013 }
1014 
1015 /**
1016  * struct mbm_transaction - Memory transaction an MBM event can be configured with.
1017  * @name:	Name of memory transaction (read, write ...).
1018  * @val:	The bit (e.g. READS_TO_LOCAL_MEM or READS_TO_REMOTE_MEM) used to
1019  *		represent the memory transaction within an event's configuration.
1020  */
1021 struct mbm_transaction {
1022 	char	name[32];
1023 	u32	val;
1024 };
1025 
1026 /* Decoded values for each type of memory transaction. */
1027 static struct mbm_transaction mbm_transactions[NUM_MBM_TRANSACTIONS] = {
1028 	{"local_reads", READS_TO_LOCAL_MEM},
1029 	{"remote_reads", READS_TO_REMOTE_MEM},
1030 	{"local_non_temporal_writes", NON_TEMP_WRITE_TO_LOCAL_MEM},
1031 	{"remote_non_temporal_writes", NON_TEMP_WRITE_TO_REMOTE_MEM},
1032 	{"local_reads_slow_memory", READS_TO_LOCAL_S_MEM},
1033 	{"remote_reads_slow_memory", READS_TO_REMOTE_S_MEM},
1034 	{"dirty_victim_writes_all", DIRTY_VICTIMS_TO_ALL_MEM},
1035 };
1036 
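/*
 * An event's evt_cfg is the bitwise OR of the selected transaction bits,
 * e.g. (READS_TO_LOCAL_MEM | READS_TO_REMOTE_MEM) counts both local and
 * remote reads. event_filter_show() below prints the names of the
 * transactions currently set in an event's filter.
 */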
1037 int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
1038 {
1039 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1040 	struct rdt_resource *r;
1041 	bool sep = false;
1042 	int ret = 0, i;
1043 
1044 	mutex_lock(&rdtgroup_mutex);
1045 	rdt_last_cmd_clear();
1046 
1047 	r = resctrl_arch_get_resource(mevt->rid);
1048 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1049 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1050 		ret = -EINVAL;
1051 		goto out_unlock;
1052 	}
1053 
1054 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1055 		if (mevt->evt_cfg & mbm_transactions[i].val) {
1056 			if (sep)
1057 				seq_putc(seq, ',');
1058 			seq_printf(seq, "%s", mbm_transactions[i].name);
1059 			sep = true;
1060 		}
1061 	}
1062 	seq_putc(seq, '\n');
1063 
1064 out_unlock:
1065 	mutex_unlock(&rdtgroup_mutex);
1066 
1067 	return ret;
1068 }
1069 
1070 int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of, struct seq_file *s,
1071 				     void *v)
1072 {
1073 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1074 	int ret = 0;
1075 
1076 	mutex_lock(&rdtgroup_mutex);
1077 	rdt_last_cmd_clear();
1078 
1079 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1080 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1081 		ret = -EINVAL;
1082 		goto out_unlock;
1083 	}
1084 
1085 	seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir);
1086 
1087 out_unlock:
1088 	mutex_unlock(&rdtgroup_mutex);
1089 
1090 	return ret;
1091 }
1092 
1093 ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
1094 					  size_t nbytes, loff_t off)
1095 {
1096 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1097 	bool value;
1098 	int ret;
1099 
1100 	ret = kstrtobool(buf, &value);
1101 	if (ret)
1102 		return ret;
1103 
1104 	mutex_lock(&rdtgroup_mutex);
1105 	rdt_last_cmd_clear();
1106 
1107 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1108 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1109 		ret = -EINVAL;
1110 		goto out_unlock;
1111 	}
1112 
1113 	r->mon.mbm_assign_on_mkdir = value;
1114 
1115 out_unlock:
1116 	mutex_unlock(&rdtgroup_mutex);
1117 
1118 	return ret ?: nbytes;
1119 }
1120 
1121 /*
1122  * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
1123  *			 domain @d. Called when mbm_assign_mode is changed.
1124  */
1125 static void mbm_cntr_free_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1126 {
1127 	memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs);
1128 }
1129 
1130 /*
1131  * resctrl_reset_rmid_all() - Reset all non-architectural state for all the
1132  *			      supported RMIDs.
1133  */
1134 static void resctrl_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1135 {
1136 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
1137 	enum resctrl_event_id evt;
1138 	int idx;
1139 
1140 	for_each_mbm_event_id(evt) {
1141 		if (!resctrl_is_mon_event_enabled(evt))
1142 			continue;
1143 		idx = MBM_STATE_IDX(evt);
1144 		memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit);
1145 	}
1146 }
1147 
1148 /*
1149  * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
1150  * pair in the domain.
1151  *
1152  * Assign the counter if @assign is true else unassign the counter. Reset the
1153  * associated non-architectural state.
1154  */
1155 static void rdtgroup_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1156 				 enum resctrl_event_id evtid, u32 rmid, u32 closid,
1157 				 u32 cntr_id, bool assign)
1158 {
1159 	struct mbm_state *m;
1160 
1161 	resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign);
1162 
1163 	m = get_mbm_state(d, closid, rmid, evtid);
1164 	if (m)
1165 		memset(m, 0, sizeof(*m));
1166 }
1167 
1168 /*
1169  * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
1170  * pointed to by @mevt and the resctrl group @rdtgrp within the domain @d.
1171  *
1172  * Return:
1173  * 0 on success, < 0 on failure.
1174  */
1175 static int rdtgroup_alloc_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1176 				      struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1177 {
1178 	int cntr_id;
1179 
1180 	/* No action required if the counter is assigned already. */
1181 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1182 	if (cntr_id >= 0)
1183 		return 0;
1184 
1185 	cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid);
1186 	if (cntr_id < 0) {
1187 		rdt_last_cmd_printf("Failed to allocate counter for %s in domain %d\n",
1188 				    mevt->name, d->hdr.id);
1189 		return cntr_id;
1190 	}
1191 
1192 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true);
1193 
1194 	return 0;
1195 }
1196 
1197 /*
1198  * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
1199  * @mevt to the resctrl group @rdtgrp. Assign counters to all domains if @d is
1200  * NULL; otherwise, assign the counter to the specified domain @d.
1201  *
1202  * If all counters in a domain are already in use, rdtgroup_alloc_assign_cntr()
1203  * will fail. The assignment process will abort at the first failure encountered
1204  * during domain traversal, which may result in the event being only partially
1205  * assigned.
1206  *
1207  * Return:
1208  * 0 on success, < 0 on failure.
1209  */
1210 static int rdtgroup_assign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1211 				      struct mon_evt *mevt)
1212 {
1213 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1214 	int ret = 0;
1215 
1216 	if (!d) {
1217 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1218 			ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1219 			if (ret)
1220 				return ret;
1221 		}
1222 	} else {
1223 		ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1224 	}
1225 
1226 	return ret;
1227 }
1228 
1229 /*
1230  * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
1231  *			     a new group is created.
1232  *
1233  * Each group can accommodate two counters per domain: one for the total
1234  * event and one for the local event. Assignments may fail due to the limited
1235  * number of counters. However, it is not necessary to fail the group creation
1236  * and thus no failure is returned. Users have the option to modify the
1237  * counter assignments after the group has been created.
1238  */
1239 void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
1240 {
1241 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1242 
1243 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) ||
1244 	    !r->mon.mbm_assign_on_mkdir)
1245 		return;
1246 
1247 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1248 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1249 					   &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1250 
1251 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1252 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1253 					   &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1254 }
1255 
1256 /*
1257  * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
1258  * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
1259  */
1260 static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1261 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1262 {
1263 	int cntr_id;
1264 
1265 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1266 
1267 	/* If there is no cntr_id assigned, nothing to do */
1268 	if (cntr_id < 0)
1269 		return;
1270 
1271 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
1272 
1273 	mbm_cntr_free(d, cntr_id);
1274 }
1275 
1276 /*
1277  * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
1278  * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
1279  * the counters from all the domains if @d is NULL else unassign from @d.
1280  */
1281 static void rdtgroup_unassign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1282 					 struct mon_evt *mevt)
1283 {
1284 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1285 
1286 	if (!d) {
1287 		list_for_each_entry(d, &r->mon_domains, hdr.list)
1288 			rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1289 	} else {
1290 		rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1291 	}
1292 }
1293 
1294 /*
1295  * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
1296  *			       Called when a group is deleted.
1297  */
1298 void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
1299 {
1300 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1301 
1302 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
1303 		return;
1304 
1305 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1306 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1307 					     &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1308 
1309 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1310 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1311 					     &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1312 }
1313 
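/*
 * resctrl_parse_mem_transactions() - Parse a comma-separated list of memory
 * transaction names (e.g. "local_reads,remote_reads") from @tok and OR the
 * matching mbm_transactions[] bits into @val.
 */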
1314 static int resctrl_parse_mem_transactions(char *tok, u32 *val)
1315 {
1316 	u32 temp_val = 0;
1317 	char *evt_str;
1318 	bool found;
1319 	int i;
1320 
1321 next_config:
1322 	if (!tok || tok[0] == '\0') {
1323 		*val = temp_val;
1324 		return 0;
1325 	}
1326 
1327 	/* Start processing the strings for each memory transaction type */
1328 	evt_str = strim(strsep(&tok, ","));
1329 	found = false;
1330 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1331 		if (!strcmp(mbm_transactions[i].name, evt_str)) {
1332 			temp_val |= mbm_transactions[i].val;
1333 			found = true;
1334 			break;
1335 		}
1336 	}
1337 
1338 	if (!found) {
1339 		rdt_last_cmd_printf("Invalid memory transaction type %s\n", evt_str);
1340 		return -EINVAL;
1341 	}
1342 
1343 	goto next_config;
1344 }
1345 
1346 /*
1347  * rdtgroup_update_cntr_event - Update the counter assignments for the event
1348  *				in a group.
1349  * @r:		Resource to which update needs to be done.
1350  * @rdtgrp:	Resctrl group.
1351  * @evtid:	MBM monitor event.
1352  */
1353 static void rdtgroup_update_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1354 				       enum resctrl_event_id evtid)
1355 {
1356 	struct rdt_l3_mon_domain *d;
1357 	int cntr_id;
1358 
1359 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1360 		cntr_id = mbm_cntr_get(r, d, rdtgrp, evtid);
1361 		if (cntr_id >= 0)
1362 			rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid,
1363 					     rdtgrp->closid, cntr_id, true);
1364 	}
1365 }
1366 
1367 /*
1368  * resctrl_update_cntr_allrdtgrp - Update the counter assignments for the event
1369  *				   for all the groups.
1370  * @mevt:	MBM monitor event.
1371  */
1372 static void resctrl_update_cntr_allrdtgrp(struct mon_evt *mevt)
1373 {
1374 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1375 	struct rdtgroup *prgrp, *crgrp;
1376 
1377 	/*
1378 	 * Find all the groups where the event is assigned and update the
1379 	 * configuration of existing assignments.
1380 	 */
1381 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1382 		rdtgroup_update_cntr_event(r, prgrp, mevt->evtid);
1383 
1384 		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
1385 			rdtgroup_update_cntr_event(r, crgrp, mevt->evtid);
1386 	}
1387 }
1388 
1389 ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
1390 			   loff_t off)
1391 {
1392 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1393 	struct rdt_resource *r;
1394 	u32 evt_cfg = 0;
1395 	int ret = 0;
1396 
1397 	/* Valid input requires a trailing newline */
1398 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1399 		return -EINVAL;
1400 
1401 	buf[nbytes - 1] = '\0';
1402 
1403 	cpus_read_lock();
1404 	mutex_lock(&rdtgroup_mutex);
1405 
1406 	rdt_last_cmd_clear();
1407 
1408 	r = resctrl_arch_get_resource(mevt->rid);
1409 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1410 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1411 		ret = -EINVAL;
1412 		goto out_unlock;
1413 	}
1414 
1415 	ret = resctrl_parse_mem_transactions(buf, &evt_cfg);
1416 	if (!ret && mevt->evt_cfg != evt_cfg) {
1417 		mevt->evt_cfg = evt_cfg;
1418 		resctrl_update_cntr_allrdtgrp(mevt);
1419 	}
1420 
1421 out_unlock:
1422 	mutex_unlock(&rdtgroup_mutex);
1423 	cpus_read_unlock();
1424 
1425 	return ret ?: nbytes;
1426 }
1427 
1428 int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
1429 				 struct seq_file *s, void *v)
1430 {
1431 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1432 	bool enabled;
1433 
1434 	mutex_lock(&rdtgroup_mutex);
1435 	enabled = resctrl_arch_mbm_cntr_assign_enabled(r);
1436 
1437 	if (r->mon.mbm_cntr_assignable) {
1438 		if (enabled)
1439 			seq_puts(s, "[mbm_event]\n");
1440 		else
1441 			seq_puts(s, "[default]\n");
1442 
1443 		if (!IS_ENABLED(CONFIG_RESCTRL_ASSIGN_FIXED)) {
1444 			if (enabled)
1445 				seq_puts(s, "default\n");
1446 			else
1447 				seq_puts(s, "mbm_event\n");
1448 		}
1449 	} else {
1450 		seq_puts(s, "[default]\n");
1451 	}
1452 
1453 	mutex_unlock(&rdtgroup_mutex);
1454 
1455 	return 0;
1456 }
1457 
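/*
 * resctrl_mbm_assign_mode_write() - Switch between the "default" and
 * "mbm_event" counter assignment modes, e.g. by writing "mbm_event\n".
 * Changing the mode resets the assignable counters and the non-architectural
 * MBM state in every domain.
 */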
1458 ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
1459 				      size_t nbytes, loff_t off)
1460 {
1461 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1462 	struct rdt_l3_mon_domain *d;
1463 	int ret = 0;
1464 	bool enable;
1465 
1466 	/* Valid input requires a trailing newline */
1467 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1468 		return -EINVAL;
1469 
1470 	buf[nbytes - 1] = '\0';
1471 
1472 	cpus_read_lock();
1473 	mutex_lock(&rdtgroup_mutex);
1474 
1475 	rdt_last_cmd_clear();
1476 
1477 	if (!strcmp(buf, "default")) {
1478 		enable = 0;
1479 	} else if (!strcmp(buf, "mbm_event")) {
1480 		if (r->mon.mbm_cntr_assignable) {
1481 			enable = 1;
1482 		} else {
1483 			ret = -EINVAL;
1484 			rdt_last_cmd_puts("mbm_event mode is not supported\n");
1485 			goto out_unlock;
1486 		}
1487 	} else {
1488 		ret = -EINVAL;
1489 		rdt_last_cmd_puts("Unsupported assign mode\n");
1490 		goto out_unlock;
1491 	}
1492 
1493 	if (enable != resctrl_arch_mbm_cntr_assign_enabled(r)) {
1494 		ret = resctrl_arch_mbm_cntr_assign_set(r, enable);
1495 		if (ret)
1496 			goto out_unlock;
1497 
1498 		/* Update the visibility of BMEC related files */
1499 		resctrl_bmec_files_show(r, NULL, !enable);
1500 
1501 		/*
1502 		 * Initialize the default memory transaction values for
1503 		 * total and local events.
1504 		 */
1505 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1506 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1507 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1508 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1509 									   (READS_TO_LOCAL_MEM |
1510 									    READS_TO_LOCAL_S_MEM |
1511 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1512 		/* Enable auto assignment when switching to "mbm_event" mode */
1513 		if (enable)
1514 			r->mon.mbm_assign_on_mkdir = true;
1515 		/*
1516 		 * Reset all the non-architectural RMID state and assignable counters.
1517 		 */
1518 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1519 			mbm_cntr_free_all(r, d);
1520 			resctrl_reset_rmid_all(r, d);
1521 		}
1522 	}
1523 
1524 out_unlock:
1525 	mutex_unlock(&rdtgroup_mutex);
1526 	cpus_read_unlock();
1527 
1528 	return ret ?: nbytes;
1529 }
1530 
1531 int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of,
1532 			       struct seq_file *s, void *v)
1533 {
1534 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1535 	struct rdt_l3_mon_domain *dom;
1536 	bool sep = false;
1537 
1538 	cpus_read_lock();
1539 	mutex_lock(&rdtgroup_mutex);
1540 
1541 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1542 		if (sep)
1543 			seq_putc(s, ';');
1544 
1545 		seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs);
1546 		sep = true;
1547 	}
1548 	seq_putc(s, '\n');
1549 
1550 	mutex_unlock(&rdtgroup_mutex);
1551 	cpus_read_unlock();
1552 	return 0;
1553 }
1554 
1555 int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of,
1556 				     struct seq_file *s, void *v)
1557 {
1558 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1559 	struct rdt_l3_mon_domain *dom;
1560 	bool sep = false;
1561 	u32 cntrs, i;
1562 	int ret = 0;
1563 
1564 	cpus_read_lock();
1565 	mutex_lock(&rdtgroup_mutex);
1566 
1567 	rdt_last_cmd_clear();
1568 
1569 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1570 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1571 		ret = -EINVAL;
1572 		goto out_unlock;
1573 	}
1574 
1575 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1576 		if (sep)
1577 			seq_putc(s, ';');
1578 
1579 		cntrs = 0;
1580 		for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
1581 			if (!dom->cntr_cfg[i].rdtgrp)
1582 				cntrs++;
1583 		}
1584 
1585 		seq_printf(s, "%d=%u", dom->hdr.id, cntrs);
1586 		sep = true;
1587 	}
1588 	seq_putc(s, '\n');
1589 
1590 out_unlock:
1591 	mutex_unlock(&rdtgroup_mutex);
1592 	cpus_read_unlock();
1593 
1594 	return ret;
1595 }
1596 
1597 int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v)
1598 {
1599 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1600 	struct rdt_l3_mon_domain *d;
1601 	struct rdtgroup *rdtgrp;
1602 	struct mon_evt *mevt;
1603 	int ret = 0;
1604 	bool sep;
1605 
1606 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1607 	if (!rdtgrp) {
1608 		ret = -ENOENT;
1609 		goto out_unlock;
1610 	}
1611 
1612 	rdt_last_cmd_clear();
1613 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1614 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1615 		ret = -EINVAL;
1616 		goto out_unlock;
1617 	}
1618 
1619 	for_each_mon_event(mevt) {
1620 		if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
1621 			continue;
1622 
1623 		sep = false;
1624 		seq_printf(s, "%s:", mevt->name);
1625 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1626 			if (sep)
1627 				seq_putc(s, ';');
1628 
1629 			if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0)
1630 				seq_printf(s, "%d=_", d->hdr.id);
1631 			else
1632 				seq_printf(s, "%d=e", d->hdr.id);
1633 
1634 			sep = true;
1635 		}
1636 		seq_putc(s, '\n');
1637 	}
1638 
1639 out_unlock:
1640 	rdtgroup_kn_unlock(of->kn);
1641 
1642 	return ret;
1643 }
1644 
1645 /*
1646  * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
1647  * event name.
1648  */
1649 static struct mon_evt *mbm_get_mon_event_by_name(struct rdt_resource *r, char *name)
1650 {
1651 	struct mon_evt *mevt;
1652 
1653 	for_each_mon_event(mevt) {
1654 		if (mevt->rid == r->rid && mevt->enabled &&
1655 		    resctrl_is_mbm_event(mevt->evtid) &&
1656 		    !strcmp(mevt->name, name))
1657 			return mevt;
1658 	}
1659 
1660 	return NULL;
1661 }
1662 
1663 static int rdtgroup_modify_assign_state(char *assign, struct rdt_l3_mon_domain *d,
1664 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1665 {
1666 	int ret = 0;
1667 
1668 	if (!assign || strlen(assign) != 1)
1669 		return -EINVAL;
1670 
1671 	switch (*assign) {
1672 	case 'e':
1673 		ret = rdtgroup_assign_cntr_event(d, rdtgrp, mevt);
1674 		break;
1675 	case '_':
1676 		rdtgroup_unassign_cntr_event(d, rdtgrp, mevt);
1677 		break;
1678 	default:
1679 		ret = -EINVAL;
1680 		break;
1681 	}
1682 
1683 	return ret;
1684 }
1685 
1686 static int resctrl_parse_mbm_assignment(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1687 					char *event, char *tok)
1688 {
1689 	struct rdt_l3_mon_domain *d;
1690 	unsigned long dom_id = 0;
1691 	char *dom_str, *id_str;
1692 	struct mon_evt *mevt;
1693 	int ret;
1694 
1695 	mevt = mbm_get_mon_event_by_name(r, event);
1696 	if (!mevt) {
1697 		rdt_last_cmd_printf("Invalid event %s\n", event);
1698 		return -ENOENT;
1699 	}
1700 
1701 next:
1702 	if (!tok || tok[0] == '\0')
1703 		return 0;
1704 
1705 	/* Start processing the strings for each domain */
1706 	dom_str = strim(strsep(&tok, ";"));
1707 
1708 	id_str = strsep(&dom_str, "=");
1709 
1710 	/* Check for domain id '*' which means all domains */
1711 	if (id_str && *id_str == '*') {
1712 		ret = rdtgroup_modify_assign_state(dom_str, NULL, rdtgrp, mevt);
1713 		if (ret)
1714 			rdt_last_cmd_printf("Assign operation '%s:*=%s' failed\n",
1715 					    event, dom_str);
1716 		return ret;
1717 	} else if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1718 		rdt_last_cmd_puts("Missing domain id\n");
1719 		return -EINVAL;
1720 	}
1721 
1722 	/* Verify if the dom_id is valid */
1723 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1724 		if (d->hdr.id == dom_id) {
1725 			ret = rdtgroup_modify_assign_state(dom_str, d, rdtgrp, mevt);
1726 			if (ret) {
1727 				rdt_last_cmd_printf("Assign operation '%s:%ld=%s' failed\n",
1728 						    event, dom_id, dom_str);
1729 				return ret;
1730 			}
1731 			goto next;
1732 		}
1733 	}
1734 
1735 	rdt_last_cmd_printf("Invalid domain id %ld\n", dom_id);
1736 	return -EINVAL;
1737 }
1738 
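/*
 * mbm_L3_assignments_write() - Update the counter assignments for this group.
 *
 * Each line of input has the form
 *   "<event>:<domain id>=<state>[;<domain id>=<state>...]"
 * where <state> is 'e' to assign a counter or '_' to unassign one, and a
 * domain id of '*' applies the state to all domains,
 * e.g. "mbm_total_bytes:0=e;1=_".
 */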
1739 ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
1740 				 size_t nbytes, loff_t off)
1741 {
1742 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1743 	struct rdtgroup *rdtgrp;
1744 	char *token, *event;
1745 	int ret = 0;
1746 
1747 	/* Valid input requires a trailing newline */
1748 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1749 		return -EINVAL;
1750 
1751 	buf[nbytes - 1] = '\0';
1752 
1753 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1754 	if (!rdtgrp) {
1755 		rdtgroup_kn_unlock(of->kn);
1756 		return -ENOENT;
1757 	}
1758 	rdt_last_cmd_clear();
1759 
1760 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1761 		rdt_last_cmd_puts("mbm_event mode is not enabled\n");
1762 		rdtgroup_kn_unlock(of->kn);
1763 		return -EINVAL;
1764 	}
1765 
1766 	while ((token = strsep(&buf, "\n")) != NULL) {
1767 		/*
1768 		 * The write command follows the following format:
1769 		 * The write command has the following format:
1770 		 * Extract the event name first.
1771 		 */
1772 		event = strsep(&token, ":");
1773 
1774 		ret = resctrl_parse_mbm_assignment(r, rdtgrp, event, token);
1775 		if (ret)
1776 			break;
1777 	}
1778 
1779 	rdtgroup_kn_unlock(of->kn);
1780 
1781 	return ret ?: nbytes;
1782 }
1783 
1784 /**
1785  * resctrl_l3_mon_resource_init() - Initialise global monitoring structures.
1786  *
1787  * Allocate and initialise global monitor resources that do not belong to a
1788  * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
1789  * Called once during boot after the struct rdt_resource's have been configured
1790  * but before the filesystem is mounted.
1791  * Resctrl's cpuhp callbacks may be called before this point to bring a domain
1792  * online.
1793  *
1794  * Return: 0 for success, or -ENOMEM.
1795  */
1796 int resctrl_l3_mon_resource_init(void)
1797 {
1798 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1799 	int ret;
1800 
1801 	if (!r->mon_capable)
1802 		return 0;
1803 
1804 	ret = dom_data_init(r);
1805 	if (ret)
1806 		return ret;
1807 
1808 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
1809 		mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
1810 		resctrl_file_fflags_init("mbm_total_bytes_config",
1811 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1812 	}
1813 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
1814 		mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].configurable = true;
1815 		resctrl_file_fflags_init("mbm_local_bytes_config",
1816 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1817 	}
1818 
1819 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1820 		mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
1821 	else if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1822 		mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
1823 
1824 	if (r->mon.mbm_cntr_assignable) {
1825 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1826 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1827 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1828 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1829 									   (READS_TO_LOCAL_MEM |
1830 									    READS_TO_LOCAL_S_MEM |
1831 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1832 		r->mon.mbm_assign_on_mkdir = true;
1833 		resctrl_file_fflags_init("num_mbm_cntrs",
1834 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1835 		resctrl_file_fflags_init("available_mbm_cntrs",
1836 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1837 		resctrl_file_fflags_init("event_filter", RFTYPE_ASSIGN_CONFIG);
1838 		resctrl_file_fflags_init("mbm_assign_on_mkdir", RFTYPE_MON_INFO |
1839 					 RFTYPE_RES_CACHE);
1840 		resctrl_file_fflags_init("mbm_L3_assignments", RFTYPE_MON_BASE);
1841 	}
1842 
1843 	return 0;
1844 }
1845 
1846 void resctrl_l3_mon_resource_exit(void)
1847 {
1848 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1849 
1850 	dom_data_exit(r);
1851 }
1852