xref: /linux/fs/resctrl/monitor.c (revision 7e6df9614546ae7eb1f1b2074d7b6039bb01540d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resource Director Technology (RDT)
4  * - Monitoring code
5  *
6  * Copyright (C) 2017 Intel Corporation
7  *
8  * Author:
9  *    Vikas Shivappa <vikas.shivappa@intel.com>
10  *
11  * This replaces the perf-based cqm.c, but reuses a lot of code and
12  * data structures originally from Peter Zijlstra and Matt Fleming.
13  *
14  * More information about RDT can be found in the Intel (R) x86 Architecture
15  * Software Developer Manual June 2016, volume 3, section 17.17.
16  */
17 
18 #define pr_fmt(fmt)	"resctrl: " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/resctrl.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 
25 #include "internal.h"
26 
27 #define CREATE_TRACE_POINTS
28 
29 #include "monitor_trace.h"
30 
31 /**
32  * struct rmid_entry - dirty tracking for all RMID.
33  * @closid:	The CLOSID for this entry.
34  * @rmid:	The RMID for this entry.
35  * @busy:	The number of domains with cached data using this RMID.
36  * @list:	Member of the rmid_free_lru list when busy == 0.
37  *
38  * Depending on the architecture the correct monitor is accessed using
39  * both @closid and @rmid, or @rmid only.
40  *
41  * Take the rdtgroup_mutex when accessing.
42  */
43 struct rmid_entry {
44 	u32				closid;
45 	u32				rmid;
46 	int				busy;
47 	struct list_head		list;
48 };
49 
50 /*
51  * @rmid_free_lru - A least recently used list of free RMIDs
52  *     These RMIDs are guaranteed to have an occupancy less than the
53  *     threshold occupancy
54  */
55 static LIST_HEAD(rmid_free_lru);
56 
57 /*
58  * @closid_num_dirty_rmid - The number of dirty RMIDs each CLOSID has.
59  *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
60  *     Indexed by CLOSID. Protected by rdtgroup_mutex.
61  */
62 static u32 *closid_num_dirty_rmid;
63 
64 /*
65  * @rmid_limbo_count - count of currently unused but (potentially)
66  *     dirty RMIDs.
67  *     This counts RMIDs that no one is currently using but that
68  *     may have an occupancy value > resctrl_rmid_realloc_threshold. The
69  *     user can change the threshold occupancy value.
70  */
71 static unsigned int rmid_limbo_count;
72 
73 /*
74  * @rmid_ptrs - The array of entries used by the limbo and free lists.
75  */
76 static struct rmid_entry	*rmid_ptrs;
77 
78 /*
79  * This is the threshold cache occupancy in bytes at which we will consider an
80  * RMID available for re-allocation.
81  */
82 unsigned int resctrl_rmid_realloc_threshold;
83 
84 /*
85  * This is the maximum value for the reallocation threshold, in bytes.
86  */
87 unsigned int resctrl_rmid_realloc_limit;
88 
89 /*
90  * x86 and arm64 differ in their handling of monitoring.
91  * x86's RMID are independent numbers, there is only one source of traffic
92  * with an RMID value of '1'.
93  * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
94  * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
95  * value is no longer unique.
96  * To account for this, resctrl uses an index. On x86 this is just the RMID,
97  * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
98  *
99  * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
100  * must accept an attempt to read every index.
101  */
102 static inline struct rmid_entry *__rmid_entry(u32 idx)
103 {
104 	struct rmid_entry *entry;
105 	u32 closid, rmid;
106 
107 	entry = &rmid_ptrs[idx];
108 	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
109 
110 	WARN_ON_ONCE(entry->closid != closid);
111 	WARN_ON_ONCE(entry->rmid != rmid);
112 
113 	return entry;
114 }
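
/*
 * Illustrative sketch (not from this file): the index used above behaves
 * as if resctrl_arch_rmid_idx_encode() were implemented along these
 * lines. The x86 form is the identity mapping on RMID; the packed form
 * and the num_rmid_per_closid name are purely hypothetical, showing how
 * an arm64-style port could fold CLOSID and RMID into one index space:
 *
 *	u32 example_idx_encode_x86(u32 closid, u32 rmid)
 *	{
 *		return rmid;
 *	}
 *
 *	u32 example_idx_encode_packed(u32 closid, u32 rmid)
 *	{
 *		return closid * num_rmid_per_closid + rmid;
 *	}
 *
 * Either way, every index below idx_limit decodes back to exactly one
 * (CLOSID, RMID) pair via resctrl_arch_rmid_idx_decode().
 */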
115 
116 static void limbo_release_entry(struct rmid_entry *entry)
117 {
118 	lockdep_assert_held(&rdtgroup_mutex);
119 
120 	rmid_limbo_count--;
121 	list_add_tail(&entry->list, &rmid_free_lru);
122 
123 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
124 		closid_num_dirty_rmid[entry->closid]--;
125 }
126 
127 /*
128  * Check the RMIDs that are marked as busy for this domain. If the
129  * reported LLC occupancy is below the threshold, clear the busy bit and
130  * decrement the count. If the busy count reaches zero on an RMID, we
131  * free the RMID.
132  */
133 void __check_limbo(struct rdt_l3_mon_domain *d, bool force_free)
134 {
135 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
136 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
137 	struct rmid_entry *entry;
138 	u32 idx, cur_idx = 1;
139 	void *arch_mon_ctx;
140 	void *arch_priv;
141 	bool rmid_dirty;
142 	u64 val = 0;
143 
144 	arch_priv = mon_event_all[QOS_L3_OCCUP_EVENT_ID].arch_priv;
145 	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
146 	if (IS_ERR(arch_mon_ctx)) {
147 		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
148 				    PTR_ERR(arch_mon_ctx));
149 		return;
150 	}
151 
152 	/*
153 	 * Skip RMID 0; start from RMID 1 and check all the RMIDs that
154 	 * are marked as busy for occupancy < threshold. If the occupancy
155 	 * is below the threshold, decrement the busy counter of the
156 	 * RMID and move it to the free list when the counter reaches 0.
157 	 */
158 	for (;;) {
159 		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
160 		if (idx >= idx_limit)
161 			break;
162 
163 		entry = __rmid_entry(idx);
164 		if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
165 					   QOS_L3_OCCUP_EVENT_ID, arch_priv, &val,
166 					   arch_mon_ctx)) {
167 			rmid_dirty = true;
168 		} else {
169 			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
170 
171 			/*
172 			 * x86's CLOSID and RMID are independent numbers, so the entry's
173 			 * CLOSID is an empty CLOSID (X86_RESCTRL_EMPTY_CLOSID). On Arm the
174 			 * RMID (PMG) extends the CLOSID (PARTID) space with bits that aren't
175 			 * used to select the configuration. It is thus necessary to track both
176 			 * CLOSID and RMID because there may be dependencies between them
177 			 * on some architectures.
178 			 */
179 			trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val);
180 		}
181 
182 		if (force_free || !rmid_dirty) {
183 			clear_bit(idx, d->rmid_busy_llc);
184 			if (!--entry->busy)
185 				limbo_release_entry(entry);
186 		}
187 		cur_idx = idx + 1;
188 	}
189 
190 	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
191 }
192 
193 bool has_busy_rmid(struct rdt_l3_mon_domain *d)
194 {
195 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
196 
197 	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
198 }
199 
200 static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
201 {
202 	struct rmid_entry *itr;
203 	u32 itr_idx, cmp_idx;
204 
205 	if (list_empty(&rmid_free_lru))
206 		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);
207 
208 	list_for_each_entry(itr, &rmid_free_lru, list) {
209 		/*
210 		 * Get the index of this free RMID, and the index it would need
211 		 * to be if it were used with this CLOSID.
212 		 * If the CLOSID is irrelevant on this architecture, the two
213 		 * index values are always the same on every entry and thus the
214 		 * very first entry will be returned.
215 		 */
216 		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
217 		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);
218 
219 		if (itr_idx == cmp_idx)
220 			return itr;
221 	}
222 
223 	return ERR_PTR(-ENOSPC);
224 }
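
/*
 * Worked example (illustrative): with CLOSID-dependent indexing, a free
 * entry that decodes to (closid = 3, rmid = 7) only satisfies a request
 * for closid == 3, because resctrl_arch_rmid_idx_encode(3, 7) differs
 * from resctrl_arch_rmid_idx_encode(5, 7). On x86 the closid parameter
 * is ignored, both encodings collapse to 7, and the head of the free
 * list is returned on the first iteration.
 */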
225 
226 /**
227  * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
228  *                                  RMID are clean, or the CLOSID that has
229  *                                  the most clean RMID.
230  *
231  * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
232  * may not be able to allocate clean RMID. To avoid this the allocator will
233  * choose the CLOSID with the most clean RMID.
234  *
235  * When the CLOSID and RMID are independent numbers, the first free CLOSID will
236  * be returned.
237  */
238 int resctrl_find_cleanest_closid(void)
239 {
240 	u32 cleanest_closid = ~0;
241 	int i = 0;
242 
243 	lockdep_assert_held(&rdtgroup_mutex);
244 
245 	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
246 		return -EIO;
247 
248 	for (i = 0; i < closids_supported(); i++) {
249 		int num_dirty;
250 
251 		if (closid_allocated(i))
252 			continue;
253 
254 		num_dirty = closid_num_dirty_rmid[i];
255 		if (num_dirty == 0)
256 			return i;
257 
258 		if (cleanest_closid == ~0)
259 			cleanest_closid = i;
260 
261 		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
262 			cleanest_closid = i;
263 	}
264 
265 	if (cleanest_closid == ~0)
266 		return -ENOSPC;
267 
268 	return cleanest_closid;
269 }
270 
271 /*
272  * For MPAM the RMID value is not unique, and has to be considered with
273  * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
274  * allows all domains to be managed by a single free list.
275  * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
276  */
277 int alloc_rmid(u32 closid)
278 {
279 	struct rmid_entry *entry;
280 
281 	lockdep_assert_held(&rdtgroup_mutex);
282 
283 	entry = resctrl_find_free_rmid(closid);
284 	if (IS_ERR(entry))
285 		return PTR_ERR(entry);
286 
287 	list_del(&entry->list);
288 	return entry->rmid;
289 }
290 
291 static void add_rmid_to_limbo(struct rmid_entry *entry)
292 {
293 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
294 	struct rdt_l3_mon_domain *d;
295 	u32 idx;
296 
297 	lockdep_assert_held(&rdtgroup_mutex);
298 
299 	/* Walking r->domains, ensure it can't race with cpuhp */
300 	lockdep_assert_cpus_held();
301 
302 	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
303 
304 	entry->busy = 0;
305 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
306 		/*
307 		 * For the first limbo RMID in the domain,
308 		 * set up the limbo worker.
309 		 */
310 		if (!has_busy_rmid(d))
311 			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
312 						RESCTRL_PICK_ANY_CPU);
313 		set_bit(idx, d->rmid_busy_llc);
314 		entry->busy++;
315 	}
316 
317 	rmid_limbo_count++;
318 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
319 		closid_num_dirty_rmid[entry->closid]++;
320 }
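
/*
 * For reference, the resulting RMID lifecycle (a summary of the code
 * above, not a new mechanism):
 *
 *	alloc_rmid()  ->  in use by an rdtgroup
 *	free_rmid()   ->  limbo: the rmid_busy_llc bit is set in every
 *			  domain and entry->busy counts those domains
 *	__check_limbo()  ->  once occupancy drops below
 *			  resctrl_rmid_realloc_threshold in each domain,
 *			  entry->busy reaches 0 and the entry returns to
 *			  rmid_free_lru
 *
 * When the llc_occupancy event is not enabled, free_rmid() skips limbo
 * and puts the entry straight back on the free list.
 */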
321 
322 void free_rmid(u32 closid, u32 rmid)
323 {
324 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
325 	struct rmid_entry *entry;
326 
327 	lockdep_assert_held(&rdtgroup_mutex);
328 
329 	/*
330 	 * Do not allow the default rmid to be freed. Comparing by index
331 	 * allows architectures that ignore the closid parameter to avoid an
332 	 * unnecessary check.
333 	 */
334 	if (!resctrl_arch_mon_capable() ||
335 	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
336 						RESCTRL_RESERVED_RMID))
337 		return;
338 
339 	entry = __rmid_entry(idx);
340 
341 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
342 		add_rmid_to_limbo(entry);
343 	else
344 		list_add_tail(&entry->list, &rmid_free_lru);
345 }
346 
347 static struct mbm_state *get_mbm_state(struct rdt_l3_mon_domain *d, u32 closid,
348 				       u32 rmid, enum resctrl_event_id evtid)
349 {
350 	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
351 	struct mbm_state *state;
352 
353 	if (!resctrl_is_mbm_event(evtid))
354 		return NULL;
355 
356 	state = d->mbm_states[MBM_STATE_IDX(evtid)];
357 
358 	return state ? &state[idx] : NULL;
359 }
360 
361 /*
362  * mbm_cntr_get() - Return the counter ID for the matching @evtid and @rdtgrp.
363  *
364  * Return:
365  * Valid counter ID on success, or -ENOENT on failure.
366  */
367 static int mbm_cntr_get(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
368 			struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
369 {
370 	int cntr_id;
371 
372 	if (!r->mon.mbm_cntr_assignable)
373 		return -ENOENT;
374 
375 	if (!resctrl_is_mbm_event(evtid))
376 		return -ENOENT;
377 
378 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
379 		if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp &&
380 		    d->cntr_cfg[cntr_id].evtid == evtid)
381 			return cntr_id;
382 	}
383 
384 	return -ENOENT;
385 }
386 
387 /*
388  * mbm_cntr_alloc() - Initialize and return a new counter ID in the domain @d.
389  * Caller must ensure that the specified event is not assigned already.
390  *
391  * Return:
392  * Valid counter ID on success, or -ENOSPC on failure.
393  */
394 static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
395 			  struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
396 {
397 	int cntr_id;
398 
399 	for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) {
400 		if (!d->cntr_cfg[cntr_id].rdtgrp) {
401 			d->cntr_cfg[cntr_id].rdtgrp = rdtgrp;
402 			d->cntr_cfg[cntr_id].evtid = evtid;
403 			return cntr_id;
404 		}
405 	}
406 
407 	return -ENOSPC;
408 }
409 
410 /*
411  * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
412  */
413 static void mbm_cntr_free(struct rdt_l3_mon_domain *d, int cntr_id)
414 {
415 	memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
416 }
417 
418 static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
419 {
420 	int cpu = smp_processor_id();
421 	u32 closid = rdtgrp->closid;
422 	u32 rmid = rdtgrp->mon.rmid;
423 	struct rdt_l3_mon_domain *d;
424 	int cntr_id = -ENOENT;
425 	struct mbm_state *m;
426 	u64 tval = 0;
427 
428 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
429 		rr->err = -EIO;
430 		return -EINVAL;
431 	}
432 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
433 
434 	if (rr->is_mbm_cntr) {
435 		cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evt->evtid);
436 		if (cntr_id < 0) {
437 			rr->err = -ENOENT;
438 			return -EINVAL;
439 		}
440 	}
441 
442 	if (rr->first) {
443 		if (rr->is_mbm_cntr)
444 			resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evt->evtid);
445 		else
446 			resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evt->evtid);
447 		m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
448 		if (m)
449 			memset(m, 0, sizeof(struct mbm_state));
450 		return 0;
451 	}
452 
453 	/* Reading a single domain, must be on a CPU in that domain. */
454 	if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
455 		return -EINVAL;
456 	if (rr->is_mbm_cntr)
457 		rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
458 						 rr->evt->evtid, &tval);
459 	else
460 		rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
461 						 rr->evt->evtid, rr->evt->arch_priv,
462 						 &tval, rr->arch_mon_ctx);
463 	if (rr->err)
464 		return rr->err;
465 
466 	rr->val += tval;
467 
468 	return 0;
469 }
470 
471 static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
472 {
473 	int cpu = smp_processor_id();
474 	u32 closid = rdtgrp->closid;
475 	u32 rmid = rdtgrp->mon.rmid;
476 	struct rdt_l3_mon_domain *d;
477 	u64 tval = 0;
478 	int err, ret;
479 
480 	/*
481 	 * Summing across domains is only done for systems that implement
482 	 * Sub-NUMA Cluster. There is no overlap with systems that support
483 	 * assignable counters.
484 	 */
485 	if (rr->is_mbm_cntr) {
486 		pr_warn_once("Summing domains using assignable counters is not supported\n");
487 		rr->err = -EINVAL;
488 		return -EINVAL;
489 	}
490 
491 	/* Summing domains that share a cache, must be on a CPU for that cache. */
492 	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
493 		return -EINVAL;
494 
495 	/*
496 	 * Legacy files must report the sum of an event across all
497 	 * domains that share the same L3 cache instance.
498 	 * Report success if a read from any domain succeeds, -EINVAL
499 	 * (translated to "Unavailable" for user space) if reads from
500 	 * all domains fail for any reason.
501 	 */
502 	ret = -EINVAL;
503 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
504 		if (d->ci_id != rr->ci->id)
505 			continue;
506 		err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
507 					     rr->evt->evtid, rr->evt->arch_priv,
508 					     &tval, rr->arch_mon_ctx);
509 		if (!err) {
510 			rr->val += tval;
511 			ret = 0;
512 		}
513 	}
514 
515 	if (ret)
516 		rr->err = ret;
517 
518 	return ret;
519 }
520 
521 static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
522 {
523 	switch (rr->r->rid) {
524 	case RDT_RESOURCE_L3:
525 		WARN_ON_ONCE(rr->evt->any_cpu);
526 		if (rr->hdr)
527 			return __l3_mon_event_count(rdtgrp, rr);
528 		else
529 			return __l3_mon_event_count_sum(rdtgrp, rr);
530 	default:
531 		rr->err = -EINVAL;
532 		return -EINVAL;
533 	}
534 }
535 
536 /*
537  * mbm_bw_count() - Update bw count from values previously read by
538  *		    __mon_event_count().
539  * @rdtgrp:	resctrl group associated with the CLOSID and RMID to identify
540  *		the cached mbm_state.
541  * @rr:		The struct rmid_read populated by __mon_event_count().
542  *
543  * Supporting function to calculate the memory bandwidth in MBps. The
544  * bytes value previously read by __mon_event_count() is compared with
545  * the value from the previous invocation. This must be called once per
546  * second to maintain values in MBps.
547  */
548 static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
549 {
550 	u64 cur_bw, bytes, cur_bytes;
551 	u32 closid = rdtgrp->closid;
552 	u32 rmid = rdtgrp->mon.rmid;
553 	struct rdt_l3_mon_domain *d;
554 	struct mbm_state *m;
555 
556 	if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
557 		return;
558 	d = container_of(rr->hdr, struct rdt_l3_mon_domain, hdr);
559 	m = get_mbm_state(d, closid, rmid, rr->evt->evtid);
560 	if (WARN_ON_ONCE(!m))
561 		return;
562 
563 	cur_bytes = rr->val;
564 	bytes = cur_bytes - m->prev_bw_bytes;
565 	m->prev_bw_bytes = cur_bytes;
566 
567 	cur_bw = bytes / SZ_1M;
568 
569 	m->prev_bw = cur_bw;
570 }
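
/*
 * Worked example (illustrative numbers): if the previous invocation
 * recorded m->prev_bw_bytes == 1000 * SZ_1M and this read returns
 * rr->val == 1300 * SZ_1M one second later, then bytes == 300 * SZ_1M
 * and m->prev_bw becomes 300, i.e. 300 MBps. The once-per-second call
 * interval is what lets a plain byte delta stand in for a rate.
 */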
571 
572 /*
573  * This is scheduled by mon_event_read() to read the CQM/MBM counters
574  * on a domain.
575  */
576 void mon_event_count(void *info)
577 {
578 	struct rdtgroup *rdtgrp, *entry;
579 	struct rmid_read *rr = info;
580 	struct list_head *head;
581 	int ret;
582 
583 	rdtgrp = rr->rgrp;
584 
585 	ret = __mon_event_count(rdtgrp, rr);
586 
587 	/*
588 	 * For Ctrl groups read data from child monitor groups and
589 	 * add them together. Count events which are read successfully.
590 	 * Discard the errors reported by the individual reads.
591 	 */
592 	head = &rdtgrp->mon.crdtgrp_list;
593 
594 	if (rdtgrp->type == RDTCTRL_GROUP) {
595 		list_for_each_entry(entry, head, mon.crdtgrp_list) {
596 			if (__mon_event_count(entry, rr) == 0)
597 				ret = 0;
598 		}
599 	}
600 
601 	/*
602 	 * __mon_event_count() calls for newly created monitor groups may
603 	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
604 	 * Discard error if any of the monitor event reads succeeded.
605 	 */
606 	if (ret == 0)
607 		rr->err = 0;
608 }
609 
610 static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
611 							struct rdt_resource *r)
612 {
613 	struct rdt_ctrl_domain *d;
614 
615 	lockdep_assert_cpus_held();
616 
617 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
618 		/* Find the domain that contains this CPU */
619 		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
620 			return d;
621 	}
622 
623 	return NULL;
624 }
625 
626 /*
627  * Feedback loop for MBA software controller (mba_sc)
628  *
629  * mba_sc is a feedback loop where we periodically read MBM counters and
630  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
631  * that:
632  *
633  *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
634  *
635  * This uses the MBM counters to measure the bandwidth and MBA throttle
636  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
637  * fact that resctrl rdtgroups have both monitoring and control.
638  *
639  * The frequency of the checks is 1s and we just tag along the MBM overflow
640  * timer. Having 1s interval makes the calculation of bandwidth simpler.
641  *
642  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
643  * be a need to increase the bandwidth to avoid unnecessarily restricting
644  * the L2 <-> L3 traffic.
645  *
646  * Since MBA controls the L2 external bandwidth whereas MBM measures the
647  * L3 external bandwidth, the following sequence could lead to such a
648  * situation.
649  *
650  * Consider an rdtgroup which had high L3 <-> memory traffic in its
651  * initial phases -> mba_sc kicks in and reduces the bandwidth percentage
652  * values -> but after some time the rdtgroup has mostly L2 <-> L3 traffic.
653  *
654  * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
655  * throttle MSRs already have low percentage values.  To avoid
656  * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
657  */
658 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_l3_mon_domain *dom_mbm)
659 {
660 	u32 closid, rmid, cur_msr_val, new_msr_val;
661 	struct mbm_state *pmbm_data, *cmbm_data;
662 	struct rdt_ctrl_domain *dom_mba;
663 	enum resctrl_event_id evt_id;
664 	struct rdt_resource *r_mba;
665 	struct list_head *head;
666 	struct rdtgroup *entry;
667 	u32 cur_bw, user_bw;
668 
669 	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
670 	evt_id = rgrp->mba_mbps_event;
671 
672 	closid = rgrp->closid;
673 	rmid = rgrp->mon.rmid;
674 	pmbm_data = get_mbm_state(dom_mbm, closid, rmid, evt_id);
675 	if (WARN_ON_ONCE(!pmbm_data))
676 		return;
677 
678 	dom_mba = get_ctrl_domain_from_cpu(smp_processor_id(), r_mba);
679 	if (!dom_mba) {
680 		pr_warn_once("Failure to get domain for MBA update\n");
681 		return;
682 	}
683 
684 	cur_bw = pmbm_data->prev_bw;
685 	user_bw = dom_mba->mbps_val[closid];
686 
687 	/* MBA resource doesn't support CDP */
688 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
689 
690 	/*
691 	 * For Ctrl groups read data from child monitor groups.
692 	 */
693 	head = &rgrp->mon.crdtgrp_list;
694 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
695 		cmbm_data = get_mbm_state(dom_mbm, entry->closid, entry->mon.rmid, evt_id);
696 		if (WARN_ON_ONCE(!cmbm_data))
697 			return;
698 		cur_bw += cmbm_data->prev_bw;
699 	}
700 
701 	/*
702 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
703 	 * bandwidth step is the bandwidth granularity specified by the
704 	 * hardware.
705 	 * Always increase throttling if current bandwidth is above the
706 	 * target set by user.
707 	 * But avoid thrashing up and down on every poll by checking
708 	 * whether a decrease in throttling is likely to push the group
709 	 * back over target. E.g. if currently throttling to 30% of bandwidth
710 	 * on a system with 10% granularity steps, check whether moving to
711 	 * 40% would go past the limit by multiplying current bandwidth by
712 	 * "(30 + 10) / 30".
713 	 */
714 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
715 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
716 	} else if (cur_msr_val < MAX_MBA_BW &&
717 		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
718 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
719 	} else {
720 		return;
721 	}
722 
723 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
724 }
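
/*
 * Worked example (illustrative numbers): assume bw_gran == min_bw == 10,
 * cur_msr_val == 30, user_bw == 1000 MBps and measured cur_bw == 600
 * MBps. Throttling is not increased since 600 < 1000. The relaxation
 * test asks whether 1000 > 600 * (30 + 10) / 30 == 800; it is, so the
 * throttle value steps up to 40. Had cur_bw been 900 MBps,
 * 900 * 40 / 30 == 1200 exceeds the target, so the MSR is left
 * unchanged to avoid thrashing.
 */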
725 
726 static void mbm_update_one_event(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
727 				 struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
728 {
729 	struct rmid_read rr = {0};
730 
731 	rr.r = r;
732 	rr.hdr = &d->hdr;
733 	rr.evt = &mon_event_all[evtid];
734 	if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
735 		rr.is_mbm_cntr = true;
736 	} else {
737 		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, evtid);
738 		if (IS_ERR(rr.arch_mon_ctx)) {
739 			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
740 					    PTR_ERR(rr.arch_mon_ctx));
741 			return;
742 		}
743 	}
744 
745 	__mon_event_count(rdtgrp, &rr);
746 
747 	/*
748 	 * If the software controller is enabled, compute the
749 	 * bandwidth for this event id.
750 	 */
751 	if (is_mba_sc(NULL))
752 		mbm_bw_count(rdtgrp, &rr);
753 
754 	if (rr.arch_mon_ctx)
755 		resctrl_arch_mon_ctx_free(rr.r, evtid, rr.arch_mon_ctx);
756 }
757 
758 static void mbm_update(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
759 		       struct rdtgroup *rdtgrp)
760 {
761 	/*
762 	 * This is protected from concurrent reads from user as both
763 	 * the user and overflow handler hold the global mutex.
764 	 */
765 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
766 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
767 
768 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
769 		mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
770 }
771 
772 /*
773  * Handler to scan the limbo list and move RMIDs whose occupancy is
774  * below the threshold to the free list.
775  */
776 void cqm_handle_limbo(struct work_struct *work)
777 {
778 	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
779 	struct rdt_l3_mon_domain *d;
780 
781 	cpus_read_lock();
782 	mutex_lock(&rdtgroup_mutex);
783 
784 	d = container_of(work, struct rdt_l3_mon_domain, cqm_limbo.work);
785 
786 	__check_limbo(d, false);
787 
788 	if (has_busy_rmid(d)) {
789 		d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
790 							   RESCTRL_PICK_ANY_CPU);
791 		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
792 					 delay);
793 	}
794 
795 	mutex_unlock(&rdtgroup_mutex);
796 	cpus_read_unlock();
797 }
798 
799 /**
800  * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
801  *                             domain.
802  * @dom:           The domain the limbo handler should run for.
803  * @delay_ms:      How far in the future the handler should run.
804  * @exclude_cpu:   Which CPU the handler should not run on,
805  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
806  */
807 void cqm_setup_limbo_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
808 			     int exclude_cpu)
809 {
810 	unsigned long delay = msecs_to_jiffies(delay_ms);
811 	int cpu;
812 
813 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
814 	dom->cqm_work_cpu = cpu;
815 
816 	if (cpu < nr_cpu_ids)
817 		schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
818 }
819 
820 void mbm_handle_overflow(struct work_struct *work)
821 {
822 	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
823 	struct rdtgroup *prgrp, *crgrp;
824 	struct rdt_l3_mon_domain *d;
825 	struct list_head *head;
826 	struct rdt_resource *r;
827 
828 	cpus_read_lock();
829 	mutex_lock(&rdtgroup_mutex);
830 
831 	/*
832 	 * If the filesystem has been unmounted this work no longer needs to
833 	 * run.
834 	 */
835 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
836 		goto out_unlock;
837 
838 	r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
839 	d = container_of(work, struct rdt_l3_mon_domain, mbm_over.work);
840 
841 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
842 		mbm_update(r, d, prgrp);
843 
844 		head = &prgrp->mon.crdtgrp_list;
845 		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
846 			mbm_update(r, d, crgrp);
847 
848 		if (is_mba_sc(NULL))
849 			update_mba_bw(prgrp, d);
850 	}
851 
852 	/*
853 	 * Re-check for housekeeping CPUs. This allows the overflow handler to
854 	 * move off a nohz_full CPU quickly.
855 	 */
856 	d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask,
857 						   RESCTRL_PICK_ANY_CPU);
858 	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);
859 
860 out_unlock:
861 	mutex_unlock(&rdtgroup_mutex);
862 	cpus_read_unlock();
863 }
864 
865 /**
866  * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
867  *                                domain.
868  * @dom:           The domain the overflow handler should run for.
869  * @delay_ms:      How far in the future the handler should run.
870  * @exclude_cpu:   Which CPU the handler should not run on,
871  *		   RESCTRL_PICK_ANY_CPU to pick any CPU.
872  */
873 void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom, unsigned long delay_ms,
874 				int exclude_cpu)
875 {
876 	unsigned long delay = msecs_to_jiffies(delay_ms);
877 	int cpu;
878 
879 	/*
880 	 * When a domain comes online there is no guarantee the filesystem is
881 	 * mounted. If not, there is no need to catch counter overflow.
882 	 */
883 	if (!resctrl_mounted || !resctrl_arch_mon_capable())
884 		return;
885 	cpu = cpumask_any_housekeeping(&dom->hdr.cpu_mask, exclude_cpu);
886 	dom->mbm_work_cpu = cpu;
887 
888 	if (cpu < nr_cpu_ids)
889 		schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
890 }
891 
892 static int dom_data_init(struct rdt_resource *r)
893 {
894 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
895 	u32 num_closid = resctrl_arch_get_num_closid(r);
896 	struct rmid_entry *entry = NULL;
897 	int err = 0, i;
898 	u32 idx;
899 
900 	mutex_lock(&rdtgroup_mutex);
901 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
902 		u32 *tmp;
903 
904 		/*
905 		 * If the architecture hasn't provided a sanitised value here,
906 		 * this may result in larger arrays than necessary. Resctrl will
907 		 * use a smaller system wide value based on the resources in
908 		 * use.
909 		 */
910 		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
911 		if (!tmp) {
912 			err = -ENOMEM;
913 			goto out_unlock;
914 		}
915 
916 		closid_num_dirty_rmid = tmp;
917 	}
918 
919 	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
920 	if (!rmid_ptrs) {
921 		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
922 			kfree(closid_num_dirty_rmid);
923 			closid_num_dirty_rmid = NULL;
924 		}
925 		err = -ENOMEM;
926 		goto out_unlock;
927 	}
928 
929 	for (i = 0; i < idx_limit; i++) {
930 		entry = &rmid_ptrs[i];
931 		INIT_LIST_HEAD(&entry->list);
932 
933 		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
934 		list_add_tail(&entry->list, &rmid_free_lru);
935 	}
936 
937 	/*
938 	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
939 	 * are always allocated. These are used for the rdtgroup_default
940 	 * control group, which will be setup later in resctrl_init().
941 	 */
942 	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
943 					   RESCTRL_RESERVED_RMID);
944 	entry = __rmid_entry(idx);
945 	list_del(&entry->list);
946 
947 out_unlock:
948 	mutex_unlock(&rdtgroup_mutex);
949 
950 	return err;
951 }
952 
953 static void dom_data_exit(struct rdt_resource *r)
954 {
955 	mutex_lock(&rdtgroup_mutex);
956 
957 	if (!r->mon_capable)
958 		goto out_unlock;
959 
960 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
961 		kfree(closid_num_dirty_rmid);
962 		closid_num_dirty_rmid = NULL;
963 	}
964 
965 	kfree(rmid_ptrs);
966 	rmid_ptrs = NULL;
967 
968 out_unlock:
969 	mutex_unlock(&rdtgroup_mutex);
970 }
971 
972 #define MON_EVENT(_eventid, _name, _res, _fp)	\
973 	[_eventid] = {				\
974 	.name			= _name,	\
975 	.evtid			= _eventid,	\
976 	.rid			= _res,		\
977 	.is_floating_point	= _fp,		\
978 }
979 
980 /*
981  * All available events. Architecture code calls
982  * resctrl_enable_mon_event() to set .enabled on the events that the
983  * system supports.
984  */
985 struct mon_evt mon_event_all[QOS_NUM_EVENTS] = {
986 	MON_EVENT(QOS_L3_OCCUP_EVENT_ID,		"llc_occupancy",	RDT_RESOURCE_L3,	false),
987 	MON_EVENT(QOS_L3_MBM_TOTAL_EVENT_ID,		"mbm_total_bytes",	RDT_RESOURCE_L3,	false),
988 	MON_EVENT(QOS_L3_MBM_LOCAL_EVENT_ID,		"mbm_local_bytes",	RDT_RESOURCE_L3,	false),
989 	MON_EVENT(PMT_EVENT_ENERGY,			"core_energy",		RDT_RESOURCE_PERF_PKG,	true),
990 	MON_EVENT(PMT_EVENT_ACTIVITY,			"activity",		RDT_RESOURCE_PERF_PKG,	true),
991 	MON_EVENT(PMT_EVENT_STALLS_LLC_HIT,		"stalls_llc_hit",	RDT_RESOURCE_PERF_PKG,	false),
992 	MON_EVENT(PMT_EVENT_C1_RES,			"c1_res",		RDT_RESOURCE_PERF_PKG,	false),
993 	MON_EVENT(PMT_EVENT_UNHALTED_CORE_CYCLES,	"unhalted_core_cycles",	RDT_RESOURCE_PERF_PKG,	false),
994 	MON_EVENT(PMT_EVENT_STALLS_LLC_MISS,		"stalls_llc_miss",	RDT_RESOURCE_PERF_PKG,	false),
995 	MON_EVENT(PMT_EVENT_AUTO_C6_RES,		"c6_res",		RDT_RESOURCE_PERF_PKG,	false),
996 	MON_EVENT(PMT_EVENT_UNHALTED_REF_CYCLES,	"unhalted_ref_cycles",	RDT_RESOURCE_PERF_PKG,	false),
997 	MON_EVENT(PMT_EVENT_UOPS_RETIRED,		"uops_retired",		RDT_RESOURCE_PERF_PKG,	false),
998 };
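
/*
 * For reference, the first MON_EVENT() entry above expands to a
 * designated initializer equivalent to:
 *
 *	[QOS_L3_OCCUP_EVENT_ID] = {
 *		.name			= "llc_occupancy",
 *		.evtid			= QOS_L3_OCCUP_EVENT_ID,
 *		.rid			= RDT_RESOURCE_L3,
 *		.is_floating_point	= false,
 *	},
 */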
999 
1000 bool resctrl_enable_mon_event(enum resctrl_event_id eventid, bool any_cpu,
1001 			      unsigned int binary_bits, void *arch_priv)
1002 {
1003 	if (WARN_ON_ONCE(eventid < QOS_FIRST_EVENT || eventid >= QOS_NUM_EVENTS ||
1004 			 binary_bits > MAX_BINARY_BITS))
1005 		return false;
1006 	if (mon_event_all[eventid].enabled) {
1007 		pr_warn("Duplicate enable for event %d\n", eventid);
1008 		return false;
1009 	}
1010 	if (binary_bits && !mon_event_all[eventid].is_floating_point) {
1011 		pr_warn("Event %d may not be floating point\n", eventid);
1012 		return false;
1013 	}
1014 
1015 	mon_event_all[eventid].any_cpu = any_cpu;
1016 	mon_event_all[eventid].binary_bits = binary_bits;
1017 	mon_event_all[eventid].arch_priv = arch_priv;
1018 	mon_event_all[eventid].enabled = true;
1019 
1020 	return true;
1021 }
1022 
1023 bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
1024 {
1025 	return eventid >= QOS_FIRST_EVENT && eventid < QOS_NUM_EVENTS &&
1026 	       mon_event_all[eventid].enabled;
1027 }
1028 
1029 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id evtid)
1030 {
1031 	return mon_event_all[evtid].evt_cfg;
1032 }
1033 
1034 /**
1035  * struct mbm_transaction - Memory transaction an MBM event can be configured with.
1036  * @name:	Name of memory transaction (read, write ...).
1037  * @val:	The bit (eg. READS_TO_LOCAL_MEM or READS_TO_REMOTE_MEM) used to
1038  *		represent the memory transaction within an event's configuration.
1039  */
1040 struct mbm_transaction {
1041 	char	name[32];
1042 	u32	val;
1043 };
1044 
1045 /* Decoded values for each type of memory transaction. */
1046 static struct mbm_transaction mbm_transactions[NUM_MBM_TRANSACTIONS] = {
1047 	{"local_reads", READS_TO_LOCAL_MEM},
1048 	{"remote_reads", READS_TO_REMOTE_MEM},
1049 	{"local_non_temporal_writes", NON_TEMP_WRITE_TO_LOCAL_MEM},
1050 	{"remote_non_temporal_writes", NON_TEMP_WRITE_TO_REMOTE_MEM},
1051 	{"local_reads_slow_memory", READS_TO_LOCAL_S_MEM},
1052 	{"remote_reads_slow_memory", READS_TO_REMOTE_S_MEM},
1053 	{"dirty_victim_writes_all", DIRTY_VICTIMS_TO_ALL_MEM},
1054 };
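
/*
 * Example (illustrative): an event_filter of "local_reads,remote_reads"
 * corresponds to an evt_cfg of READS_TO_LOCAL_MEM | READS_TO_REMOTE_MEM.
 * event_filter_show() below prints the configured names in table order;
 * resctrl_parse_mem_transactions() accepts them in any order.
 */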
1055 
1056 int event_filter_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
1057 {
1058 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1059 	struct rdt_resource *r;
1060 	bool sep = false;
1061 	int ret = 0, i;
1062 
1063 	mutex_lock(&rdtgroup_mutex);
1064 	rdt_last_cmd_clear();
1065 
1066 	r = resctrl_arch_get_resource(mevt->rid);
1067 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1068 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1069 		ret = -EINVAL;
1070 		goto out_unlock;
1071 	}
1072 
1073 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1074 		if (mevt->evt_cfg & mbm_transactions[i].val) {
1075 			if (sep)
1076 				seq_putc(seq, ',');
1077 			seq_printf(seq, "%s", mbm_transactions[i].name);
1078 			sep = true;
1079 		}
1080 	}
1081 	seq_putc(seq, '\n');
1082 
1083 out_unlock:
1084 	mutex_unlock(&rdtgroup_mutex);
1085 
1086 	return ret;
1087 }
1088 
1089 int resctrl_mbm_assign_on_mkdir_show(struct kernfs_open_file *of, struct seq_file *s,
1090 				     void *v)
1091 {
1092 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1093 	int ret = 0;
1094 
1095 	mutex_lock(&rdtgroup_mutex);
1096 	rdt_last_cmd_clear();
1097 
1098 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1099 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1100 		ret = -EINVAL;
1101 		goto out_unlock;
1102 	}
1103 
1104 	seq_printf(s, "%u\n", r->mon.mbm_assign_on_mkdir);
1105 
1106 out_unlock:
1107 	mutex_unlock(&rdtgroup_mutex);
1108 
1109 	return ret;
1110 }
1111 
1112 ssize_t resctrl_mbm_assign_on_mkdir_write(struct kernfs_open_file *of, char *buf,
1113 					  size_t nbytes, loff_t off)
1114 {
1115 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1116 	bool value;
1117 	int ret;
1118 
1119 	ret = kstrtobool(buf, &value);
1120 	if (ret)
1121 		return ret;
1122 
1123 	mutex_lock(&rdtgroup_mutex);
1124 	rdt_last_cmd_clear();
1125 
1126 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1127 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1128 		ret = -EINVAL;
1129 		goto out_unlock;
1130 	}
1131 
1132 	r->mon.mbm_assign_on_mkdir = value;
1133 
1134 out_unlock:
1135 	mutex_unlock(&rdtgroup_mutex);
1136 
1137 	return ret ?: nbytes;
1138 }
1139 
1140 /*
1141  * mbm_cntr_free_all() - Clear all the counter ID configuration details in the
1142  *			 domain @d. Called when mbm_assign_mode is changed.
1143  */
1144 static void mbm_cntr_free_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1145 {
1146 	memset(d->cntr_cfg, 0, sizeof(*d->cntr_cfg) * r->mon.num_mbm_cntrs);
1147 }
1148 
1149 /*
1150  * resctrl_reset_rmid_all() - Reset all non-architecture states for all the
1151  *			      supported RMIDs.
1152  */
1153 static void resctrl_reset_rmid_all(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
1154 {
1155 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
1156 	enum resctrl_event_id evt;
1157 	int idx;
1158 
1159 	for_each_mbm_event_id(evt) {
1160 		if (!resctrl_is_mon_event_enabled(evt))
1161 			continue;
1162 		idx = MBM_STATE_IDX(evt);
1163 		memset(d->mbm_states[idx], 0, sizeof(*d->mbm_states[0]) * idx_limit);
1164 	}
1165 }
1166 
1167 /*
1168  * rdtgroup_assign_cntr() - Assign/unassign the counter ID for the event, RMID
1169  * pair in the domain.
1170  *
1171  * Assign the counter if @assign is true else unassign the counter. Reset the
1172  * associated non-architectural state.
1173  */
1174 static void rdtgroup_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1175 				 enum resctrl_event_id evtid, u32 rmid, u32 closid,
1176 				 u32 cntr_id, bool assign)
1177 {
1178 	struct mbm_state *m;
1179 
1180 	resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign);
1181 
1182 	m = get_mbm_state(d, closid, rmid, evtid);
1183 	if (m)
1184 		memset(m, 0, sizeof(*m));
1185 }
1186 
1187 /*
1188  * rdtgroup_alloc_assign_cntr() - Allocate a counter ID and assign it to the event
1189  * pointed to by @mevt and the resctrl group @rdtgrp within the domain @d.
1190  *
1191  * Return:
1192  * 0 on success, < 0 on failure.
1193  */
1194 static int rdtgroup_alloc_assign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1195 				      struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1196 {
1197 	int cntr_id;
1198 
1199 	/* No action required if the counter is assigned already. */
1200 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1201 	if (cntr_id >= 0)
1202 		return 0;
1203 
1204 	cntr_id = mbm_cntr_alloc(r, d, rdtgrp, mevt->evtid);
1205 	if (cntr_id < 0) {
1206 		rdt_last_cmd_printf("Failed to allocate counter for %s in domain %d\n",
1207 				    mevt->name, d->hdr.id);
1208 		return cntr_id;
1209 	}
1210 
1211 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, true);
1212 
1213 	return 0;
1214 }
1215 
1216 /*
1217  * rdtgroup_assign_cntr_event() - Assign a hardware counter for the event in
1218  * @mevt to the resctrl group @rdtgrp. Assign counters to all domains if @d is
1219  * NULL; otherwise, assign the counter to the specified domain @d.
1220  *
1221  * If all counters in a domain are already in use, rdtgroup_alloc_assign_cntr()
1222  * will fail. The assignment process will abort at the first failure encountered
1223  * during domain traversal, which may result in the event being only partially
1224  * assigned.
1225  *
1226  * Return:
1227  * 0 on success, < 0 on failure.
1228  */
1229 static int rdtgroup_assign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1230 				      struct mon_evt *mevt)
1231 {
1232 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1233 	int ret = 0;
1234 
1235 	if (!d) {
1236 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1237 			ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1238 			if (ret)
1239 				return ret;
1240 		}
1241 	} else {
1242 		ret = rdtgroup_alloc_assign_cntr(r, d, rdtgrp, mevt);
1243 	}
1244 
1245 	return ret;
1246 }
1247 
1248 /*
1249  * rdtgroup_assign_cntrs() - Assign counters to MBM events. Called when
1250  *			     a new group is created.
1251  *
1252  * Each group can accommodate two counters per domain: one for the total
1253  * event and one for the local event. Assignments may fail due to the limited
1254  * number of counters. However, it is not necessary to fail the group creation
1255  * and thus no failure is returned. Users have the option to modify the
1256  * counter assignments after the group has been created.
1257  */
1258 void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
1259 {
1260 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1261 
1262 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r) ||
1263 	    !r->mon.mbm_assign_on_mkdir)
1264 		return;
1265 
1266 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1267 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1268 					   &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1269 
1270 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1271 		rdtgroup_assign_cntr_event(NULL, rdtgrp,
1272 					   &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1273 }
1274 
1275 /*
1276  * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
1277  * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
1278  */
1279 static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_l3_mon_domain *d,
1280 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1281 {
1282 	int cntr_id;
1283 
1284 	cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
1285 
1286 	/* If there is no cntr_id assigned, nothing to do */
1287 	if (cntr_id < 0)
1288 		return;
1289 
1290 	rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
1291 
1292 	mbm_cntr_free(d, cntr_id);
1293 }
1294 
1295 /*
1296  * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
1297  * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
1298  * the counters from all the domains if @d is NULL else unassign from @d.
1299  */
1300 static void rdtgroup_unassign_cntr_event(struct rdt_l3_mon_domain *d, struct rdtgroup *rdtgrp,
1301 					 struct mon_evt *mevt)
1302 {
1303 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1304 
1305 	if (!d) {
1306 		list_for_each_entry(d, &r->mon_domains, hdr.list)
1307 			rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1308 	} else {
1309 		rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
1310 	}
1311 }
1312 
1313 /*
1314  * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
1315  *			       Called when a group is deleted.
1316  */
1317 void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
1318 {
1319 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1320 
1321 	if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
1322 		return;
1323 
1324 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1325 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1326 					     &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
1327 
1328 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1329 		rdtgroup_unassign_cntr_event(NULL, rdtgrp,
1330 					     &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
1331 }
1332 
1333 static int resctrl_parse_mem_transactions(char *tok, u32 *val)
1334 {
1335 	u32 temp_val = 0;
1336 	char *evt_str;
1337 	bool found;
1338 	int i;
1339 
1340 next_config:
1341 	if (!tok || tok[0] == '\0') {
1342 		*val = temp_val;
1343 		return 0;
1344 	}
1345 
1346 	/* Start processing the strings for each memory transaction type */
1347 	evt_str = strim(strsep(&tok, ","));
1348 	found = false;
1349 	for (i = 0; i < NUM_MBM_TRANSACTIONS; i++) {
1350 		if (!strcmp(mbm_transactions[i].name, evt_str)) {
1351 			temp_val |= mbm_transactions[i].val;
1352 			found = true;
1353 			break;
1354 		}
1355 	}
1356 
1357 	if (!found) {
1358 		rdt_last_cmd_printf("Invalid memory transaction type %s\n", evt_str);
1359 		return -EINVAL;
1360 	}
1361 
1362 	goto next_config;
1363 }
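
/*
 * Usage note (derived from the parser above): the accepted string is a
 * comma-separated subset of the mbm_transactions[] names, e.g.
 * "local_reads,dirty_victim_writes_all". An empty string is valid and
 * yields *val == 0, which clears every memory transaction from the
 * event's filter.
 */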
1364 
1365 /*
1366  * rdtgroup_update_cntr_event - Update the counter assignments for the event
1367  *				in a group.
1368  * @r:		Resource to which update needs to be done.
1369  * @rdtgrp:	Resctrl group.
1370  * @evtid:	MBM monitor event.
1371  */
1372 static void rdtgroup_update_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1373 				       enum resctrl_event_id evtid)
1374 {
1375 	struct rdt_l3_mon_domain *d;
1376 	int cntr_id;
1377 
1378 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1379 		cntr_id = mbm_cntr_get(r, d, rdtgrp, evtid);
1380 		if (cntr_id >= 0)
1381 			rdtgroup_assign_cntr(r, d, evtid, rdtgrp->mon.rmid,
1382 					     rdtgrp->closid, cntr_id, true);
1383 	}
1384 }
1385 
1386 /*
1387  * resctrl_update_cntr_allrdtgrp - Update the counter assignments for the event
1388  *				   for all the groups.
1389  * @mevt	MBM Monitor event.
1390  */
1391 static void resctrl_update_cntr_allrdtgrp(struct mon_evt *mevt)
1392 {
1393 	struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
1394 	struct rdtgroup *prgrp, *crgrp;
1395 
1396 	/*
1397 	 * Find all the groups where the event is assigned and update the
1398 	 * configuration of existing assignments.
1399 	 */
1400 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
1401 		rdtgroup_update_cntr_event(r, prgrp, mevt->evtid);
1402 
1403 		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
1404 			rdtgroup_update_cntr_event(r, crgrp, mevt->evtid);
1405 	}
1406 }
1407 
1408 ssize_t event_filter_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
1409 			   loff_t off)
1410 {
1411 	struct mon_evt *mevt = rdt_kn_parent_priv(of->kn);
1412 	struct rdt_resource *r;
1413 	u32 evt_cfg = 0;
1414 	int ret = 0;
1415 
1416 	/* Valid input requires a trailing newline */
1417 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1418 		return -EINVAL;
1419 
1420 	buf[nbytes - 1] = '\0';
1421 
1422 	cpus_read_lock();
1423 	mutex_lock(&rdtgroup_mutex);
1424 
1425 	rdt_last_cmd_clear();
1426 
1427 	r = resctrl_arch_get_resource(mevt->rid);
1428 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1429 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1430 		ret = -EINVAL;
1431 		goto out_unlock;
1432 	}
1433 
1434 	ret = resctrl_parse_mem_transactions(buf, &evt_cfg);
1435 	if (!ret && mevt->evt_cfg != evt_cfg) {
1436 		mevt->evt_cfg = evt_cfg;
1437 		resctrl_update_cntr_allrdtgrp(mevt);
1438 	}
1439 
1440 out_unlock:
1441 	mutex_unlock(&rdtgroup_mutex);
1442 	cpus_read_unlock();
1443 
1444 	return ret ?: nbytes;
1445 }
1446 
1447 int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
1448 				 struct seq_file *s, void *v)
1449 {
1450 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1451 	bool enabled;
1452 
1453 	mutex_lock(&rdtgroup_mutex);
1454 	enabled = resctrl_arch_mbm_cntr_assign_enabled(r);
1455 
1456 	if (r->mon.mbm_cntr_assignable) {
1457 		if (enabled)
1458 			seq_puts(s, "[mbm_event]\n");
1459 		else
1460 			seq_puts(s, "[default]\n");
1461 
1462 		if (!IS_ENABLED(CONFIG_RESCTRL_ASSIGN_FIXED)) {
1463 			if (enabled)
1464 				seq_puts(s, "default\n");
1465 			else
1466 				seq_puts(s, "mbm_event\n");
1467 		}
1468 	} else {
1469 		seq_puts(s, "[default]\n");
1470 	}
1471 
1472 	mutex_unlock(&rdtgroup_mutex);
1473 
1474 	return 0;
1475 }
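
/*
 * Example output (illustrative): on a system with assignable counters,
 * mbm_event mode active and CONFIG_RESCTRL_ASSIGN_FIXED disabled:
 *
 *	[mbm_event]
 *	default
 *
 * The bracketed entry is the active mode; an unbracketed entry is an
 * available alternative.
 */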
1476 
1477 ssize_t resctrl_mbm_assign_mode_write(struct kernfs_open_file *of, char *buf,
1478 				      size_t nbytes, loff_t off)
1479 {
1480 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1481 	struct rdt_l3_mon_domain *d;
1482 	int ret = 0;
1483 	bool enable;
1484 
1485 	/* Valid input requires a trailing newline */
1486 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1487 		return -EINVAL;
1488 
1489 	buf[nbytes - 1] = '\0';
1490 
1491 	cpus_read_lock();
1492 	mutex_lock(&rdtgroup_mutex);
1493 
1494 	rdt_last_cmd_clear();
1495 
1496 	if (!strcmp(buf, "default")) {
1497 		enable = 0;
1498 	} else if (!strcmp(buf, "mbm_event")) {
1499 		if (r->mon.mbm_cntr_assignable) {
1500 			enable = 1;
1501 		} else {
1502 			ret = -EINVAL;
1503 			rdt_last_cmd_puts("mbm_event mode is not supported\n");
1504 			goto out_unlock;
1505 		}
1506 	} else {
1507 		ret = -EINVAL;
1508 		rdt_last_cmd_puts("Unsupported assign mode\n");
1509 		goto out_unlock;
1510 	}
1511 
1512 	if (enable != resctrl_arch_mbm_cntr_assign_enabled(r)) {
1513 		ret = resctrl_arch_mbm_cntr_assign_set(r, enable);
1514 		if (ret)
1515 			goto out_unlock;
1516 
1517 		/* Update the visibility of BMEC related files */
1518 		resctrl_bmec_files_show(r, NULL, !enable);
1519 
1520 		/*
1521 		 * Initialize the default memory transaction values for
1522 		 * total and local events.
1523 		 */
1524 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1525 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1526 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1527 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1528 									   (READS_TO_LOCAL_MEM |
1529 									    READS_TO_LOCAL_S_MEM |
1530 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1531 		/* Enable auto assignment when switching to "mbm_event" mode */
1532 		if (enable)
1533 			r->mon.mbm_assign_on_mkdir = true;
1534 		/*
1535 		 * Reset all the non-architectural RMID state and assignable counters.
1536 		 */
1537 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1538 			mbm_cntr_free_all(r, d);
1539 			resctrl_reset_rmid_all(r, d);
1540 		}
1541 	}
1542 
1543 out_unlock:
1544 	mutex_unlock(&rdtgroup_mutex);
1545 	cpus_read_unlock();
1546 
1547 	return ret ?: nbytes;
1548 }
1549 
1550 int resctrl_num_mbm_cntrs_show(struct kernfs_open_file *of,
1551 			       struct seq_file *s, void *v)
1552 {
1553 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1554 	struct rdt_l3_mon_domain *dom;
1555 	bool sep = false;
1556 
1557 	cpus_read_lock();
1558 	mutex_lock(&rdtgroup_mutex);
1559 
1560 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1561 		if (sep)
1562 			seq_putc(s, ';');
1563 
1564 		seq_printf(s, "%d=%d", dom->hdr.id, r->mon.num_mbm_cntrs);
1565 		sep = true;
1566 	}
1567 	seq_putc(s, '\n');
1568 
1569 	mutex_unlock(&rdtgroup_mutex);
1570 	cpus_read_unlock();
1571 	return 0;
1572 }
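
/*
 * Example output (illustrative; two domains and a hypothetical 32
 * counters per domain):
 *
 *	0=32;1=32
 */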
1573 
1574 int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of,
1575 				     struct seq_file *s, void *v)
1576 {
1577 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1578 	struct rdt_l3_mon_domain *dom;
1579 	bool sep = false;
1580 	u32 cntrs, i;
1581 	int ret = 0;
1582 
1583 	cpus_read_lock();
1584 	mutex_lock(&rdtgroup_mutex);
1585 
1586 	rdt_last_cmd_clear();
1587 
1588 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1589 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1590 		ret = -EINVAL;
1591 		goto out_unlock;
1592 	}
1593 
1594 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1595 		if (sep)
1596 			seq_putc(s, ';');
1597 
1598 		cntrs = 0;
1599 		for (i = 0; i < r->mon.num_mbm_cntrs; i++) {
1600 			if (!dom->cntr_cfg[i].rdtgrp)
1601 				cntrs++;
1602 		}
1603 
1604 		seq_printf(s, "%d=%u", dom->hdr.id, cntrs);
1605 		sep = true;
1606 	}
1607 	seq_putc(s, '\n');
1608 
1609 out_unlock:
1610 	mutex_unlock(&rdtgroup_mutex);
1611 	cpus_read_unlock();
1612 
1613 	return ret;
1614 }
1615 
1616 int mbm_L3_assignments_show(struct kernfs_open_file *of, struct seq_file *s, void *v)
1617 {
1618 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1619 	struct rdt_l3_mon_domain *d;
1620 	struct rdtgroup *rdtgrp;
1621 	struct mon_evt *mevt;
1622 	int ret = 0;
1623 	bool sep;
1624 
1625 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1626 	if (!rdtgrp) {
1627 		ret = -ENOENT;
1628 		goto out_unlock;
1629 	}
1630 
1631 	rdt_last_cmd_clear();
1632 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1633 		rdt_last_cmd_puts("mbm_event counter assignment mode is not enabled\n");
1634 		ret = -EINVAL;
1635 		goto out_unlock;
1636 	}
1637 
1638 	for_each_mon_event(mevt) {
1639 		if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
1640 			continue;
1641 
1642 		sep = false;
1643 		seq_printf(s, "%s:", mevt->name);
1644 		list_for_each_entry(d, &r->mon_domains, hdr.list) {
1645 			if (sep)
1646 				seq_putc(s, ';');
1647 
1648 			if (mbm_cntr_get(r, d, rdtgrp, mevt->evtid) < 0)
1649 				seq_printf(s, "%d=_", d->hdr.id);
1650 			else
1651 				seq_printf(s, "%d=e", d->hdr.id);
1652 
1653 			sep = true;
1654 		}
1655 		seq_putc(s, '\n');
1656 	}
1657 
1658 out_unlock:
1659 	rdtgroup_kn_unlock(of->kn);
1660 
1661 	return ret;
1662 }
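
/*
 * Example output (illustrative; two L3 domains, counters assigned to
 * this group for the total event only):
 *
 *	mbm_total_bytes:0=e;1=e
 *	mbm_local_bytes:0=_;1=_
 */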
1663 
1664 /*
1665  * mbm_get_mon_event_by_name() - Return the mon_evt entry for the matching
1666  * event name.
1667  */
1668 static struct mon_evt *mbm_get_mon_event_by_name(struct rdt_resource *r, char *name)
1669 {
1670 	struct mon_evt *mevt;
1671 
1672 	for_each_mon_event(mevt) {
1673 		if (mevt->rid == r->rid && mevt->enabled &&
1674 		    resctrl_is_mbm_event(mevt->evtid) &&
1675 		    !strcmp(mevt->name, name))
1676 			return mevt;
1677 	}
1678 
1679 	return NULL;
1680 }
1681 
1682 static int rdtgroup_modify_assign_state(char *assign, struct rdt_l3_mon_domain *d,
1683 					struct rdtgroup *rdtgrp, struct mon_evt *mevt)
1684 {
1685 	int ret = 0;
1686 
1687 	if (!assign || strlen(assign) != 1)
1688 		return -EINVAL;
1689 
1690 	switch (*assign) {
1691 	case 'e':
1692 		ret = rdtgroup_assign_cntr_event(d, rdtgrp, mevt);
1693 		break;
1694 	case '_':
1695 		rdtgroup_unassign_cntr_event(d, rdtgrp, mevt);
1696 		break;
1697 	default:
1698 		ret = -EINVAL;
1699 		break;
1700 	}
1701 
1702 	return ret;
1703 }
1704 
1705 static int resctrl_parse_mbm_assignment(struct rdt_resource *r, struct rdtgroup *rdtgrp,
1706 					char *event, char *tok)
1707 {
1708 	struct rdt_l3_mon_domain *d;
1709 	unsigned long dom_id = 0;
1710 	char *dom_str, *id_str;
1711 	struct mon_evt *mevt;
1712 	int ret;
1713 
1714 	mevt = mbm_get_mon_event_by_name(r, event);
1715 	if (!mevt) {
1716 		rdt_last_cmd_printf("Invalid event %s\n", event);
1717 		return -ENOENT;
1718 	}
1719 
1720 next:
1721 	if (!tok || tok[0] == '\0')
1722 		return 0;
1723 
1724 	/* Start processing the strings for each domain */
1725 	dom_str = strim(strsep(&tok, ";"));
1726 
1727 	id_str = strsep(&dom_str, "=");
1728 
1729 	/* Check for domain id '*' which means all domains */
1730 	if (id_str && *id_str == '*') {
1731 		ret = rdtgroup_modify_assign_state(dom_str, NULL, rdtgrp, mevt);
1732 		if (ret)
1733 			rdt_last_cmd_printf("Assign operation '%s:*=%s' failed\n",
1734 					    event, dom_str);
1735 		return ret;
1736 	} else if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1737 		rdt_last_cmd_puts("Missing domain id\n");
1738 		return -EINVAL;
1739 	}
1740 
1741 	/* Verify if the dom_id is valid */
1742 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1743 		if (d->hdr.id == dom_id) {
1744 			ret = rdtgroup_modify_assign_state(dom_str, d, rdtgrp, mevt);
1745 			if (ret) {
1746 				rdt_last_cmd_printf("Assign operation '%s:%ld=%s' failed\n",
1747 						    event, dom_id, dom_str);
1748 				return ret;
1749 			}
1750 			goto next;
1751 		}
1752 	}
1753 
1754 	rdt_last_cmd_printf("Invalid domain id %ld\n", dom_id);
1755 	return -EINVAL;
1756 }
1757 
1758 ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
1759 				 size_t nbytes, loff_t off)
1760 {
1761 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1762 	struct rdtgroup *rdtgrp;
1763 	char *token, *event;
1764 	int ret = 0;
1765 
1766 	/* Valid input requires a trailing newline */
1767 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1768 		return -EINVAL;
1769 
1770 	buf[nbytes - 1] = '\0';
1771 
1772 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1773 	if (!rdtgrp) {
1774 		rdtgroup_kn_unlock(of->kn);
1775 		return -ENOENT;
1776 	}
1777 	rdt_last_cmd_clear();
1778 
1779 	if (!resctrl_arch_mbm_cntr_assign_enabled(r)) {
1780 		rdt_last_cmd_puts("mbm_event mode is not enabled\n");
1781 		rdtgroup_kn_unlock(of->kn);
1782 		return -EINVAL;
1783 	}
1784 
1785 	while ((token = strsep(&buf, "\n")) != NULL) {
1786 		/*
1787 		 * The write command follows the following format:
1788 		 * "<Event>:<Domain ID>=<Assignment state>"
1789 		 * Extract the event name first.
1790 		 */
1791 		event = strsep(&token, ":");
1792 
1793 		ret = resctrl_parse_mbm_assignment(r, rdtgrp, event, token);
1794 		if (ret)
1795 			break;
1796 	}
1797 
1798 	rdtgroup_kn_unlock(of->kn);
1799 
1800 	return ret ?: nbytes;
1801 }
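
/*
 * Illustrative usage from user space (assumes a mounted resctrl
 * filesystem with mbm_event mode enabled):
 *
 *	echo "mbm_total_bytes:0=e" > mbm_L3_assignments
 *	echo "mbm_local_bytes:*=_" > mbm_L3_assignments
 *
 * The first write assigns a counter for mbm_total_bytes in domain 0;
 * the second releases the mbm_local_bytes counters in every domain.
 */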
1802 
1803 /**
1804  * resctrl_l3_mon_resource_init() - Initialise global monitoring structures.
1805  *
1806  * Allocate and initialise global monitor resources that do not belong to a
1807  * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
1808  * Called once during boot after the struct rdt_resource's have been configured
1809  * but before the filesystem is mounted.
1810  * Resctrl's cpuhp callbacks may be called before this point to bring a domain
1811  * online.
1812  *
1813  * Return: 0 for success, or -ENOMEM.
1814  */
1815 int resctrl_l3_mon_resource_init(void)
1816 {
1817 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1818 	int ret;
1819 
1820 	if (!r->mon_capable)
1821 		return 0;
1822 
1823 	ret = dom_data_init(r);
1824 	if (ret)
1825 		return ret;
1826 
1827 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
1828 		mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
1829 		resctrl_file_fflags_init("mbm_total_bytes_config",
1830 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1831 	}
1832 	if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
1833 		mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].configurable = true;
1834 		resctrl_file_fflags_init("mbm_local_bytes_config",
1835 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1836 	}
1837 
1838 	if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1839 		mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
1840 	else if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1841 		mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
1842 
1843 	if (r->mon.mbm_cntr_assignable) {
1844 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
1845 			mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
1846 		if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
1847 			mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
1848 									   (READS_TO_LOCAL_MEM |
1849 									    READS_TO_LOCAL_S_MEM |
1850 									    NON_TEMP_WRITE_TO_LOCAL_MEM);
1851 		r->mon.mbm_assign_on_mkdir = true;
1852 		resctrl_file_fflags_init("num_mbm_cntrs",
1853 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1854 		resctrl_file_fflags_init("available_mbm_cntrs",
1855 					 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
1856 		resctrl_file_fflags_init("event_filter", RFTYPE_ASSIGN_CONFIG);
1857 		resctrl_file_fflags_init("mbm_assign_on_mkdir", RFTYPE_MON_INFO |
1858 					 RFTYPE_RES_CACHE);
1859 		resctrl_file_fflags_init("mbm_L3_assignments", RFTYPE_MON_BASE);
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 void resctrl_l3_mon_resource_exit(void)
1866 {
1867 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
1868 
1869 	dom_data_exit(r);
1870 }
1871