xref: /linux/fs/resctrl/rdtgroup.c (revision 2cb8eeaf00efc037988910de17ffe592b23941a6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * User interface for Resource Allocation in Resource Director Technology(RDT)
4  *
5  * Copyright (C) 2016 Intel Corporation
6  *
7  * Author: Fenghua Yu <fenghua.yu@intel.com>
8  *
9  * More information about RDT can be found in the Intel (R) x86 Architecture
10  * Software Developer Manual.
11  */
12 
13 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
14 
15 #include <linux/cpu.h>
16 #include <linux/debugfs.h>
17 #include <linux/fs.h>
18 #include <linux/fs_parser.h>
19 #include <linux/sysfs.h>
20 #include <linux/kernfs.h>
21 #include <linux/resctrl.h>
22 #include <linux/seq_buf.h>
23 #include <linux/seq_file.h>
24 #include <linux/sched/task.h>
25 #include <linux/slab.h>
26 #include <linux/user_namespace.h>
27 
28 #include <uapi/linux/magic.h>
29 
30 #include "internal.h"
31 
32 /* Mutex to protect rdtgroup access. */
33 DEFINE_MUTEX(rdtgroup_mutex);
34 
35 static struct kernfs_root *rdt_root;
36 
37 struct rdtgroup rdtgroup_default;
38 
39 LIST_HEAD(rdt_all_groups);
40 
41 /* list of entries for the schemata file */
42 LIST_HEAD(resctrl_schema_all);
43 
44 /*
45  * List of struct mon_data containing private data of event files for use by
46  * rdtgroup_mondata_show(). Protected by rdtgroup_mutex.
47  */
48 static LIST_HEAD(mon_data_kn_priv_list);
49 
50 /* The filesystem can only be mounted once. */
51 bool resctrl_mounted;
52 
53 /* Kernel fs node for "info" directory under root */
54 static struct kernfs_node *kn_info;
55 
56 /* Kernel fs node for "mon_groups" directory under root */
57 static struct kernfs_node *kn_mongrp;
58 
59 /* Kernel fs node for "mon_data" directory under root */
60 static struct kernfs_node *kn_mondata;
61 
62 /*
63  * Used to store the max resource name width to display the schemata names in
64  * a tabular format.
65  */
66 int max_name_width;
67 
68 static struct seq_buf last_cmd_status;
69 
70 static char last_cmd_status_buf[512];
71 
72 static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
73 
74 static void rdtgroup_destroy_root(void);
75 
76 struct dentry *debugfs_resctrl;
77 
78 /*
79  * Memory bandwidth monitoring event to use for the default CTRL_MON group
80  * and each new CTRL_MON group created by the user.  Only relevant when
81  * the filesystem is mounted with the "mba_MBps" option so it does not
82  * matter that it remains uninitialized on systems that do not support
83  * the "mba_MBps" option.
84  */
85 enum resctrl_event_id mba_mbps_default_event;
86 
87 static bool resctrl_debug;
88 
89 void rdt_last_cmd_clear(void)
90 {
91 	lockdep_assert_held(&rdtgroup_mutex);
92 	seq_buf_clear(&last_cmd_status);
93 }
94 
95 void rdt_last_cmd_puts(const char *s)
96 {
97 	lockdep_assert_held(&rdtgroup_mutex);
98 	seq_buf_puts(&last_cmd_status, s);
99 }
100 
101 void rdt_last_cmd_printf(const char *fmt, ...)
102 {
103 	va_list ap;
104 
105 	va_start(ap, fmt);
106 	lockdep_assert_held(&rdtgroup_mutex);
107 	seq_buf_vprintf(&last_cmd_status, fmt, ap);
108 	va_end(ap);
109 }
110 
111 void rdt_staged_configs_clear(void)
112 {
113 	struct rdt_ctrl_domain *dom;
114 	struct rdt_resource *r;
115 
116 	lockdep_assert_held(&rdtgroup_mutex);
117 
118 	for_each_alloc_capable_rdt_resource(r) {
119 		list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
120 			memset(dom->staged_config, 0, sizeof(dom->staged_config));
121 	}
122 }
123 
124 static bool resctrl_is_mbm_enabled(void)
125 {
126 	return (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID) ||
127 		resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID));
128 }
129 
130 /*
131  * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap
132  * of free CLOSIDs.
133  *
134  * Using a global CLOSID across all resources has some advantages and
135  * some drawbacks:
136  * + We can simply set current's closid to assign a task to a resource
137  *   group.
138  * + Context switch code can avoid extra memory references deciding which
139  *   CLOSID to load into the PQR_ASSOC MSR
140  * - We give up some options in configuring resource groups across multi-socket
141  *   systems.
142  * - Our choices on how to configure each resource become progressively more
143  *   limited as the number of resources grows.
144  */
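/*
 * Illustrative example (hypothetical counts): if L3 CAT exposes 16 CLOSIDs
 * and MBA exposes 8, closid_init() below sizes the free map with the
 * minimum across all schemas, so closids_supported() reports 8 for every
 * resource.
 */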
145 static unsigned long *closid_free_map;
146 
147 static int closid_free_map_len;
148 
149 int closids_supported(void)
150 {
151 	return closid_free_map_len;
152 }
153 
154 static int closid_init(void)
155 {
156 	struct resctrl_schema *s;
157 	u32 rdt_min_closid = ~0;
158 
159 	/* Monitor only platforms still call closid_init() */
160 	if (list_empty(&resctrl_schema_all))
161 		return 0;
162 
163 	/* Compute rdt_min_closid across all resources */
164 	list_for_each_entry(s, &resctrl_schema_all, list)
165 		rdt_min_closid = min(rdt_min_closid, s->num_closid);
166 
167 	closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL);
168 	if (!closid_free_map)
169 		return -ENOMEM;
170 	bitmap_fill(closid_free_map, rdt_min_closid);
171 
172 	/* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
173 	__clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map);
174 	closid_free_map_len = rdt_min_closid;
175 
176 	return 0;
177 }
178 
179 static void closid_exit(void)
180 {
181 	bitmap_free(closid_free_map);
182 	closid_free_map = NULL;
183 }
184 
185 static int closid_alloc(void)
186 {
187 	int cleanest_closid;
188 	u32 closid;
189 
190 	lockdep_assert_held(&rdtgroup_mutex);
191 
192 	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
193 	    resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
194 		cleanest_closid = resctrl_find_cleanest_closid();
195 		if (cleanest_closid < 0)
196 			return cleanest_closid;
197 		closid = cleanest_closid;
198 	} else {
199 		closid = find_first_bit(closid_free_map, closid_free_map_len);
200 		if (closid == closid_free_map_len)
201 			return -ENOSPC;
202 	}
203 	__clear_bit(closid, closid_free_map);
204 
205 	return closid;
206 }
207 
208 void closid_free(int closid)
209 {
210 	lockdep_assert_held(&rdtgroup_mutex);
211 
212 	__set_bit(closid, closid_free_map);
213 }
214 
215 /**
216  * closid_allocated - test if provided closid is in use
217  * @closid: closid to be tested
218  *
219  * Return: true if @closid is currently associated with a resource group,
220  * false if @closid is free
221  */
222 bool closid_allocated(unsigned int closid)
223 {
224 	lockdep_assert_held(&rdtgroup_mutex);
225 
226 	return !test_bit(closid, closid_free_map);
227 }
228 
229 /**
230  * rdtgroup_mode_by_closid - Return mode of resource group with closid
231  * @closid: closid of the resource group
232  *
233  * Each resource group is associated with a @closid. Here the mode
234  * of a resource group can be queried by searching for it using its closid.
235  *
236  * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
237  */
238 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
239 {
240 	struct rdtgroup *rdtgrp;
241 
242 	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
243 		if (rdtgrp->closid == closid)
244 			return rdtgrp->mode;
245 	}
246 
247 	return RDT_NUM_MODES;
248 }
249 
250 static const char * const rdt_mode_str[] = {
251 	[RDT_MODE_SHAREABLE]		= "shareable",
252 	[RDT_MODE_EXCLUSIVE]		= "exclusive",
253 	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
254 	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
255 };
256 
257 /**
258  * rdtgroup_mode_str - Return the string representation of mode
259  * @mode: the resource group mode as &enum rdtgroup_mode
260  *
261  * Return: string representation of valid mode, "unknown" otherwise
262  */
263 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
264 {
265 	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
266 		return "unknown";
267 
268 	return rdt_mode_str[mode];
269 }
270 
271 /* set uid and gid of rdtgroup dirs and files to that of the creator */
272 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
273 {
274 	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
275 				.ia_uid = current_fsuid(),
276 				.ia_gid = current_fsgid(), };
277 
278 	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
279 	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
280 		return 0;
281 
282 	return kernfs_setattr(kn, &iattr);
283 }
284 
285 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
286 {
287 	struct kernfs_node *kn;
288 	int ret;
289 
290 	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
291 				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
292 				  0, rft->kf_ops, rft, NULL, NULL);
293 	if (IS_ERR(kn))
294 		return PTR_ERR(kn);
295 
296 	ret = rdtgroup_kn_set_ugid(kn);
297 	if (ret) {
298 		kernfs_remove(kn);
299 		return ret;
300 	}
301 
302 	return 0;
303 }
304 
305 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
306 {
307 	struct kernfs_open_file *of = m->private;
308 	struct rftype *rft = of->kn->priv;
309 
310 	if (rft->seq_show)
311 		return rft->seq_show(of, m, arg);
312 	return 0;
313 }
314 
315 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
316 				   size_t nbytes, loff_t off)
317 {
318 	struct rftype *rft = of->kn->priv;
319 
320 	if (rft->write)
321 		return rft->write(of, buf, nbytes, off);
322 
323 	return -EINVAL;
324 }
325 
326 static const struct kernfs_ops rdtgroup_kf_single_ops = {
327 	.atomic_write_len	= PAGE_SIZE,
328 	.write			= rdtgroup_file_write,
329 	.seq_show		= rdtgroup_seqfile_show,
330 };
331 
332 static const struct kernfs_ops kf_mondata_ops = {
333 	.atomic_write_len	= PAGE_SIZE,
334 	.seq_show		= rdtgroup_mondata_show,
335 };
336 
337 static bool is_cpu_list(struct kernfs_open_file *of)
338 {
339 	struct rftype *rft = of->kn->priv;
340 
341 	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
342 }
343 
344 static int rdtgroup_cpus_show(struct kernfs_open_file *of,
345 			      struct seq_file *s, void *v)
346 {
347 	struct rdtgroup *rdtgrp;
348 	struct cpumask *mask;
349 	int ret = 0;
350 
351 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
352 
353 	if (rdtgrp) {
354 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
355 			if (!rdtgrp->plr->d) {
356 				rdt_last_cmd_clear();
357 				rdt_last_cmd_puts("Cache domain offline\n");
358 				ret = -ENODEV;
359 			} else {
360 				mask = &rdtgrp->plr->d->hdr.cpu_mask;
361 				seq_printf(s, is_cpu_list(of) ?
362 					   "%*pbl\n" : "%*pb\n",
363 					   cpumask_pr_args(mask));
364 			}
365 		} else {
366 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
367 				   cpumask_pr_args(&rdtgrp->cpu_mask));
368 		}
369 	} else {
370 		ret = -ENOENT;
371 	}
372 	rdtgroup_kn_unlock(of->kn);
373 
374 	return ret;
375 }
376 
377 /*
378  * Update the PQR_ASSOC MSR on all CPUs in @cpu_mask.
379  *
380  * Per task closids/rmids must have been set up before calling this function.
381  * @r may be NULL.
382  */
383 static void
384 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
385 {
386 	struct resctrl_cpu_defaults defaults, *p = NULL;
387 
388 	if (r) {
389 		defaults.closid = r->closid;
390 		defaults.rmid = r->mon.rmid;
391 		p = &defaults;
392 	}
393 
394 	on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
395 }
396 
397 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
398 			  cpumask_var_t tmpmask)
399 {
400 	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
401 	struct list_head *head;
402 
403 	/* Check whether cpus belong to parent ctrl group */
404 	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
405 	if (!cpumask_empty(tmpmask)) {
406 		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
407 		return -EINVAL;
408 	}
409 
410 	/* Check whether cpus are dropped from this group */
411 	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
412 	if (!cpumask_empty(tmpmask)) {
413 		/* Give any dropped cpus to parent rdtgroup */
414 		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
415 		update_closid_rmid(tmpmask, prgrp);
416 	}
417 
418 	/*
419 	 * If we added cpus, remove them from previous group that owned them
420 	 * and update per-cpu rmid
421 	 */
422 	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
423 	if (!cpumask_empty(tmpmask)) {
424 		head = &prgrp->mon.crdtgrp_list;
425 		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
426 			if (crgrp == rdtgrp)
427 				continue;
428 			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
429 				       tmpmask);
430 		}
431 		update_closid_rmid(tmpmask, rdtgrp);
432 	}
433 
434 	/* Done pushing/pulling - update this group with new mask */
435 	cpumask_copy(&rdtgrp->cpu_mask, newmask);
436 
437 	return 0;
438 }
439 
440 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
441 {
442 	struct rdtgroup *crgrp;
443 
444 	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
445 	/* update the child mon group masks as well */
446 	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
447 		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
448 }
449 
450 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
451 			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
452 {
453 	struct rdtgroup *r, *crgrp;
454 	struct list_head *head;
455 
456 	/* Check whether cpus are dropped from this group */
457 	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
458 	if (!cpumask_empty(tmpmask)) {
459 		/* Can't drop from default group */
460 		if (rdtgrp == &rdtgroup_default) {
461 			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
462 			return -EINVAL;
463 		}
464 
465 		/* Give any dropped cpus to rdtgroup_default */
466 		cpumask_or(&rdtgroup_default.cpu_mask,
467 			   &rdtgroup_default.cpu_mask, tmpmask);
468 		update_closid_rmid(tmpmask, &rdtgroup_default);
469 	}
470 
471 	/*
472 	 * If we added cpus, remove them from previous group and
473 	 * the prev group's child groups that owned them
474 	 * and update per-cpu closid/rmid.
475 	 */
476 	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
477 	if (!cpumask_empty(tmpmask)) {
478 		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
479 			if (r == rdtgrp)
480 				continue;
481 			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
482 			if (!cpumask_empty(tmpmask1))
483 				cpumask_rdtgrp_clear(r, tmpmask1);
484 		}
485 		update_closid_rmid(tmpmask, rdtgrp);
486 	}
487 
488 	/* Done pushing/pulling - update this group with new mask */
489 	cpumask_copy(&rdtgrp->cpu_mask, newmask);
490 
491 	/*
492 	 * Clear child mon group masks since there is a new parent mask
493 	 * now and update the rmid for the cpus the child lost.
494 	 */
495 	head = &rdtgrp->mon.crdtgrp_list;
496 	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
497 		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
498 		update_closid_rmid(tmpmask, rdtgrp);
499 		cpumask_clear(&crgrp->cpu_mask);
500 	}
501 
502 	return 0;
503 }
504 
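/*
 * Illustrative usage (hypothetical CPUs): writing "0-3" to a group's
 * "cpus_list" file, or the equivalent mask "f" to "cpus", assigns CPUs 0-3
 * to the group. Offline CPUs are rejected; CPUs dropped from a control
 * group fall back to the default group, and CPUs dropped from a monitor
 * group fall back to its parent control group.
 */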
505 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
506 				   char *buf, size_t nbytes, loff_t off)
507 {
508 	cpumask_var_t tmpmask, newmask, tmpmask1;
509 	struct rdtgroup *rdtgrp;
510 	int ret;
511 
512 	if (!buf)
513 		return -EINVAL;
514 
515 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
516 		return -ENOMEM;
517 	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
518 		free_cpumask_var(tmpmask);
519 		return -ENOMEM;
520 	}
521 	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
522 		free_cpumask_var(tmpmask);
523 		free_cpumask_var(newmask);
524 		return -ENOMEM;
525 	}
526 
527 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
528 	if (!rdtgrp) {
529 		ret = -ENOENT;
530 		goto unlock;
531 	}
532 
533 	rdt_last_cmd_clear();
534 
535 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
536 	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
537 		ret = -EINVAL;
538 		rdt_last_cmd_puts("Pseudo-locking in progress\n");
539 		goto unlock;
540 	}
541 
542 	if (is_cpu_list(of))
543 		ret = cpulist_parse(buf, newmask);
544 	else
545 		ret = cpumask_parse(buf, newmask);
546 
547 	if (ret) {
548 		rdt_last_cmd_puts("Bad CPU list/mask\n");
549 		goto unlock;
550 	}
551 
552 	/* check that user didn't specify any offline cpus */
553 	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
554 	if (!cpumask_empty(tmpmask)) {
555 		ret = -EINVAL;
556 		rdt_last_cmd_puts("Can only assign online CPUs\n");
557 		goto unlock;
558 	}
559 
560 	if (rdtgrp->type == RDTCTRL_GROUP)
561 		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
562 	else if (rdtgrp->type == RDTMON_GROUP)
563 		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
564 	else
565 		ret = -EINVAL;
566 
567 unlock:
568 	rdtgroup_kn_unlock(of->kn);
569 	free_cpumask_var(tmpmask);
570 	free_cpumask_var(newmask);
571 	free_cpumask_var(tmpmask1);
572 
573 	return ret ?: nbytes;
574 }
575 
576 /**
577  * rdtgroup_remove - the helper to remove a resource group safely
578  * @rdtgrp: resource group to remove
579  *
580  * On resource group creation via a mkdir, an extra kernfs_node reference is
581  * taken to ensure that the rdtgroup structure remains accessible for the
582  * rdtgroup_kn_unlock() calls where it is removed.
583  *
584  * Drop the extra reference here, then free the rdtgroup structure.
585  *
586  * Return: void
587  */
588 static void rdtgroup_remove(struct rdtgroup *rdtgrp)
589 {
590 	kernfs_put(rdtgrp->kn);
591 	kfree(rdtgrp);
592 }
593 
594 static void _update_task_closid_rmid(void *task)
595 {
596 	/*
597 	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
598 	 * Otherwise, the MSR is updated when the task is scheduled in.
599 	 */
600 	if (task == current)
601 		resctrl_arch_sched_in(task);
602 }
603 
604 static void update_task_closid_rmid(struct task_struct *t)
605 {
606 	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
607 		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
608 	else
609 		_update_task_closid_rmid(t);
610 }
611 
612 static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
613 {
614 	u32 closid, rmid = rdtgrp->mon.rmid;
615 
616 	if (rdtgrp->type == RDTCTRL_GROUP)
617 		closid = rdtgrp->closid;
618 	else if (rdtgrp->type == RDTMON_GROUP)
619 		closid = rdtgrp->mon.parent->closid;
620 	else
621 		return false;
622 
623 	return resctrl_arch_match_closid(tsk, closid) &&
624 	       resctrl_arch_match_rmid(tsk, closid, rmid);
625 }
626 
627 static int __rdtgroup_move_task(struct task_struct *tsk,
628 				struct rdtgroup *rdtgrp)
629 {
630 	/* If the task is already in rdtgrp, no need to move the task. */
631 	if (task_in_rdtgroup(tsk, rdtgrp))
632 		return 0;
633 
634 	/*
635 	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
636 	 * updated by them.
637 	 *
638 	 * For ctrl_mon groups, move both closid and rmid.
639 	 * For monitor groups, tasks can be moved only from
640 	 * their parent CTRL group.
641 	 */
642 	if (rdtgrp->type == RDTMON_GROUP &&
643 	    !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
644 		rdt_last_cmd_puts("Can't move task to different control group\n");
645 		return -EINVAL;
646 	}
647 
648 	if (rdtgrp->type == RDTMON_GROUP)
649 		resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
650 					     rdtgrp->mon.rmid);
651 	else
652 		resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
653 					     rdtgrp->mon.rmid);
654 
655 	/*
656 	 * Ensure the task's closid and rmid are written before determining if
657 	 * the task is current, which decides whether it will be interrupted.
658 	 * This pairs with the full barrier between the rq->curr update and
659 	 * resctrl_arch_sched_in() during context switch.
660 	 */
661 	smp_mb();
662 
663 	/*
664 	 * By now, the task's closid and rmid are set. If the task is current
665 	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
666 	 * group go into effect. If the task is not current, the MSR will be
667 	 * updated when the task is scheduled in.
668 	 */
669 	update_task_closid_rmid(tsk);
670 
671 	return 0;
672 }
673 
674 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
675 {
676 	return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
677 		resctrl_arch_match_closid(t, r->closid));
678 }
679 
680 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
681 {
682 	return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
683 		resctrl_arch_match_rmid(t, r->mon.parent->closid,
684 					r->mon.rmid));
685 }
686 
687 /**
688  * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
689  * @r: Resource group
690  *
691  * Return: 1 if tasks have been assigned to @r, 0 otherwise
692  */
693 int rdtgroup_tasks_assigned(struct rdtgroup *r)
694 {
695 	struct task_struct *p, *t;
696 	int ret = 0;
697 
698 	lockdep_assert_held(&rdtgroup_mutex);
699 
700 	rcu_read_lock();
701 	for_each_process_thread(p, t) {
702 		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
703 			ret = 1;
704 			break;
705 		}
706 	}
707 	rcu_read_unlock();
708 
709 	return ret;
710 }
711 
712 static int rdtgroup_task_write_permission(struct task_struct *task,
713 					  struct kernfs_open_file *of)
714 {
715 	const struct cred *tcred = get_task_cred(task);
716 	const struct cred *cred = current_cred();
717 	int ret = 0;
718 
719 	/*
720 	 * Even if we're attaching all tasks in the thread group, we only
721 	 * need to check permissions on one of them.
722 	 */
723 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
724 	    !uid_eq(cred->euid, tcred->uid) &&
725 	    !uid_eq(cred->euid, tcred->suid)) {
726 		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
727 		ret = -EPERM;
728 	}
729 
730 	put_cred(tcred);
731 	return ret;
732 }
733 
734 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
735 			      struct kernfs_open_file *of)
736 {
737 	struct task_struct *tsk;
738 	int ret;
739 
740 	rcu_read_lock();
741 	if (pid) {
742 		tsk = find_task_by_vpid(pid);
743 		if (!tsk) {
744 			rcu_read_unlock();
745 			rdt_last_cmd_printf("No task %d\n", pid);
746 			return -ESRCH;
747 		}
748 	} else {
749 		tsk = current;
750 	}
751 
752 	get_task_struct(tsk);
753 	rcu_read_unlock();
754 
755 	ret = rdtgroup_task_write_permission(tsk, of);
756 	if (!ret)
757 		ret = __rdtgroup_move_task(tsk, rdtgrp);
758 
759 	put_task_struct(tsk);
760 	return ret;
761 }
762 
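/*
 * Illustrative usage (hypothetical pids): writing "1234,5678" to a group's
 * "tasks" file moves both tasks into the group. Parsing stops at the first
 * failure; tasks moved before the failing entry stay in the group.
 */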
763 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
764 				    char *buf, size_t nbytes, loff_t off)
765 {
766 	struct rdtgroup *rdtgrp;
767 	char *pid_str;
768 	int ret = 0;
769 	pid_t pid;
770 
771 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
772 	if (!rdtgrp) {
773 		rdtgroup_kn_unlock(of->kn);
774 		return -ENOENT;
775 	}
776 	rdt_last_cmd_clear();
777 
778 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
779 	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
780 		ret = -EINVAL;
781 		rdt_last_cmd_puts("Pseudo-locking in progress\n");
782 		goto unlock;
783 	}
784 
785 	while (buf && buf[0] != '\0' && buf[0] != '\n') {
786 		pid_str = strim(strsep(&buf, ","));
787 
788 		if (kstrtoint(pid_str, 0, &pid)) {
789 			rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
790 			ret = -EINVAL;
791 			break;
792 		}
793 
794 		if (pid < 0) {
795 			rdt_last_cmd_printf("Invalid pid %d\n", pid);
796 			ret = -EINVAL;
797 			break;
798 		}
799 
800 		ret = rdtgroup_move_task(pid, rdtgrp, of);
801 		if (ret) {
802 			rdt_last_cmd_printf("Error while processing task %d\n", pid);
803 			break;
804 		}
805 	}
806 
807 unlock:
808 	rdtgroup_kn_unlock(of->kn);
809 
810 	return ret ?: nbytes;
811 }
812 
813 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
814 {
815 	struct task_struct *p, *t;
816 	pid_t pid;
817 
818 	rcu_read_lock();
819 	for_each_process_thread(p, t) {
820 		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
821 			pid = task_pid_vnr(t);
822 			if (pid)
823 				seq_printf(s, "%d\n", pid);
824 		}
825 	}
826 	rcu_read_unlock();
827 }
828 
829 static int rdtgroup_tasks_show(struct kernfs_open_file *of,
830 			       struct seq_file *s, void *v)
831 {
832 	struct rdtgroup *rdtgrp;
833 	int ret = 0;
834 
835 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
836 	if (rdtgrp)
837 		show_rdt_tasks(rdtgrp, s);
838 	else
839 		ret = -ENOENT;
840 	rdtgroup_kn_unlock(of->kn);
841 
842 	return ret;
843 }
844 
845 static int rdtgroup_closid_show(struct kernfs_open_file *of,
846 				struct seq_file *s, void *v)
847 {
848 	struct rdtgroup *rdtgrp;
849 	int ret = 0;
850 
851 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
852 	if (rdtgrp)
853 		seq_printf(s, "%u\n", rdtgrp->closid);
854 	else
855 		ret = -ENOENT;
856 	rdtgroup_kn_unlock(of->kn);
857 
858 	return ret;
859 }
860 
861 static int rdtgroup_rmid_show(struct kernfs_open_file *of,
862 			      struct seq_file *s, void *v)
863 {
864 	struct rdtgroup *rdtgrp;
865 	int ret = 0;
866 
867 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
868 	if (rdtgrp)
869 		seq_printf(s, "%u\n", rdtgrp->mon.rmid);
870 	else
871 		ret = -ENOENT;
872 	rdtgroup_kn_unlock(of->kn);
873 
874 	return ret;
875 }
876 
877 #ifdef CONFIG_PROC_CPU_RESCTRL
878 /*
879  * A task can only be part of one resctrl control group and of one monitor
880  * group which is associated to that control group.
881  *
882  * 1)   res:
883  *      mon:
884  *
885  *    resctrl is not available.
886  *
887  * 2)   res:/
888  *      mon:
889  *
890  *    Task is part of the root resctrl control group, and it is not associated
891  *    to any monitor group.
892  *
893  * 3)  res:/
894  *     mon:mon0
895  *
896  *    Task is part of the root resctrl control group and monitor group mon0.
897  *
898  * 4)  res:group0
899  *     mon:
900  *
901  *    Task is part of resctrl control group group0, and it is not associated
902  *    to any monitor group.
903  *
904  * 5) res:group0
905  *    mon:mon1
906  *
907  *    Task is part of resctrl control group group0 and monitor group mon1.
908  */
909 int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
910 		      struct pid *pid, struct task_struct *tsk)
911 {
912 	struct rdtgroup *rdtg;
913 	int ret = 0;
914 
915 	mutex_lock(&rdtgroup_mutex);
916 
917 	/* Return empty if resctrl has not been mounted. */
918 	if (!resctrl_mounted) {
919 		seq_puts(s, "res:\nmon:\n");
920 		goto unlock;
921 	}
922 
923 	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
924 		struct rdtgroup *crg;
925 
926 		/*
927 		 * Task information is only relevant for shareable
928 		 * and exclusive groups.
929 		 */
930 		if (rdtg->mode != RDT_MODE_SHAREABLE &&
931 		    rdtg->mode != RDT_MODE_EXCLUSIVE)
932 			continue;
933 
934 		if (!resctrl_arch_match_closid(tsk, rdtg->closid))
935 			continue;
936 
937 		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
938 			   rdt_kn_name(rdtg->kn));
939 		seq_puts(s, "mon:");
940 		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
941 				    mon.crdtgrp_list) {
942 			if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
943 						     crg->mon.rmid))
944 				continue;
945 			seq_printf(s, "%s", rdt_kn_name(crg->kn));
946 			break;
947 		}
948 		seq_putc(s, '\n');
949 		goto unlock;
950 	}
951 	/*
952 	 * The above search should succeed. Otherwise return
953 	 * with an error.
954 	 */
955 	ret = -ENOENT;
956 unlock:
957 	mutex_unlock(&rdtgroup_mutex);
958 
959 	return ret;
960 }
961 #endif
962 
963 static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
964 				    struct seq_file *seq, void *v)
965 {
966 	int len;
967 
968 	mutex_lock(&rdtgroup_mutex);
969 	len = seq_buf_used(&last_cmd_status);
970 	if (len)
971 		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
972 	else
973 		seq_puts(seq, "ok\n");
974 	mutex_unlock(&rdtgroup_mutex);
975 	return 0;
976 }
977 
978 void *rdt_kn_parent_priv(struct kernfs_node *kn)
979 {
980 	/*
981 	 * The parent pointer is only valid within RCU section since it can be
982 	 * replaced.
983 	 */
984 	guard(rcu)();
985 	return rcu_dereference(kn->__parent)->priv;
986 }
987 
988 static int rdt_num_closids_show(struct kernfs_open_file *of,
989 				struct seq_file *seq, void *v)
990 {
991 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
992 
993 	seq_printf(seq, "%u\n", s->num_closid);
994 	return 0;
995 }
996 
997 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
998 				 struct seq_file *seq, void *v)
999 {
1000 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1001 	struct rdt_resource *r = s->res;
1002 
1003 	seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
1004 	return 0;
1005 }
1006 
1007 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
1008 				 struct seq_file *seq, void *v)
1009 {
1010 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1011 	struct rdt_resource *r = s->res;
1012 
1013 	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
1014 	return 0;
1015 }
1016 
1017 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
1018 				   struct seq_file *seq, void *v)
1019 {
1020 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1021 	struct rdt_resource *r = s->res;
1022 
1023 	seq_printf(seq, "%x\n", r->cache.shareable_bits);
1024 	return 0;
1025 }
1026 
1027 /*
1028  * rdt_bit_usage_show - Display current usage of resources
1029  *
1030  * A domain is a shared resource that can now be allocated differently. Here
1031  * we display the current regions of the domain as an annotated bitmask.
1032  * For each domain of this resource its allocation bitmask
1033  * is annotated as below to indicate the current usage of the corresponding bit:
1034  *   0 - currently unused
1035  *   X - currently available for sharing and used by software and hardware
1036  *   H - currently used by hardware only but available for software use
1037  *   S - currently used and shareable by software only
1038  *   E - currently used exclusively by one resource group
1039  *   P - currently pseudo-locked by one resource group
1040  */
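/*
 * Illustrative output for an 11-bit CBM across two cache domains
 * (hypothetical values):
 *
 *   0=SSSSSSSS0XX;1=SSSSSSSS0XX
 *
 * Bits are printed from the most significant CBM bit downwards, one
 * annotated character per bit, with domains separated by ';'.
 */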
1041 static int rdt_bit_usage_show(struct kernfs_open_file *of,
1042 			      struct seq_file *seq, void *v)
1043 {
1044 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1045 	/*
1046 	 * Use unsigned long even though only 32 bits are used to ensure
1047 	 * test_bit() is used safely.
1048 	 */
1049 	unsigned long sw_shareable = 0, hw_shareable = 0;
1050 	unsigned long exclusive = 0, pseudo_locked = 0;
1051 	struct rdt_resource *r = s->res;
1052 	struct rdt_ctrl_domain *dom;
1053 	int i, hwb, swb, excl, psl;
1054 	enum rdtgrp_mode mode;
1055 	bool sep = false;
1056 	u32 ctrl_val;
1057 
1058 	cpus_read_lock();
1059 	mutex_lock(&rdtgroup_mutex);
1060 	hw_shareable = r->cache.shareable_bits;
1061 	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
1062 		if (sep)
1063 			seq_putc(seq, ';');
1064 		sw_shareable = 0;
1065 		exclusive = 0;
1066 		seq_printf(seq, "%d=", dom->hdr.id);
1067 		for (i = 0; i < closids_supported(); i++) {
1068 			if (!closid_allocated(i))
1069 				continue;
1070 			ctrl_val = resctrl_arch_get_config(r, dom, i,
1071 							   s->conf_type);
1072 			mode = rdtgroup_mode_by_closid(i);
1073 			switch (mode) {
1074 			case RDT_MODE_SHAREABLE:
1075 				sw_shareable |= ctrl_val;
1076 				break;
1077 			case RDT_MODE_EXCLUSIVE:
1078 				exclusive |= ctrl_val;
1079 				break;
1080 			case RDT_MODE_PSEUDO_LOCKSETUP:
1081 			/*
1082 			 * RDT_MODE_PSEUDO_LOCKSETUP is possible
1083 			 * here but not included since the CBM
1084 			 * associated with this CLOSID in this mode
1085 			 * is not initialized and no task or cpu can be
1086 			 * assigned this CLOSID.
1087 			 */
1088 				break;
1089 			case RDT_MODE_PSEUDO_LOCKED:
1090 			case RDT_NUM_MODES:
1091 				WARN(1,
1092 				     "invalid mode for closid %d\n", i);
1093 				break;
1094 			}
1095 		}
1096 		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
1097 			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
1098 			hwb = test_bit(i, &hw_shareable);
1099 			swb = test_bit(i, &sw_shareable);
1100 			excl = test_bit(i, &exclusive);
1101 			psl = test_bit(i, &pseudo_locked);
1102 			if (hwb && swb)
1103 				seq_putc(seq, 'X');
1104 			else if (hwb && !swb)
1105 				seq_putc(seq, 'H');
1106 			else if (!hwb && swb)
1107 				seq_putc(seq, 'S');
1108 			else if (excl)
1109 				seq_putc(seq, 'E');
1110 			else if (psl)
1111 				seq_putc(seq, 'P');
1112 			else /* Unused bits remain */
1113 				seq_putc(seq, '0');
1114 		}
1115 		sep = true;
1116 	}
1117 	seq_putc(seq, '\n');
1118 	mutex_unlock(&rdtgroup_mutex);
1119 	cpus_read_unlock();
1120 	return 0;
1121 }
1122 
1123 static int rdt_min_bw_show(struct kernfs_open_file *of,
1124 			   struct seq_file *seq, void *v)
1125 {
1126 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1127 	struct rdt_resource *r = s->res;
1128 
1129 	seq_printf(seq, "%u\n", r->membw.min_bw);
1130 	return 0;
1131 }
1132 
1133 static int rdt_num_rmids_show(struct kernfs_open_file *of,
1134 			      struct seq_file *seq, void *v)
1135 {
1136 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1137 
1138 	seq_printf(seq, "%d\n", r->mon.num_rmid);
1139 
1140 	return 0;
1141 }
1142 
1143 static int rdt_mon_features_show(struct kernfs_open_file *of,
1144 				 struct seq_file *seq, void *v)
1145 {
1146 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1147 	struct mon_evt *mevt;
1148 
1149 	for_each_mon_event(mevt) {
1150 		if (mevt->rid != r->rid || !mevt->enabled)
1151 			continue;
1152 		seq_printf(seq, "%s\n", mevt->name);
1153 		if (mevt->configurable &&
1154 		    !resctrl_arch_mbm_cntr_assign_enabled(r))
1155 			seq_printf(seq, "%s_config\n", mevt->name);
1156 	}
1157 
1158 	return 0;
1159 }
1160 
1161 static int rdt_bw_gran_show(struct kernfs_open_file *of,
1162 			    struct seq_file *seq, void *v)
1163 {
1164 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1165 	struct rdt_resource *r = s->res;
1166 
1167 	seq_printf(seq, "%u\n", r->membw.bw_gran);
1168 	return 0;
1169 }
1170 
1171 static int rdt_delay_linear_show(struct kernfs_open_file *of,
1172 				 struct seq_file *seq, void *v)
1173 {
1174 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1175 	struct rdt_resource *r = s->res;
1176 
1177 	seq_printf(seq, "%u\n", r->membw.delay_linear);
1178 	return 0;
1179 }
1180 
1181 static int max_threshold_occ_show(struct kernfs_open_file *of,
1182 				  struct seq_file *seq, void *v)
1183 {
1184 	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);
1185 
1186 	return 0;
1187 }
1188 
1189 static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
1190 					 struct seq_file *seq, void *v)
1191 {
1192 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1193 	struct rdt_resource *r = s->res;
1194 
1195 	switch (r->membw.throttle_mode) {
1196 	case THREAD_THROTTLE_PER_THREAD:
1197 		seq_puts(seq, "per-thread\n");
1198 		return 0;
1199 	case THREAD_THROTTLE_MAX:
1200 		seq_puts(seq, "max\n");
1201 		return 0;
1202 	case THREAD_THROTTLE_UNDEFINED:
1203 		seq_puts(seq, "undefined\n");
1204 		return 0;
1205 	}
1206 
1207 	WARN_ON_ONCE(1);
1208 
1209 	return 0;
1210 }
1211 
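/*
 * Illustrative usage: writing a byte count such as "16384" sets
 * resctrl_rmid_realloc_threshold, rounded by the architecture via
 * resctrl_arch_round_mon_val(); values above resctrl_rmid_realloc_limit
 * are rejected with -EINVAL. The file is typically exposed as
 * "max_threshold_occupancy" under the info directory.
 */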
1212 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
1213 				       char *buf, size_t nbytes, loff_t off)
1214 {
1215 	unsigned int bytes;
1216 	int ret;
1217 
1218 	ret = kstrtouint(buf, 0, &bytes);
1219 	if (ret)
1220 		return ret;
1221 
1222 	if (bytes > resctrl_rmid_realloc_limit)
1223 		return -EINVAL;
1224 
1225 	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);
1226 
1227 	return nbytes;
1228 }
1229 
1230 /*
1231  * rdtgroup_mode_show - Display mode of this resource group
1232  */
1233 static int rdtgroup_mode_show(struct kernfs_open_file *of,
1234 			      struct seq_file *s, void *v)
1235 {
1236 	struct rdtgroup *rdtgrp;
1237 
1238 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1239 	if (!rdtgrp) {
1240 		rdtgroup_kn_unlock(of->kn);
1241 		return -ENOENT;
1242 	}
1243 
1244 	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
1245 
1246 	rdtgroup_kn_unlock(of->kn);
1247 	return 0;
1248 }
1249 
1250 static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
1251 {
1252 	switch (my_type) {
1253 	case CDP_CODE:
1254 		return CDP_DATA;
1255 	case CDP_DATA:
1256 		return CDP_CODE;
1257 	default:
1258 	case CDP_NONE:
1259 		return CDP_NONE;
1260 	}
1261 }
1262 
1263 static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
1264 					struct seq_file *seq, void *v)
1265 {
1266 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1267 	struct rdt_resource *r = s->res;
1268 
1269 	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
1270 
1271 	return 0;
1272 }
1273 
1274 /**
1275  * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
1276  * @r: Resource to which domain instance @d belongs.
1277  * @d: The domain instance for which @closid is being tested.
1278  * @cbm: Capacity bitmask being tested.
1279  * @closid: Intended closid for @cbm.
1280  * @type: CDP type of @r.
1281  * @exclusive: Only check if overlaps with exclusive resource groups
1282  *
1283  * Checks if provided @cbm intended to be used for @closid on domain
1284  * @d overlaps with any other closids or other hardware usage associated
1285  * with this domain. If @exclusive is true then only overlaps with
1286  * resource groups in exclusive mode will be considered. If @exclusive
1287  * is false then overlaps with any resource group or hardware entities
1288  * will be considered.
1289  *
1290  * @cbm is unsigned long, even if only 32 bits are used, to make the
1291  * bitmap functions work correctly.
1292  *
1293  * Return: false if CBM does not overlap, true if it does.
1294  */
1295 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
1296 				    unsigned long cbm, int closid,
1297 				    enum resctrl_conf_type type, bool exclusive)
1298 {
1299 	enum rdtgrp_mode mode;
1300 	unsigned long ctrl_b;
1301 	int i;
1302 
1303 	/* Check for any overlap with regions used by hardware directly */
1304 	if (!exclusive) {
1305 		ctrl_b = r->cache.shareable_bits;
1306 		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
1307 			return true;
1308 	}
1309 
1310 	/* Check for overlap with other resource groups */
1311 	for (i = 0; i < closids_supported(); i++) {
1312 		ctrl_b = resctrl_arch_get_config(r, d, i, type);
1313 		mode = rdtgroup_mode_by_closid(i);
1314 		if (closid_allocated(i) && i != closid &&
1315 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
1316 			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
1317 				if (exclusive) {
1318 					if (mode == RDT_MODE_EXCLUSIVE)
1319 						return true;
1320 					continue;
1321 				}
1322 				return true;
1323 			}
1324 		}
1325 	}
1326 
1327 	return false;
1328 }
1329 
1330 /**
1331  * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
1332  * @s: Schema for the resource to which domain instance @d belongs.
1333  * @d: The domain instance for which @closid is being tested.
1334  * @cbm: Capacity bitmask being tested.
1335  * @closid: Intended closid for @cbm.
1336  * @exclusive: Only check if overlaps with exclusive resource groups
1337  *
1338  * Resources that can be allocated using a CBM can use the CBM to control
1339  * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
1340  * for overlap. Overlap test is not limited to the specific resource for
1341  * which the CBM is intended though - when dealing with CDP resources that
1342  * share the underlying hardware the overlap check should be performed on
1343  * the CDP resource sharing the hardware also.
1344  *
1345  * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
1346  * overlap test.
1347  *
1348  * Return: true if CBM overlap detected, false if there is no overlap
1349  */
1350 bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
1351 			   unsigned long cbm, int closid, bool exclusive)
1352 {
1353 	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
1354 	struct rdt_resource *r = s->res;
1355 
1356 	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
1357 				    exclusive))
1358 		return true;
1359 
1360 	if (!resctrl_arch_get_cdp_enabled(r->rid))
1361 		return false;
1362 	return  __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
1363 }
1364 
1365 /**
1366  * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1367  * @rdtgrp: Resource group identified through its closid.
1368  *
1369  * An exclusive resource group implies that there should be no sharing of
1370  * its allocated resources. At the time this group is considered to be
1371  * exclusive this test can determine if its current schemata supports this
1372  * setting by testing for overlap with all other resource groups.
1373  *
1374  * Return: true if resource group can be exclusive, false if there is overlap
1375  * with allocations of other resource groups and thus this resource group
1376  * cannot be exclusive.
1377  */
1378 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1379 {
1380 	int closid = rdtgrp->closid;
1381 	struct rdt_ctrl_domain *d;
1382 	struct resctrl_schema *s;
1383 	struct rdt_resource *r;
1384 	bool has_cache = false;
1385 	u32 ctrl;
1386 
1387 	/* Walking r->domains, ensure it can't race with cpuhp */
1388 	lockdep_assert_cpus_held();
1389 
1390 	list_for_each_entry(s, &resctrl_schema_all, list) {
1391 		r = s->res;
1392 		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
1393 			continue;
1394 		has_cache = true;
1395 		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
1396 			ctrl = resctrl_arch_get_config(r, d, closid,
1397 						       s->conf_type);
1398 			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
1399 				rdt_last_cmd_puts("Schemata overlaps\n");
1400 				return false;
1401 			}
1402 		}
1403 	}
1404 
1405 	if (!has_cache) {
1406 		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
1407 		return false;
1408 	}
1409 
1410 	return true;
1411 }
1412 
1413 /*
1414  * rdtgroup_mode_write - Modify the resource group's mode
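 *
 * Illustrative usage: writing "exclusive\n" to the group's "mode" file
 * switches the group to exclusive mode, provided its current schemata
 * does not overlap with any other resource group.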
1415  */
1416 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1417 				   char *buf, size_t nbytes, loff_t off)
1418 {
1419 	struct rdtgroup *rdtgrp;
1420 	enum rdtgrp_mode mode;
1421 	int ret = 0;
1422 
1423 	/* Valid input requires a trailing newline */
1424 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1425 		return -EINVAL;
1426 	buf[nbytes - 1] = '\0';
1427 
1428 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1429 	if (!rdtgrp) {
1430 		rdtgroup_kn_unlock(of->kn);
1431 		return -ENOENT;
1432 	}
1433 
1434 	rdt_last_cmd_clear();
1435 
1436 	mode = rdtgrp->mode;
1437 
1438 	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
1439 	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
1440 	    (!strcmp(buf, "pseudo-locksetup") &&
1441 	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
1442 	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
1443 		goto out;
1444 
1445 	if (mode == RDT_MODE_PSEUDO_LOCKED) {
1446 		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
1447 		ret = -EINVAL;
1448 		goto out;
1449 	}
1450 
1451 	if (!strcmp(buf, "shareable")) {
1452 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1453 			ret = rdtgroup_locksetup_exit(rdtgrp);
1454 			if (ret)
1455 				goto out;
1456 		}
1457 		rdtgrp->mode = RDT_MODE_SHAREABLE;
1458 	} else if (!strcmp(buf, "exclusive")) {
1459 		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1460 			ret = -EINVAL;
1461 			goto out;
1462 		}
1463 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1464 			ret = rdtgroup_locksetup_exit(rdtgrp);
1465 			if (ret)
1466 				goto out;
1467 		}
1468 		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
1469 	} else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
1470 		   !strcmp(buf, "pseudo-locksetup")) {
1471 		ret = rdtgroup_locksetup_enter(rdtgrp);
1472 		if (ret)
1473 			goto out;
1474 		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
1475 	} else {
1476 		rdt_last_cmd_puts("Unknown or unsupported mode\n");
1477 		ret = -EINVAL;
1478 	}
1479 
1480 out:
1481 	rdtgroup_kn_unlock(of->kn);
1482 	return ret ?: nbytes;
1483 }
1484 
1485 /**
1486  * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1487  * @r: RDT resource to which @d belongs.
1488  * @d: RDT domain instance.
1489  * @cbm: bitmask for which the size should be computed.
1490  *
1491  * The bitmask provided associated with the RDT domain instance @d will be
1492  * translated into how many bytes it represents. The size in bytes is
1493  * computed by first dividing the total cache size by the CBM length to
1494  * determine how many bytes each bit in the bitmask represents. The result
1495  * is multiplied with the number of bits set in the bitmask.
1496  *
1497  * @cbm is unsigned long, even if only 32 bits are used, to make the
1498  * bitmap functions work correctly.
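 *
 * Worked example (hypothetical numbers): for a 32 MiB cache with
 * cbm_len = 16, each CBM bit represents 2 MiB, so a @cbm with four bits
 * set translates to 8 MiB (8388608 bytes).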
1499  */
1500 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
1501 				  struct rdt_ctrl_domain *d, unsigned long cbm)
1502 {
1503 	unsigned int size = 0;
1504 	struct cacheinfo *ci;
1505 	int num_b;
1506 
1507 	if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
1508 		return size;
1509 
1510 	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
1511 	ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
1512 	if (ci)
1513 		size = ci->size / r->cache.cbm_len * num_b;
1514 
1515 	return size;
1516 }
1517 
1518 bool is_mba_sc(struct rdt_resource *r)
1519 {
1520 	if (!r)
1521 		r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
1522 
1523 	/*
1524 	 * The software controller support is only applicable to the MBA resource.
1525 	 * Make sure to check for resource type.
1526 	 */
1527 	if (r->rid != RDT_RESOURCE_MBA)
1528 		return false;
1529 
1530 	return r->membw.mba_sc;
1531 }
1532 
1533 /*
1534  * rdtgroup_size_show - Display size in bytes of allocated regions
1535  *
1536  * The "size" file mirrors the layout of the "schemata" file, printing the
1537  * size in bytes of each region instead of the capacity bitmask.
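 *
 * Illustrative output (hypothetical values):
 *
 *   L3:0=4194304;1=4194304
 *   MB:0=100;1=100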
1538  */
1539 static int rdtgroup_size_show(struct kernfs_open_file *of,
1540 			      struct seq_file *s, void *v)
1541 {
1542 	struct resctrl_schema *schema;
1543 	enum resctrl_conf_type type;
1544 	struct rdt_ctrl_domain *d;
1545 	struct rdtgroup *rdtgrp;
1546 	struct rdt_resource *r;
1547 	unsigned int size;
1548 	int ret = 0;
1549 	u32 closid;
1550 	bool sep;
1551 	u32 ctrl;
1552 
1553 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
1554 	if (!rdtgrp) {
1555 		rdtgroup_kn_unlock(of->kn);
1556 		return -ENOENT;
1557 	}
1558 
1559 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1560 		if (!rdtgrp->plr->d) {
1561 			rdt_last_cmd_clear();
1562 			rdt_last_cmd_puts("Cache domain offline\n");
1563 			ret = -ENODEV;
1564 		} else {
1565 			seq_printf(s, "%*s:", max_name_width,
1566 				   rdtgrp->plr->s->name);
1567 			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
1568 						    rdtgrp->plr->d,
1569 						    rdtgrp->plr->cbm);
1570 			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
1571 		}
1572 		goto out;
1573 	}
1574 
1575 	closid = rdtgrp->closid;
1576 
1577 	list_for_each_entry(schema, &resctrl_schema_all, list) {
1578 		r = schema->res;
1579 		type = schema->conf_type;
1580 		sep = false;
1581 		seq_printf(s, "%*s:", max_name_width, schema->name);
1582 		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
1583 			if (sep)
1584 				seq_putc(s, ';');
1585 			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1586 				size = 0;
1587 			} else {
1588 				if (is_mba_sc(r))
1589 					ctrl = d->mbps_val[closid];
1590 				else
1591 					ctrl = resctrl_arch_get_config(r, d,
1592 								       closid,
1593 								       type);
1594 				if (r->rid == RDT_RESOURCE_MBA ||
1595 				    r->rid == RDT_RESOURCE_SMBA)
1596 					size = ctrl;
1597 				else
1598 					size = rdtgroup_cbm_to_size(r, d, ctrl);
1599 			}
1600 			seq_printf(s, "%d=%u", d->hdr.id, size);
1601 			sep = true;
1602 		}
1603 		seq_putc(s, '\n');
1604 	}
1605 
1606 out:
1607 	rdtgroup_kn_unlock(of->kn);
1608 
1609 	return ret;
1610 }
1611 
1612 static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
1613 {
1614 	smp_call_function_any(&mon_info->d->hdr.cpu_mask,
1615 			      resctrl_arch_mon_event_config_read, mon_info, 1);
1616 }
1617 
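/*
 * Illustrative output (hypothetical values): "0=0x7f;1=0x7f", i.e. one
 * "<domain id>=0x<config>" entry per monitoring domain, separated by ';'.
 */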
1618 static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
1619 {
1620 	struct resctrl_mon_config_info mon_info;
1621 	struct rdt_mon_domain *dom;
1622 	bool sep = false;
1623 
1624 	cpus_read_lock();
1625 	mutex_lock(&rdtgroup_mutex);
1626 
1627 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1628 		if (sep)
1629 			seq_puts(s, ";");
1630 
1631 		memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
1632 		mon_info.r = r;
1633 		mon_info.d = dom;
1634 		mon_info.evtid = evtid;
1635 		mondata_config_read(&mon_info);
1636 
1637 		seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
1638 		sep = true;
1639 	}
1640 	seq_puts(s, "\n");
1641 
1642 	mutex_unlock(&rdtgroup_mutex);
1643 	cpus_read_unlock();
1644 
1645 	return 0;
1646 }
1647 
1648 static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
1649 				       struct seq_file *seq, void *v)
1650 {
1651 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1652 
1653 	mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
1654 
1655 	return 0;
1656 }
1657 
1658 static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
1659 				       struct seq_file *seq, void *v)
1660 {
1661 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1662 
1663 	mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
1664 
1665 	return 0;
1666 }
1667 
1668 static void mbm_config_write_domain(struct rdt_resource *r,
1669 				    struct rdt_mon_domain *d, u32 evtid, u32 val)
1670 {
1671 	struct resctrl_mon_config_info mon_info = {0};
1672 
1673 	/*
1674 	 * Read the current config value first. If both are the same then
1675 	 * no need to write it again.
1676 	 */
1677 	mon_info.r = r;
1678 	mon_info.d = d;
1679 	mon_info.evtid = evtid;
1680 	mondata_config_read(&mon_info);
1681 	if (mon_info.mon_config == val)
1682 		return;
1683 
1684 	mon_info.mon_config = val;
1685 
1686 	/*
1687 	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
1688 	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
1689 	 * are scoped at the domain level. Writing any of these MSRs
1690 	 * on one CPU is observed by all the CPUs in the domain.
1691 	 */
1692 	smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
1693 			      &mon_info, 1);
1694 
1695 	/*
1696 	 * When an Event Configuration is changed, the bandwidth counters
1697 	 * for all RMIDs and Events will be cleared by the hardware. The
1698 	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
1699 	 * every RMID on the next read to any event for every RMID.
1700 	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
1701 	 * cleared while it is tracked by the hardware. Clear the
1702 	 * mbm_local and mbm_total counts for all the RMIDs.
1703 	 */
1704 	resctrl_arch_reset_rmid_all(r, d);
1705 }
1706 
1707 static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
1708 {
1709 	char *dom_str = NULL, *id_str;
1710 	unsigned long dom_id, val;
1711 	struct rdt_mon_domain *d;
1712 
1713 	/* Walking r->domains, ensure it can't race with cpuhp */
1714 	lockdep_assert_cpus_held();
1715 
1716 next:
1717 	if (!tok || tok[0] == '\0')
1718 		return 0;
1719 
1720 	/* Start processing the strings for each domain */
1721 	dom_str = strim(strsep(&tok, ";"));
1722 	id_str = strsep(&dom_str, "=");
1723 
1724 	if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1725 		rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
1726 		return -EINVAL;
1727 	}
1728 
1729 	if (!dom_str || kstrtoul(dom_str, 16, &val)) {
1730 		rdt_last_cmd_puts("Non-numeric event configuration value\n");
1731 		return -EINVAL;
1732 	}
1733 
1734 	/* Value from user cannot be more than the supported set of events */
1735 	if ((val & r->mon.mbm_cfg_mask) != val) {
1736 		rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
1737 				    r->mon.mbm_cfg_mask);
1738 		return -EINVAL;
1739 	}
1740 
1741 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
1742 		if (d->hdr.id == dom_id) {
1743 			mbm_config_write_domain(r, d, evtid, val);
1744 			goto next;
1745 		}
1746 	}
1747 
1748 	return -EINVAL;
1749 }
1750 
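/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the parser above accepts ';'-separated "<domain_id>=<hex value>"
 * pairs, so a write to one of the config files is expected to look
 * roughly like:
 *
 *	# echo "0=0x30;1=0x30" > /sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config
 *
 * where each value must stay within r->mon.mbm_cfg_mask for that resource.
 */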
mbm_total_bytes_config_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)1751 static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
1752 					    char *buf, size_t nbytes,
1753 					    loff_t off)
1754 {
1755 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1756 	int ret;
1757 
1758 	/* Valid input requires a trailing newline */
1759 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1760 		return -EINVAL;
1761 
1762 	cpus_read_lock();
1763 	mutex_lock(&rdtgroup_mutex);
1764 
1765 	rdt_last_cmd_clear();
1766 
1767 	buf[nbytes - 1] = '\0';
1768 
1769 	ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);
1770 
1771 	mutex_unlock(&rdtgroup_mutex);
1772 	cpus_read_unlock();
1773 
1774 	return ret ?: nbytes;
1775 }
1776 
mbm_local_bytes_config_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)1777 static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
1778 					    char *buf, size_t nbytes,
1779 					    loff_t off)
1780 {
1781 	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1782 	int ret;
1783 
1784 	/* Valid input requires a trailing newline */
1785 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
1786 		return -EINVAL;
1787 
1788 	cpus_read_lock();
1789 	mutex_lock(&rdtgroup_mutex);
1790 
1791 	rdt_last_cmd_clear();
1792 
1793 	buf[nbytes - 1] = '\0';
1794 
1795 	ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);
1796 
1797 	mutex_unlock(&rdtgroup_mutex);
1798 	cpus_read_unlock();
1799 
1800 	return ret ?: nbytes;
1801 }
1802 
1803 /*
1804  * resctrl_bmec_files_show() — Controls the visibility of BMEC-related resctrl
1805  * files. When @show is true, the files are displayed; when false, the files
1806  * are hidden.
1807  * Don't treat kernfs_find_and_get failure as an error, since this function may
1808  * be called regardless of whether BMEC is supported or the event is enabled.
1809  */
resctrl_bmec_files_show(struct rdt_resource * r,struct kernfs_node * l3_mon_kn,bool show)1810 void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_kn,
1811 			     bool show)
1812 {
1813 	struct kernfs_node *kn_config, *mon_kn = NULL;
1814 	char name[32];
1815 
1816 	if (!l3_mon_kn) {
1817 		sprintf(name, "%s_MON", r->name);
1818 		mon_kn = kernfs_find_and_get(kn_info, name);
1819 		if (!mon_kn)
1820 			return;
1821 		l3_mon_kn = mon_kn;
1822 	}
1823 
1824 	kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_total_bytes_config");
1825 	if (kn_config) {
1826 		kernfs_show(kn_config, show);
1827 		kernfs_put(kn_config);
1828 	}
1829 
1830 	kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_local_bytes_config");
1831 	if (kn_config) {
1832 		kernfs_show(kn_config, show);
1833 		kernfs_put(kn_config);
1834 	}
1835 
1836 	/* Release the reference only if it was acquired */
1837 	if (mon_kn)
1838 		kernfs_put(mon_kn);
1839 }
1840 
1841 /* resctrl files used for "info" directories and resource group directories. */
1842 static struct rftype res_common_files[] = {
1843 	{
1844 		.name		= "last_cmd_status",
1845 		.mode		= 0444,
1846 		.kf_ops		= &rdtgroup_kf_single_ops,
1847 		.seq_show	= rdt_last_cmd_status_show,
1848 		.fflags		= RFTYPE_TOP_INFO,
1849 	},
1850 	{
1851 		.name		= "mbm_assign_on_mkdir",
1852 		.mode		= 0644,
1853 		.kf_ops		= &rdtgroup_kf_single_ops,
1854 		.seq_show	= resctrl_mbm_assign_on_mkdir_show,
1855 		.write		= resctrl_mbm_assign_on_mkdir_write,
1856 	},
1857 	{
1858 		.name		= "num_closids",
1859 		.mode		= 0444,
1860 		.kf_ops		= &rdtgroup_kf_single_ops,
1861 		.seq_show	= rdt_num_closids_show,
1862 		.fflags		= RFTYPE_CTRL_INFO,
1863 	},
1864 	{
1865 		.name		= "mon_features",
1866 		.mode		= 0444,
1867 		.kf_ops		= &rdtgroup_kf_single_ops,
1868 		.seq_show	= rdt_mon_features_show,
1869 		.fflags		= RFTYPE_MON_INFO,
1870 	},
1871 	{
1872 		.name		= "available_mbm_cntrs",
1873 		.mode		= 0444,
1874 		.kf_ops		= &rdtgroup_kf_single_ops,
1875 		.seq_show	= resctrl_available_mbm_cntrs_show,
1876 	},
1877 	{
1878 		.name		= "num_rmids",
1879 		.mode		= 0444,
1880 		.kf_ops		= &rdtgroup_kf_single_ops,
1881 		.seq_show	= rdt_num_rmids_show,
1882 		.fflags		= RFTYPE_MON_INFO,
1883 	},
1884 	{
1885 		.name		= "cbm_mask",
1886 		.mode		= 0444,
1887 		.kf_ops		= &rdtgroup_kf_single_ops,
1888 		.seq_show	= rdt_default_ctrl_show,
1889 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1890 	},
1891 	{
1892 		.name		= "num_mbm_cntrs",
1893 		.mode		= 0444,
1894 		.kf_ops		= &rdtgroup_kf_single_ops,
1895 		.seq_show	= resctrl_num_mbm_cntrs_show,
1896 	},
1897 	{
1898 		.name		= "min_cbm_bits",
1899 		.mode		= 0444,
1900 		.kf_ops		= &rdtgroup_kf_single_ops,
1901 		.seq_show	= rdt_min_cbm_bits_show,
1902 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1903 	},
1904 	{
1905 		.name		= "shareable_bits",
1906 		.mode		= 0444,
1907 		.kf_ops		= &rdtgroup_kf_single_ops,
1908 		.seq_show	= rdt_shareable_bits_show,
1909 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1910 	},
1911 	{
1912 		.name		= "bit_usage",
1913 		.mode		= 0444,
1914 		.kf_ops		= &rdtgroup_kf_single_ops,
1915 		.seq_show	= rdt_bit_usage_show,
1916 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1917 	},
1918 	{
1919 		.name		= "min_bandwidth",
1920 		.mode		= 0444,
1921 		.kf_ops		= &rdtgroup_kf_single_ops,
1922 		.seq_show	= rdt_min_bw_show,
1923 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1924 	},
1925 	{
1926 		.name		= "bandwidth_gran",
1927 		.mode		= 0444,
1928 		.kf_ops		= &rdtgroup_kf_single_ops,
1929 		.seq_show	= rdt_bw_gran_show,
1930 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1931 	},
1932 	{
1933 		.name		= "delay_linear",
1934 		.mode		= 0444,
1935 		.kf_ops		= &rdtgroup_kf_single_ops,
1936 		.seq_show	= rdt_delay_linear_show,
1937 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1938 	},
1939 	/*
1940 	 * It is platform specific which (if any) capabilities are provided
1941 	 * by thread_throttle_mode. Defer "fflags" initialization to
1942 	 * platform discovery.
1943 	 */
1944 	{
1945 		.name		= "thread_throttle_mode",
1946 		.mode		= 0444,
1947 		.kf_ops		= &rdtgroup_kf_single_ops,
1948 		.seq_show	= rdt_thread_throttle_mode_show,
1949 	},
1950 	{
1951 		.name		= "max_threshold_occupancy",
1952 		.mode		= 0644,
1953 		.kf_ops		= &rdtgroup_kf_single_ops,
1954 		.write		= max_threshold_occ_write,
1955 		.seq_show	= max_threshold_occ_show,
1956 		.fflags		= RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
1957 	},
1958 	{
1959 		.name		= "mbm_total_bytes_config",
1960 		.mode		= 0644,
1961 		.kf_ops		= &rdtgroup_kf_single_ops,
1962 		.seq_show	= mbm_total_bytes_config_show,
1963 		.write		= mbm_total_bytes_config_write,
1964 	},
1965 	{
1966 		.name		= "mbm_local_bytes_config",
1967 		.mode		= 0644,
1968 		.kf_ops		= &rdtgroup_kf_single_ops,
1969 		.seq_show	= mbm_local_bytes_config_show,
1970 		.write		= mbm_local_bytes_config_write,
1971 	},
1972 	{
1973 		.name		= "event_filter",
1974 		.mode		= 0644,
1975 		.kf_ops		= &rdtgroup_kf_single_ops,
1976 		.seq_show	= event_filter_show,
1977 		.write		= event_filter_write,
1978 	},
1979 	{
1980 		.name		= "mbm_L3_assignments",
1981 		.mode		= 0644,
1982 		.kf_ops		= &rdtgroup_kf_single_ops,
1983 		.seq_show	= mbm_L3_assignments_show,
1984 		.write		= mbm_L3_assignments_write,
1985 	},
1986 	{
1987 		.name		= "mbm_assign_mode",
1988 		.mode		= 0644,
1989 		.kf_ops		= &rdtgroup_kf_single_ops,
1990 		.seq_show	= resctrl_mbm_assign_mode_show,
1991 		.write		= resctrl_mbm_assign_mode_write,
1992 		.fflags		= RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
1993 	},
1994 	{
1995 		.name		= "cpus",
1996 		.mode		= 0644,
1997 		.kf_ops		= &rdtgroup_kf_single_ops,
1998 		.write		= rdtgroup_cpus_write,
1999 		.seq_show	= rdtgroup_cpus_show,
2000 		.fflags		= RFTYPE_BASE,
2001 	},
2002 	{
2003 		.name		= "cpus_list",
2004 		.mode		= 0644,
2005 		.kf_ops		= &rdtgroup_kf_single_ops,
2006 		.write		= rdtgroup_cpus_write,
2007 		.seq_show	= rdtgroup_cpus_show,
2008 		.flags		= RFTYPE_FLAGS_CPUS_LIST,
2009 		.fflags		= RFTYPE_BASE,
2010 	},
2011 	{
2012 		.name		= "tasks",
2013 		.mode		= 0644,
2014 		.kf_ops		= &rdtgroup_kf_single_ops,
2015 		.write		= rdtgroup_tasks_write,
2016 		.seq_show	= rdtgroup_tasks_show,
2017 		.fflags		= RFTYPE_BASE,
2018 	},
2019 	{
2020 		.name		= "mon_hw_id",
2021 		.mode		= 0444,
2022 		.kf_ops		= &rdtgroup_kf_single_ops,
2023 		.seq_show	= rdtgroup_rmid_show,
2024 		.fflags		= RFTYPE_MON_BASE | RFTYPE_DEBUG,
2025 	},
2026 	{
2027 		.name		= "schemata",
2028 		.mode		= 0644,
2029 		.kf_ops		= &rdtgroup_kf_single_ops,
2030 		.write		= rdtgroup_schemata_write,
2031 		.seq_show	= rdtgroup_schemata_show,
2032 		.fflags		= RFTYPE_CTRL_BASE,
2033 	},
2034 	{
2035 		.name		= "mba_MBps_event",
2036 		.mode		= 0644,
2037 		.kf_ops		= &rdtgroup_kf_single_ops,
2038 		.write		= rdtgroup_mba_mbps_event_write,
2039 		.seq_show	= rdtgroup_mba_mbps_event_show,
2040 	},
2041 	{
2042 		.name		= "mode",
2043 		.mode		= 0644,
2044 		.kf_ops		= &rdtgroup_kf_single_ops,
2045 		.write		= rdtgroup_mode_write,
2046 		.seq_show	= rdtgroup_mode_show,
2047 		.fflags		= RFTYPE_CTRL_BASE,
2048 	},
2049 	{
2050 		.name		= "size",
2051 		.mode		= 0444,
2052 		.kf_ops		= &rdtgroup_kf_single_ops,
2053 		.seq_show	= rdtgroup_size_show,
2054 		.fflags		= RFTYPE_CTRL_BASE,
2055 	},
2056 	{
2057 		.name		= "sparse_masks",
2058 		.mode		= 0444,
2059 		.kf_ops		= &rdtgroup_kf_single_ops,
2060 		.seq_show	= rdt_has_sparse_bitmasks_show,
2061 		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
2062 	},
2063 	{
2064 		.name		= "ctrl_hw_id",
2065 		.mode		= 0444,
2066 		.kf_ops		= &rdtgroup_kf_single_ops,
2067 		.seq_show	= rdtgroup_closid_show,
2068 		.fflags		= RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
2069 	},
2070 };
2071 
rdtgroup_add_files(struct kernfs_node * kn,unsigned long fflags)2072 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
2073 {
2074 	struct rftype *rfts, *rft;
2075 	int ret, len;
2076 
2077 	rfts = res_common_files;
2078 	len = ARRAY_SIZE(res_common_files);
2079 
2080 	lockdep_assert_held(&rdtgroup_mutex);
2081 
2082 	if (resctrl_debug)
2083 		fflags |= RFTYPE_DEBUG;
2084 
2085 	for (rft = rfts; rft < rfts + len; rft++) {
2086 		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
2087 			ret = rdtgroup_add_file(kn, rft);
2088 			if (ret)
2089 				goto error;
2090 		}
2091 	}
2092 
2093 	return 0;
2094 error:
2095 	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
2096 	while (--rft >= rfts) {
2097 		if ((fflags & rft->fflags) == rft->fflags)
2098 			kernfs_remove_by_name(kn, rft->name);
2099 	}
2100 	return ret;
2101 }
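/*
 * Worked example (editor's illustration): with fflags ==
 * RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, a file whose rft->fflags is
 * RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE matches because all of its bits are
 * present in fflags, while a file marked RFTYPE_CTRL_INFO | RFTYPE_RES_MB
 * does not. Files with rft->fflags == 0 are skipped here and are only
 * created once resctrl_file_fflags_init() has assigned them flags, as
 * thread_throttle_mode_init() below does for "thread_throttle_mode".
 */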
2102 
rdtgroup_get_rftype_by_name(const char * name)2103 static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
2104 {
2105 	struct rftype *rfts, *rft;
2106 	int len;
2107 
2108 	rfts = res_common_files;
2109 	len = ARRAY_SIZE(res_common_files);
2110 
2111 	for (rft = rfts; rft < rfts + len; rft++) {
2112 		if (!strcmp(rft->name, name))
2113 			return rft;
2114 	}
2115 
2116 	return NULL;
2117 }
2118 
thread_throttle_mode_init(void)2119 static void thread_throttle_mode_init(void)
2120 {
2121 	enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED;
2122 	struct rdt_resource *r_mba, *r_smba;
2123 
2124 	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2125 	if (r_mba->alloc_capable &&
2126 	    r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
2127 		throttle_mode = r_mba->membw.throttle_mode;
2128 
2129 	r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA);
2130 	if (r_smba->alloc_capable &&
2131 	    r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
2132 		throttle_mode = r_smba->membw.throttle_mode;
2133 
2134 	if (throttle_mode == THREAD_THROTTLE_UNDEFINED)
2135 		return;
2136 
2137 	resctrl_file_fflags_init("thread_throttle_mode",
2138 				 RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
2139 }
2140 
resctrl_file_fflags_init(const char * config,unsigned long fflags)2141 void resctrl_file_fflags_init(const char *config, unsigned long fflags)
2142 {
2143 	struct rftype *rft;
2144 
2145 	rft = rdtgroup_get_rftype_by_name(config);
2146 	if (rft)
2147 		rft->fflags = fflags;
2148 }
2149 
2150 /**
2151  * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
2152  * @r: The resource group with which the file is associated.
2153  * @name: Name of the file
2154  *
2155  * The permissions of the named resctrl file, directory, or link are modified
2156  * to not allow read, write, or execute by any user.
2157  *
2158  * WARNING: This function is intended to communicate to the user that the
2159  * resctrl file has been locked down - that it is not relevant to the
2160  * particular state the system finds itself in. It should not be relied
2161  * on to protect from user access because after the file's permissions
2162  * are restricted the user can still change the permissions using chmod
2163  * from the command line.
2164  *
2165  * Return: 0 on success, <0 on failure.
2166  */
rdtgroup_kn_mode_restrict(struct rdtgroup * r,const char * name)2167 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
2168 {
2169 	struct iattr iattr = {.ia_valid = ATTR_MODE,};
2170 	struct kernfs_node *kn;
2171 	int ret = 0;
2172 
2173 	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
2174 	if (!kn)
2175 		return -ENOENT;
2176 
2177 	switch (kernfs_type(kn)) {
2178 	case KERNFS_DIR:
2179 		iattr.ia_mode = S_IFDIR;
2180 		break;
2181 	case KERNFS_FILE:
2182 		iattr.ia_mode = S_IFREG;
2183 		break;
2184 	case KERNFS_LINK:
2185 		iattr.ia_mode = S_IFLNK;
2186 		break;
2187 	}
2188 
2189 	ret = kernfs_setattr(kn, &iattr);
2190 	kernfs_put(kn);
2191 	return ret;
2192 }
2193 
2194 /**
2195  * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
2196  * @r: The resource group with which the file is associated.
2197  * @name: Name of the file
2198  * @mask: Mask of permissions that should be restored
2199  *
2200  * Restore the permissions of the named file. If @name is a directory the
2201  * permissions of its parent will be used.
2202  *
2203  * Return: 0 on success, <0 on failure.
2204  */
rdtgroup_kn_mode_restore(struct rdtgroup * r,const char * name,umode_t mask)2205 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
2206 			     umode_t mask)
2207 {
2208 	struct iattr iattr = {.ia_valid = ATTR_MODE,};
2209 	struct kernfs_node *kn, *parent;
2210 	struct rftype *rfts, *rft;
2211 	int ret, len;
2212 
2213 	rfts = res_common_files;
2214 	len = ARRAY_SIZE(res_common_files);
2215 
2216 	for (rft = rfts; rft < rfts + len; rft++) {
2217 		if (!strcmp(rft->name, name))
2218 			iattr.ia_mode = rft->mode & mask;
2219 	}
2220 
2221 	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
2222 	if (!kn)
2223 		return -ENOENT;
2224 
2225 	switch (kernfs_type(kn)) {
2226 	case KERNFS_DIR:
2227 		parent = kernfs_get_parent(kn);
2228 		if (parent) {
2229 			iattr.ia_mode |= parent->mode;
2230 			kernfs_put(parent);
2231 		}
2232 		iattr.ia_mode |= S_IFDIR;
2233 		break;
2234 	case KERNFS_FILE:
2235 		iattr.ia_mode |= S_IFREG;
2236 		break;
2237 	case KERNFS_LINK:
2238 		iattr.ia_mode |= S_IFLNK;
2239 		break;
2240 	}
2241 
2242 	ret = kernfs_setattr(kn, &iattr);
2243 	kernfs_put(kn);
2244 	return ret;
2245 }
2246 
resctrl_mkdir_event_configs(struct rdt_resource * r,struct kernfs_node * l3_mon_kn)2247 static int resctrl_mkdir_event_configs(struct rdt_resource *r, struct kernfs_node *l3_mon_kn)
2248 {
2249 	struct kernfs_node *kn_subdir, *kn_subdir2;
2250 	struct mon_evt *mevt;
2251 	int ret;
2252 
2253 	kn_subdir = kernfs_create_dir(l3_mon_kn, "event_configs", l3_mon_kn->mode, NULL);
2254 	if (IS_ERR(kn_subdir))
2255 		return PTR_ERR(kn_subdir);
2256 
2257 	ret = rdtgroup_kn_set_ugid(kn_subdir);
2258 	if (ret)
2259 		return ret;
2260 
2261 	for_each_mon_event(mevt) {
2262 		if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
2263 			continue;
2264 
2265 		kn_subdir2 = kernfs_create_dir(kn_subdir, mevt->name, kn_subdir->mode, mevt);
2266 		if (IS_ERR(kn_subdir2)) {
2267 			ret = PTR_ERR(kn_subdir2);
2268 			goto out;
2269 		}
2270 
2271 		ret = rdtgroup_kn_set_ugid(kn_subdir2);
2272 		if (ret)
2273 			goto out;
2274 
2275 		ret = rdtgroup_add_files(kn_subdir2, RFTYPE_ASSIGN_CONFIG);
2276 		if (ret)
2277 			break;
2278 	}
2279 
2280 out:
2281 	return ret;
2282 }
2283 
rdtgroup_mkdir_info_resdir(void * priv,char * name,unsigned long fflags)2284 static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
2285 				      unsigned long fflags)
2286 {
2287 	struct kernfs_node *kn_subdir;
2288 	struct rdt_resource *r;
2289 	int ret;
2290 
2291 	kn_subdir = kernfs_create_dir(kn_info, name,
2292 				      kn_info->mode, priv);
2293 	if (IS_ERR(kn_subdir))
2294 		return PTR_ERR(kn_subdir);
2295 
2296 	ret = rdtgroup_kn_set_ugid(kn_subdir);
2297 	if (ret)
2298 		return ret;
2299 
2300 	ret = rdtgroup_add_files(kn_subdir, fflags);
2301 	if (ret)
2302 		return ret;
2303 
2304 	if ((fflags & RFTYPE_MON_INFO) == RFTYPE_MON_INFO) {
2305 		r = priv;
2306 		if (r->mon.mbm_cntr_assignable) {
2307 			ret = resctrl_mkdir_event_configs(r, kn_subdir);
2308 			if (ret)
2309 				return ret;
2310 			/*
2311 			 * Hide BMEC related files if mbm_event mode
2312 			 * is enabled.
2313 			 */
2314 			if (resctrl_arch_mbm_cntr_assign_enabled(r))
2315 				resctrl_bmec_files_show(r, kn_subdir, false);
2316 		}
2317 	}
2318 
2319 	kernfs_activate(kn_subdir);
2320 
2321 	return ret;
2322 }
2323 
fflags_from_resource(struct rdt_resource * r)2324 static unsigned long fflags_from_resource(struct rdt_resource *r)
2325 {
2326 	switch (r->rid) {
2327 	case RDT_RESOURCE_L3:
2328 	case RDT_RESOURCE_L2:
2329 		return RFTYPE_RES_CACHE;
2330 	case RDT_RESOURCE_MBA:
2331 	case RDT_RESOURCE_SMBA:
2332 		return RFTYPE_RES_MB;
2333 	}
2334 
2335 	return WARN_ON_ONCE(1);
2336 }
2337 
rdtgroup_create_info_dir(struct kernfs_node * parent_kn)2338 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
2339 {
2340 	struct resctrl_schema *s;
2341 	struct rdt_resource *r;
2342 	unsigned long fflags;
2343 	char name[32];
2344 	int ret;
2345 
2346 	/* create the directory */
2347 	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
2348 	if (IS_ERR(kn_info))
2349 		return PTR_ERR(kn_info);
2350 
2351 	ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO);
2352 	if (ret)
2353 		goto out_destroy;
2354 
2355 	/* Loop over enabled controls; these are all alloc_capable. */
2356 	list_for_each_entry(s, &resctrl_schema_all, list) {
2357 		r = s->res;
2358 		fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO;
2359 		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
2360 		if (ret)
2361 			goto out_destroy;
2362 	}
2363 
2364 	for_each_mon_capable_rdt_resource(r) {
2365 		fflags = fflags_from_resource(r) | RFTYPE_MON_INFO;
2366 		sprintf(name, "%s_MON", r->name);
2367 		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
2368 		if (ret)
2369 			goto out_destroy;
2370 	}
2371 
2372 	ret = rdtgroup_kn_set_ugid(kn_info);
2373 	if (ret)
2374 		goto out_destroy;
2375 
2376 	kernfs_activate(kn_info);
2377 
2378 	return 0;
2379 
2380 out_destroy:
2381 	kernfs_remove(kn_info);
2382 	return ret;
2383 }
2384 
2385 static int
mongroup_create_dir(struct kernfs_node * parent_kn,struct rdtgroup * prgrp,char * name,struct kernfs_node ** dest_kn)2386 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
2387 		    char *name, struct kernfs_node **dest_kn)
2388 {
2389 	struct kernfs_node *kn;
2390 	int ret;
2391 
2392 	/* create the directory */
2393 	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2394 	if (IS_ERR(kn))
2395 		return PTR_ERR(kn);
2396 
2397 	if (dest_kn)
2398 		*dest_kn = kn;
2399 
2400 	ret = rdtgroup_kn_set_ugid(kn);
2401 	if (ret)
2402 		goto out_destroy;
2403 
2404 	kernfs_activate(kn);
2405 
2406 	return 0;
2407 
2408 out_destroy:
2409 	kernfs_remove(kn);
2410 	return ret;
2411 }
2412 
is_mba_linear(void)2413 static inline bool is_mba_linear(void)
2414 {
2415 	return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear;
2416 }
2417 
mba_sc_domain_allocate(struct rdt_resource * r,struct rdt_ctrl_domain * d)2418 static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d)
2419 {
2420 	u32 num_closid = resctrl_arch_get_num_closid(r);
2421 	int cpu = cpumask_any(&d->hdr.cpu_mask);
2422 	int i;
2423 
2424 	d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
2425 				   GFP_KERNEL, cpu_to_node(cpu));
2426 	if (!d->mbps_val)
2427 		return -ENOMEM;
2428 
2429 	for (i = 0; i < num_closid; i++)
2430 		d->mbps_val[i] = MBA_MAX_MBPS;
2431 
2432 	return 0;
2433 }
2434 
mba_sc_domain_destroy(struct rdt_resource * r,struct rdt_ctrl_domain * d)2435 static void mba_sc_domain_destroy(struct rdt_resource *r,
2436 				  struct rdt_ctrl_domain *d)
2437 {
2438 	kfree(d->mbps_val);
2439 	d->mbps_val = NULL;
2440 }
2441 
2442 /*
2443  * MBA software controller is supported only if
2444  * MBM is supported and MBA is in linear scale,
2445  * and the MBM monitor scope is the same as MBA
2446  * control scope.
2447  */
supports_mba_mbps(void)2448 static bool supports_mba_mbps(void)
2449 {
2450 	struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3);
2451 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2452 
2453 	return (resctrl_is_mbm_enabled() &&
2454 		r->alloc_capable && is_mba_linear() &&
2455 		r->ctrl_scope == rmbm->mon_scope);
2456 }
2457 
2458 /*
2459  * Enable or disable the MBA software controller
2460  * which helps user specify bandwidth in MBps.
2461  */
set_mba_sc(bool mba_sc)2462 static int set_mba_sc(bool mba_sc)
2463 {
2464 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2465 	u32 num_closid = resctrl_arch_get_num_closid(r);
2466 	struct rdt_ctrl_domain *d;
2467 	unsigned long fflags;
2468 	int i;
2469 
2470 	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
2471 		return -EINVAL;
2472 
2473 	r->membw.mba_sc = mba_sc;
2474 
2475 	rdtgroup_default.mba_mbps_event = mba_mbps_default_event;
2476 
2477 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
2478 		for (i = 0; i < num_closid; i++)
2479 			d->mbps_val[i] = MBA_MAX_MBPS;
2480 	}
2481 
2482 	fflags = mba_sc ? RFTYPE_CTRL_BASE | RFTYPE_MON_BASE : 0;
2483 	resctrl_file_fflags_init("mba_MBps_event", fflags);
2484 
2485 	return 0;
2486 }
2487 
2488 /*
2489  * We don't allow rdtgroup directories to be created anywhere
2490  * except the root directory. Thus when looking for the rdtgroup
2491  * structure for a kernfs node we are either looking at a directory,
2492  * in which case the rdtgroup structure is pointed at by the "priv"
2493  * field, or at a file, in which case we need only look to the parent
2494  * to find the rdtgroup.
2495  */
kernfs_to_rdtgroup(struct kernfs_node * kn)2496 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
2497 {
2498 	if (kernfs_type(kn) == KERNFS_DIR) {
2499 		/*
2500 		 * All the resource directories use "kn->priv"
2501 		 * to point to the "struct rdtgroup" for the
2502 		 * resource. "info" and its subdirectories don't
2503 		 * have rdtgroup structures, so return NULL here.
2504 		 */
2505 		if (kn == kn_info ||
2506 		    rcu_access_pointer(kn->__parent) == kn_info)
2507 			return NULL;
2508 		else
2509 			return kn->priv;
2510 	} else {
2511 		return rdt_kn_parent_priv(kn);
2512 	}
2513 }
2514 
rdtgroup_kn_get(struct rdtgroup * rdtgrp,struct kernfs_node * kn)2515 static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
2516 {
2517 	atomic_inc(&rdtgrp->waitcount);
2518 	kernfs_break_active_protection(kn);
2519 }
2520 
rdtgroup_kn_put(struct rdtgroup * rdtgrp,struct kernfs_node * kn)2521 static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
2522 {
2523 	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
2524 	    (rdtgrp->flags & RDT_DELETED)) {
2525 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2526 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2527 			rdtgroup_pseudo_lock_remove(rdtgrp);
2528 		kernfs_unbreak_active_protection(kn);
2529 		rdtgroup_remove(rdtgrp);
2530 	} else {
2531 		kernfs_unbreak_active_protection(kn);
2532 	}
2533 }
2534 
rdtgroup_kn_lock_live(struct kernfs_node * kn)2535 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
2536 {
2537 	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2538 
2539 	if (!rdtgrp)
2540 		return NULL;
2541 
2542 	rdtgroup_kn_get(rdtgrp, kn);
2543 
2544 	cpus_read_lock();
2545 	mutex_lock(&rdtgroup_mutex);
2546 
2547 	/* Was this group deleted while we waited? */
2548 	if (rdtgrp->flags & RDT_DELETED)
2549 		return NULL;
2550 
2551 	return rdtgrp;
2552 }
2553 
rdtgroup_kn_unlock(struct kernfs_node * kn)2554 void rdtgroup_kn_unlock(struct kernfs_node *kn)
2555 {
2556 	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2557 
2558 	if (!rdtgrp)
2559 		return;
2560 
2561 	mutex_unlock(&rdtgroup_mutex);
2562 	cpus_read_unlock();
2563 
2564 	rdtgroup_kn_put(rdtgrp, kn);
2565 }
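/*
 * Typical caller pattern sketch (editor's illustration): resctrl file
 * handlers bracket their work with these helpers, e.g.
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (rdtgrp) {
 *		... operate on rdtgrp under rdtgroup_mutex ...
 *	} else {
 *		ret = -ENOENT;
 *	}
 *	rdtgroup_kn_unlock(of->kn);
 *
 * rdtgroup_kn_unlock() must be called even when NULL was returned so that
 * the waitcount taken in rdtgroup_kn_get() is always dropped.
 */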
2566 
2567 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2568 			     struct rdtgroup *prgrp,
2569 			     struct kernfs_node **mon_data_kn);
2570 
rdt_disable_ctx(void)2571 static void rdt_disable_ctx(void)
2572 {
2573 	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
2574 	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
2575 	set_mba_sc(false);
2576 
2577 	resctrl_debug = false;
2578 }
2579 
rdt_enable_ctx(struct rdt_fs_context * ctx)2580 static int rdt_enable_ctx(struct rdt_fs_context *ctx)
2581 {
2582 	int ret = 0;
2583 
2584 	if (ctx->enable_cdpl2) {
2585 		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
2586 		if (ret)
2587 			goto out_done;
2588 	}
2589 
2590 	if (ctx->enable_cdpl3) {
2591 		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
2592 		if (ret)
2593 			goto out_cdpl2;
2594 	}
2595 
2596 	if (ctx->enable_mba_mbps) {
2597 		ret = set_mba_sc(true);
2598 		if (ret)
2599 			goto out_cdpl3;
2600 	}
2601 
2602 	if (ctx->enable_debug)
2603 		resctrl_debug = true;
2604 
2605 	return 0;
2606 
2607 out_cdpl3:
2608 	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
2609 out_cdpl2:
2610 	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
2611 out_done:
2612 	return ret;
2613 }
2614 
schemata_list_add(struct rdt_resource * r,enum resctrl_conf_type type)2615 static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
2616 {
2617 	struct resctrl_schema *s;
2618 	const char *suffix = "";
2619 	int ret, cl;
2620 
2621 	s = kzalloc(sizeof(*s), GFP_KERNEL);
2622 	if (!s)
2623 		return -ENOMEM;
2624 
2625 	s->res = r;
2626 	s->num_closid = resctrl_arch_get_num_closid(r);
2627 	if (resctrl_arch_get_cdp_enabled(r->rid))
2628 		s->num_closid /= 2;
2629 
2630 	s->conf_type = type;
2631 	switch (type) {
2632 	case CDP_CODE:
2633 		suffix = "CODE";
2634 		break;
2635 	case CDP_DATA:
2636 		suffix = "DATA";
2637 		break;
2638 	case CDP_NONE:
2639 		suffix = "";
2640 		break;
2641 	}
2642 
2643 	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
2644 	if (ret >= sizeof(s->name)) {
2645 		kfree(s);
2646 		return -EINVAL;
2647 	}
2648 
2649 	cl = strlen(s->name);
2650 
2651 	/*
2652 	 * If CDP is supported by this resource, but not enabled,
2653 	 * reserve width for the suffix. This ensures the tabular format of the
2654 	 * schemata file does not change between mounts of the filesystem.
2655 	 */
2656 	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
2657 		cl += 4;
2658 
2659 	if (cl > max_name_width)
2660 		max_name_width = cl;
2661 
2662 	switch (r->schema_fmt) {
2663 	case RESCTRL_SCHEMA_BITMAP:
2664 		s->fmt_str = "%d=%x";
2665 		break;
2666 	case RESCTRL_SCHEMA_RANGE:
2667 		s->fmt_str = "%d=%u";
2668 		break;
2669 	}
2670 
2671 	if (WARN_ON_ONCE(!s->fmt_str)) {
2672 		kfree(s);
2673 		return -EINVAL;
2674 	}
2675 
2676 	INIT_LIST_HEAD(&s->list);
2677 	list_add(&s->list, &resctrl_schema_all);
2678 
2679 	return 0;
2680 }
2681 
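/*
 * Example of the result (editor's illustration): mounting with CDP enabled
 * on L3 adds two schemata, "L3CODE" and "L3DATA", each advertising half of
 * the hardware CLOSIDs, and a schemata line is emitted with fmt_str
 * "%d=%x", e.g. "L3DATA:0=7ff". Without CDP a single "L3" entry is added
 * and max_name_width still reserves room for the four-character suffix.
 */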
schemata_list_create(void)2682 static int schemata_list_create(void)
2683 {
2684 	struct rdt_resource *r;
2685 	int ret = 0;
2686 
2687 	for_each_alloc_capable_rdt_resource(r) {
2688 		if (resctrl_arch_get_cdp_enabled(r->rid)) {
2689 			ret = schemata_list_add(r, CDP_CODE);
2690 			if (ret)
2691 				break;
2692 
2693 			ret = schemata_list_add(r, CDP_DATA);
2694 		} else {
2695 			ret = schemata_list_add(r, CDP_NONE);
2696 		}
2697 
2698 		if (ret)
2699 			break;
2700 	}
2701 
2702 	return ret;
2703 }
2704 
schemata_list_destroy(void)2705 static void schemata_list_destroy(void)
2706 {
2707 	struct resctrl_schema *s, *tmp;
2708 
2709 	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
2710 		list_del(&s->list);
2711 		kfree(s);
2712 	}
2713 }
2714 
rdt_get_tree(struct fs_context * fc)2715 static int rdt_get_tree(struct fs_context *fc)
2716 {
2717 	struct rdt_fs_context *ctx = rdt_fc2context(fc);
2718 	unsigned long flags = RFTYPE_CTRL_BASE;
2719 	struct rdt_mon_domain *dom;
2720 	struct rdt_resource *r;
2721 	int ret;
2722 
2723 	cpus_read_lock();
2724 	mutex_lock(&rdtgroup_mutex);
2725 	/*
2726 	 * resctrl file system can only be mounted once.
2727 	 */
2728 	if (resctrl_mounted) {
2729 		ret = -EBUSY;
2730 		goto out;
2731 	}
2732 
2733 	ret = rdtgroup_setup_root(ctx);
2734 	if (ret)
2735 		goto out;
2736 
2737 	ret = rdt_enable_ctx(ctx);
2738 	if (ret)
2739 		goto out_root;
2740 
2741 	ret = schemata_list_create();
2742 	if (ret)
2743 		goto out_schemata_free;
2744 
2745 	ret = closid_init();
2746 	if (ret)
2747 		goto out_schemata_free;
2748 
2749 	if (resctrl_arch_mon_capable())
2750 		flags |= RFTYPE_MON;
2751 
2752 	ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
2753 	if (ret)
2754 		goto out_closid_exit;
2755 
2756 	kernfs_activate(rdtgroup_default.kn);
2757 
2758 	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
2759 	if (ret < 0)
2760 		goto out_closid_exit;
2761 
2762 	if (resctrl_arch_mon_capable()) {
2763 		ret = mongroup_create_dir(rdtgroup_default.kn,
2764 					  &rdtgroup_default, "mon_groups",
2765 					  &kn_mongrp);
2766 		if (ret < 0)
2767 			goto out_info;
2768 
2769 		rdtgroup_assign_cntrs(&rdtgroup_default);
2770 
2771 		ret = mkdir_mondata_all(rdtgroup_default.kn,
2772 					&rdtgroup_default, &kn_mondata);
2773 		if (ret < 0)
2774 			goto out_mongrp;
2775 		rdtgroup_default.mon.mon_data_kn = kn_mondata;
2776 	}
2777 
2778 	ret = rdt_pseudo_lock_init();
2779 	if (ret)
2780 		goto out_mondata;
2781 
2782 	ret = kernfs_get_tree(fc);
2783 	if (ret < 0)
2784 		goto out_psl;
2785 
2786 	if (resctrl_arch_alloc_capable())
2787 		resctrl_arch_enable_alloc();
2788 	if (resctrl_arch_mon_capable())
2789 		resctrl_arch_enable_mon();
2790 
2791 	if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
2792 		resctrl_mounted = true;
2793 
2794 	if (resctrl_is_mbm_enabled()) {
2795 		r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
2796 		list_for_each_entry(dom, &r->mon_domains, hdr.list)
2797 			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
2798 						   RESCTRL_PICK_ANY_CPU);
2799 	}
2800 
2801 	goto out;
2802 
2803 out_psl:
2804 	rdt_pseudo_lock_release();
2805 out_mondata:
2806 	if (resctrl_arch_mon_capable())
2807 		kernfs_remove(kn_mondata);
2808 out_mongrp:
2809 	if (resctrl_arch_mon_capable()) {
2810 		rdtgroup_unassign_cntrs(&rdtgroup_default);
2811 		kernfs_remove(kn_mongrp);
2812 	}
2813 out_info:
2814 	kernfs_remove(kn_info);
2815 out_closid_exit:
2816 	closid_exit();
2817 out_schemata_free:
2818 	schemata_list_destroy();
2819 	rdt_disable_ctx();
2820 out_root:
2821 	rdtgroup_destroy_root();
2822 out:
2823 	rdt_last_cmd_clear();
2824 	mutex_unlock(&rdtgroup_mutex);
2825 	cpus_read_unlock();
2826 	return ret;
2827 }
2828 
2829 enum rdt_param {
2830 	Opt_cdp,
2831 	Opt_cdpl2,
2832 	Opt_mba_mbps,
2833 	Opt_debug,
2834 	nr__rdt_params
2835 };
2836 
2837 static const struct fs_parameter_spec rdt_fs_parameters[] = {
2838 	fsparam_flag("cdp",		Opt_cdp),
2839 	fsparam_flag("cdpl2",		Opt_cdpl2),
2840 	fsparam_flag("mba_MBps",	Opt_mba_mbps),
2841 	fsparam_flag("debug",		Opt_debug),
2842 	{}
2843 };
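/*
 * Illustrative mount invocation (editor's addition, not part of the
 * original file): the flags above correspond to mount options, e.g.
 *
 *	# mount -t resctrl -o cdp,mba_MBps,debug resctrl /sys/fs/resctrl
 *
 * "mba_MBps" is rejected by rdt_parse_param() unless supports_mba_mbps()
 * holds, i.e. MBM plus linear-scale MBA at matching scope.
 */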
2844 
rdt_parse_param(struct fs_context * fc,struct fs_parameter * param)2845 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
2846 {
2847 	struct rdt_fs_context *ctx = rdt_fc2context(fc);
2848 	struct fs_parse_result result;
2849 	const char *msg;
2850 	int opt;
2851 
2852 	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
2853 	if (opt < 0)
2854 		return opt;
2855 
2856 	switch (opt) {
2857 	case Opt_cdp:
2858 		ctx->enable_cdpl3 = true;
2859 		return 0;
2860 	case Opt_cdpl2:
2861 		ctx->enable_cdpl2 = true;
2862 		return 0;
2863 	case Opt_mba_mbps:
2864 		msg = "mba_MBps requires MBM and linear scale MBA at L3 scope";
2865 		if (!supports_mba_mbps())
2866 			return invalfc(fc, msg);
2867 		ctx->enable_mba_mbps = true;
2868 		return 0;
2869 	case Opt_debug:
2870 		ctx->enable_debug = true;
2871 		return 0;
2872 	}
2873 
2874 	return -EINVAL;
2875 }
2876 
rdt_fs_context_free(struct fs_context * fc)2877 static void rdt_fs_context_free(struct fs_context *fc)
2878 {
2879 	struct rdt_fs_context *ctx = rdt_fc2context(fc);
2880 
2881 	kernfs_free_fs_context(fc);
2882 	kfree(ctx);
2883 }
2884 
2885 static const struct fs_context_operations rdt_fs_context_ops = {
2886 	.free		= rdt_fs_context_free,
2887 	.parse_param	= rdt_parse_param,
2888 	.get_tree	= rdt_get_tree,
2889 };
2890 
rdt_init_fs_context(struct fs_context * fc)2891 static int rdt_init_fs_context(struct fs_context *fc)
2892 {
2893 	struct rdt_fs_context *ctx;
2894 
2895 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2896 	if (!ctx)
2897 		return -ENOMEM;
2898 
2899 	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
2900 	fc->fs_private = &ctx->kfc;
2901 	fc->ops = &rdt_fs_context_ops;
2902 	put_user_ns(fc->user_ns);
2903 	fc->user_ns = get_user_ns(&init_user_ns);
2904 	fc->global = true;
2905 	return 0;
2906 }
2907 
2908 /*
2909  * Move tasks from one group to the other. If @from is NULL, then all tasks
2910  * in the system are moved unconditionally (used for teardown).
2911  *
2912  * If @mask is not NULL the cpus on which moved tasks are running are set
2913  * in that mask so the update smp function call is restricted to affected
2914  * cpus.
2915  */
rdt_move_group_tasks(struct rdtgroup * from,struct rdtgroup * to,struct cpumask * mask)2916 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2917 				 struct cpumask *mask)
2918 {
2919 	struct task_struct *p, *t;
2920 
2921 	read_lock(&tasklist_lock);
2922 	for_each_process_thread(p, t) {
2923 		if (!from || is_closid_match(t, from) ||
2924 		    is_rmid_match(t, from)) {
2925 			resctrl_arch_set_closid_rmid(t, to->closid,
2926 						     to->mon.rmid);
2927 
2928 			/*
2929 			 * Order the closid/rmid stores above before the loads
2930 			 * in task_curr(). This pairs with the full barrier
2931 			 * between the rq->curr update and
2932 			 * resctrl_arch_sched_in() during context switch.
2933 			 */
2934 			smp_mb();
2935 
2936 			/*
2937 			 * If the task is on a CPU, set the CPU in the mask.
2938 			 * The detection is inaccurate as tasks might move or
2939 			 * schedule before the smp function call takes place.
2940 			 * In such a case the function call is pointless, but
2941 			 * there is no other side effect.
2942 			 */
2943 			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
2944 				cpumask_set_cpu(task_cpu(t), mask);
2945 		}
2946 	}
2947 	read_unlock(&tasklist_lock);
2948 }
2949 
free_all_child_rdtgrp(struct rdtgroup * rdtgrp)2950 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
2951 {
2952 	struct rdtgroup *sentry, *stmp;
2953 	struct list_head *head;
2954 
2955 	head = &rdtgrp->mon.crdtgrp_list;
2956 	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
2957 		rdtgroup_unassign_cntrs(sentry);
2958 		free_rmid(sentry->closid, sentry->mon.rmid);
2959 		list_del(&sentry->mon.crdtgrp_list);
2960 
2961 		if (atomic_read(&sentry->waitcount) != 0)
2962 			sentry->flags = RDT_DELETED;
2963 		else
2964 			rdtgroup_remove(sentry);
2965 	}
2966 }
2967 
2968 /*
2969  * Forcibly remove all subdirectories under root.
2970  */
rmdir_all_sub(void)2971 static void rmdir_all_sub(void)
2972 {
2973 	struct rdtgroup *rdtgrp, *tmp;
2974 
2975 	/* Move all tasks to the default resource group */
2976 	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
2977 
2978 	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
2979 		/* Free any child rmids */
2980 		free_all_child_rdtgrp(rdtgrp);
2981 
2982 		/* Remove each rdtgroup other than root */
2983 		if (rdtgrp == &rdtgroup_default)
2984 			continue;
2985 
2986 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2987 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2988 			rdtgroup_pseudo_lock_remove(rdtgrp);
2989 
2990 		/*
2991 		 * Give any CPUs back to the default group. We cannot copy
2992 		 * cpu_online_mask because a CPU might have executed the
2993 		 * offline callback already, but is still marked online.
2994 		 */
2995 		cpumask_or(&rdtgroup_default.cpu_mask,
2996 			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2997 
2998 		rdtgroup_unassign_cntrs(rdtgrp);
2999 
3000 		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3001 
3002 		kernfs_remove(rdtgrp->kn);
3003 		list_del(&rdtgrp->rdtgroup_list);
3004 
3005 		if (atomic_read(&rdtgrp->waitcount) != 0)
3006 			rdtgrp->flags = RDT_DELETED;
3007 		else
3008 			rdtgroup_remove(rdtgrp);
3009 	}
3010 	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
3011 	update_closid_rmid(cpu_online_mask, &rdtgroup_default);
3012 
3013 	kernfs_remove(kn_info);
3014 	kernfs_remove(kn_mongrp);
3015 	kernfs_remove(kn_mondata);
3016 }
3017 
3018 /**
3019  * mon_get_kn_priv() - Get the mon_data priv data for this event.
3020  *
3021  * The same values are used across the mon_data directories of all control and
3022  * monitor groups for the same event in the same domain. Keep a list of
3023  * allocated structures and re-use an existing one with the same values for
3024  * @rid, @domid, etc.
3025  *
3026  * @rid:    The resource id for the event file being created.
3027  * @domid:  The domain id for the event file being created.
3028  * @mevt:   The type of event file being created.
3029  * @do_sum: Whether SNC summing monitors are being created.
3030  */
mon_get_kn_priv(enum resctrl_res_level rid,int domid,struct mon_evt * mevt,bool do_sum)3031 static struct mon_data *mon_get_kn_priv(enum resctrl_res_level rid, int domid,
3032 					struct mon_evt *mevt,
3033 					bool do_sum)
3034 {
3035 	struct mon_data *priv;
3036 
3037 	lockdep_assert_held(&rdtgroup_mutex);
3038 
3039 	list_for_each_entry(priv, &mon_data_kn_priv_list, list) {
3040 		if (priv->rid == rid && priv->domid == domid &&
3041 		    priv->sum == do_sum && priv->evtid == mevt->evtid)
3042 			return priv;
3043 	}
3044 
3045 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3046 	if (!priv)
3047 		return NULL;
3048 
3049 	priv->rid = rid;
3050 	priv->domid = domid;
3051 	priv->sum = do_sum;
3052 	priv->evtid = mevt->evtid;
3053 	list_add_tail(&priv->list, &mon_data_kn_priv_list);
3054 
3055 	return priv;
3056 }
3057 
3058 /**
3059  * mon_put_kn_priv() - Free all allocated mon_data structures.
3060  *
3061  * Called when resctrl file system is unmounted.
3062  */
mon_put_kn_priv(void)3063 static void mon_put_kn_priv(void)
3064 {
3065 	struct mon_data *priv, *tmp;
3066 
3067 	lockdep_assert_held(&rdtgroup_mutex);
3068 
3069 	list_for_each_entry_safe(priv, tmp, &mon_data_kn_priv_list, list) {
3070 		list_del(&priv->list);
3071 		kfree(priv);
3072 	}
3073 }
3074 
resctrl_fs_teardown(void)3075 static void resctrl_fs_teardown(void)
3076 {
3077 	lockdep_assert_held(&rdtgroup_mutex);
3078 
3079 	/* Cleared by rdtgroup_destroy_root() */
3080 	if (!rdtgroup_default.kn)
3081 		return;
3082 
3083 	rmdir_all_sub();
3084 	rdtgroup_unassign_cntrs(&rdtgroup_default);
3085 	mon_put_kn_priv();
3086 	rdt_pseudo_lock_release();
3087 	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
3088 	closid_exit();
3089 	schemata_list_destroy();
3090 	rdtgroup_destroy_root();
3091 }
3092 
rdt_kill_sb(struct super_block * sb)3093 static void rdt_kill_sb(struct super_block *sb)
3094 {
3095 	struct rdt_resource *r;
3096 
3097 	cpus_read_lock();
3098 	mutex_lock(&rdtgroup_mutex);
3099 
3100 	rdt_disable_ctx();
3101 
3102 	/* Put everything back to default values. */
3103 	for_each_alloc_capable_rdt_resource(r)
3104 		resctrl_arch_reset_all_ctrls(r);
3105 
3106 	resctrl_fs_teardown();
3107 	if (resctrl_arch_alloc_capable())
3108 		resctrl_arch_disable_alloc();
3109 	if (resctrl_arch_mon_capable())
3110 		resctrl_arch_disable_mon();
3111 	resctrl_mounted = false;
3112 	kernfs_kill_sb(sb);
3113 	mutex_unlock(&rdtgroup_mutex);
3114 	cpus_read_unlock();
3115 }
3116 
3117 static struct file_system_type rdt_fs_type = {
3118 	.name			= "resctrl",
3119 	.init_fs_context	= rdt_init_fs_context,
3120 	.parameters		= rdt_fs_parameters,
3121 	.kill_sb		= rdt_kill_sb,
3122 };
3123 
mon_addfile(struct kernfs_node * parent_kn,const char * name,void * priv)3124 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
3125 		       void *priv)
3126 {
3127 	struct kernfs_node *kn;
3128 	int ret = 0;
3129 
3130 	kn = __kernfs_create_file(parent_kn, name, 0444,
3131 				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
3132 				  &kf_mondata_ops, priv, NULL, NULL);
3133 	if (IS_ERR(kn))
3134 		return PTR_ERR(kn);
3135 
3136 	ret = rdtgroup_kn_set_ugid(kn);
3137 	if (ret) {
3138 		kernfs_remove(kn);
3139 		return ret;
3140 	}
3141 
3142 	return ret;
3143 }
3144 
mon_rmdir_one_subdir(struct kernfs_node * pkn,char * name,char * subname)3145 static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname)
3146 {
3147 	struct kernfs_node *kn;
3148 
3149 	kn = kernfs_find_and_get(pkn, name);
3150 	if (!kn)
3151 		return;
3152 	kernfs_put(kn);
3153 
3154 	if (kn->dir.subdirs <= 1)
3155 		kernfs_remove(kn);
3156 	else
3157 		kernfs_remove_by_name(kn, subname);
3158 }
3159 
3160 /*
3161  * Remove all subdirectories of mon_data of ctrl_mon groups
3162  * and monitor groups for the given domain.
3163  * Remove files and directories containing the "sum" of domain data
3164  * when the last domain being summed is removed.
3165  */
rmdir_mondata_subdir_allrdtgrp(struct rdt_resource * r,struct rdt_mon_domain * d)3166 static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
3167 					   struct rdt_mon_domain *d)
3168 {
3169 	struct rdtgroup *prgrp, *crgrp;
3170 	char subname[32];
3171 	bool snc_mode;
3172 	char name[32];
3173 
3174 	snc_mode = r->mon_scope == RESCTRL_L3_NODE;
3175 	sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id);
3176 	if (snc_mode)
3177 		sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id);
3178 
3179 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3180 		mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname);
3181 
3182 		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
3183 			mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname);
3184 	}
3185 }
3186 
mon_add_all_files(struct kernfs_node * kn,struct rdt_mon_domain * d,struct rdt_resource * r,struct rdtgroup * prgrp,bool do_sum)3187 static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d,
3188 			     struct rdt_resource *r, struct rdtgroup *prgrp,
3189 			     bool do_sum)
3190 {
3191 	struct rmid_read rr = {0};
3192 	struct mon_data *priv;
3193 	struct mon_evt *mevt;
3194 	int ret, domid;
3195 
3196 	for_each_mon_event(mevt) {
3197 		if (mevt->rid != r->rid || !mevt->enabled)
3198 			continue;
3199 		domid = do_sum ? d->ci_id : d->hdr.id;
3200 		priv = mon_get_kn_priv(r->rid, domid, mevt, do_sum);
3201 		if (WARN_ON_ONCE(!priv))
3202 			return -EINVAL;
3203 
3204 		ret = mon_addfile(kn, mevt->name, priv);
3205 		if (ret)
3206 			return ret;
3207 
3208 		if (!do_sum && resctrl_is_mbm_event(mevt->evtid))
3209 			mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true);
3210 	}
3211 
3212 	return 0;
3213 }
3214 
mkdir_mondata_subdir(struct kernfs_node * parent_kn,struct rdt_mon_domain * d,struct rdt_resource * r,struct rdtgroup * prgrp)3215 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
3216 				struct rdt_mon_domain *d,
3217 				struct rdt_resource *r, struct rdtgroup *prgrp)
3218 {
3219 	struct kernfs_node *kn, *ckn;
3220 	char name[32];
3221 	bool snc_mode;
3222 	int ret = 0;
3223 
3224 	lockdep_assert_held(&rdtgroup_mutex);
3225 
3226 	snc_mode = r->mon_scope == RESCTRL_L3_NODE;
3227 	sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id);
3228 	kn = kernfs_find_and_get(parent_kn, name);
3229 	if (kn) {
3230 		/*
3231 		 * rdtgroup_mutex will prevent this directory from being
3232 		 * removed. No need to keep this hold.
3233 		 */
3234 		kernfs_put(kn);
3235 	} else {
3236 		kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
3237 		if (IS_ERR(kn))
3238 			return PTR_ERR(kn);
3239 
3240 		ret = rdtgroup_kn_set_ugid(kn);
3241 		if (ret)
3242 			goto out_destroy;
3243 		ret = mon_add_all_files(kn, d, r, prgrp, snc_mode);
3244 		if (ret)
3245 			goto out_destroy;
3246 	}
3247 
3248 	if (snc_mode) {
3249 		sprintf(name, "mon_sub_%s_%02d", r->name, d->hdr.id);
3250 		ckn = kernfs_create_dir(kn, name, parent_kn->mode, prgrp);
3251 		if (IS_ERR(ckn)) {
3252 			ret = -EINVAL;
3253 			goto out_destroy;
3254 		}
3255 
3256 		ret = rdtgroup_kn_set_ugid(ckn);
3257 		if (ret)
3258 			goto out_destroy;
3259 
3260 		ret = mon_add_all_files(ckn, d, r, prgrp, false);
3261 		if (ret)
3262 			goto out_destroy;
3263 	}
3264 
3265 	kernfs_activate(kn);
3266 	return 0;
3267 
3268 out_destroy:
3269 	kernfs_remove(kn);
3270 	return ret;
3271 }
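/*
 * Resulting layout sketch (editor's illustration): on an SNC system where
 * r->mon_scope == RESCTRL_L3_NODE, a cache instance with ci_id 0 covering
 * SNC domains 0 and 1 ends up as:
 *
 *	mon_data/mon_L3_00/			<- summed files (do_sum == true)
 *	mon_data/mon_L3_00/mon_sub_L3_00/
 *	mon_data/mon_L3_00/mon_sub_L3_01/
 *
 * Without SNC only the per-domain mon_L3_XX directories are created.
 */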
3272 
3273 /*
3274  * Add all subdirectories of mon_data for "ctrl_mon" groups
3275  * and "monitor" groups with the given domain id.
3276  */
mkdir_mondata_subdir_allrdtgrp(struct rdt_resource * r,struct rdt_mon_domain * d)3277 static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
3278 					   struct rdt_mon_domain *d)
3279 {
3280 	struct kernfs_node *parent_kn;
3281 	struct rdtgroup *prgrp, *crgrp;
3282 	struct list_head *head;
3283 
3284 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3285 		parent_kn = prgrp->mon.mon_data_kn;
3286 		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
3287 
3288 		head = &prgrp->mon.crdtgrp_list;
3289 		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
3290 			parent_kn = crgrp->mon.mon_data_kn;
3291 			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
3292 		}
3293 	}
3294 }
3295 
mkdir_mondata_subdir_alldom(struct kernfs_node * parent_kn,struct rdt_resource * r,struct rdtgroup * prgrp)3296 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
3297 				       struct rdt_resource *r,
3298 				       struct rdtgroup *prgrp)
3299 {
3300 	struct rdt_mon_domain *dom;
3301 	int ret;
3302 
3303 	/* Walking r->domains, ensure it can't race with cpuhp */
3304 	lockdep_assert_cpus_held();
3305 
3306 	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
3307 		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
3308 		if (ret)
3309 			return ret;
3310 	}
3311 
3312 	return 0;
3313 }
3314 
3315 /*
3316  * This creates a directory mon_data which contains the monitored data.
3317  *
3318  * mon_data has one directory for each domain, named in the
3319  * format mon_<domain_name>_<domain_id>. For example, a mon_data
3320  * directory with L3 domains looks as below:
3321  * ./mon_data:
3322  * mon_L3_00
3323  * mon_L3_01
3324  * mon_L3_02
3325  * ...
3326  *
3327  * Each domain directory has one file per event:
3328  * ./mon_L3_00/:
3329  * llc_occupancy
3330  *
3331  */
mkdir_mondata_all(struct kernfs_node * parent_kn,struct rdtgroup * prgrp,struct kernfs_node ** dest_kn)3332 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
3333 			     struct rdtgroup *prgrp,
3334 			     struct kernfs_node **dest_kn)
3335 {
3336 	struct rdt_resource *r;
3337 	struct kernfs_node *kn;
3338 	int ret;
3339 
3340 	/*
3341 	 * Create the mon_data directory first.
3342 	 */
3343 	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
3344 	if (ret)
3345 		return ret;
3346 
3347 	if (dest_kn)
3348 		*dest_kn = kn;
3349 
3350 	/*
3351 	 * Create the subdirectories for each domain. Note that all events
3352 	 * in a domain like L3 are grouped into a resource whose domain is L3.
3353 	 */
3354 	for_each_mon_capable_rdt_resource(r) {
3355 		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
3356 		if (ret)
3357 			goto out_destroy;
3358 	}
3359 
3360 	return 0;
3361 
3362 out_destroy:
3363 	kernfs_remove(kn);
3364 	return ret;
3365 }
3366 
3367 /**
3368  * cbm_ensure_valid - Enforce validity on provided CBM
3369  * @_val:	Candidate CBM
3370  * @r:		RDT resource to which the CBM belongs
3371  *
3372  * The provided CBM represents all cache portions available for use. This
3373  * may be represented by a bitmap that does not consist of contiguous ones
3374  * and thus be an invalid CBM.
3375  * Here the provided CBM is forced to be a valid CBM by only considering
3376  * the first set of contiguous bits as valid and clearing all bits.
3377  * the first set of contiguous bits as valid and clearing all other bits.
3378  * resource group is initialized. The user can follow this with a
3379  * modification to the CBM if the default does not satisfy the
3380  * requirements.
3381  */
cbm_ensure_valid(u32 _val,struct rdt_resource * r)3382 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
3383 {
3384 	unsigned int cbm_len = r->cache.cbm_len;
3385 	unsigned long first_bit, zero_bit;
3386 	unsigned long val = _val;
3387 
3388 	if (!val)
3389 		return 0;
3390 
3391 	first_bit = find_first_bit(&val, cbm_len);
3392 	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
3393 
3394 	/* Clear any remaining bits to ensure contiguous region */
3395 	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
3396 	return (u32)val;
3397 }
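/*
 * Worked example (editor's illustration): with cbm_len == 8 and
 * _val == 0xEC (0b11101100), first_bit is 2 and zero_bit is 4, so bits
 * 4..7 are cleared and the returned valid CBM is 0x0C (0b00001100).
 */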
3398 
3399 /*
3400  * Initialize cache resources per RDT domain
3401  *
3402  * Set the RDT domain up to start off with all usable allocations. That is,
3403  * all shareable and unused bits. All-zero CBM is invalid.
3404  */
__init_one_rdt_domain(struct rdt_ctrl_domain * d,struct resctrl_schema * s,u32 closid)3405 static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
3406 				 u32 closid)
3407 {
3408 	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
3409 	enum resctrl_conf_type t = s->conf_type;
3410 	struct resctrl_staged_config *cfg;
3411 	struct rdt_resource *r = s->res;
3412 	u32 used_b = 0, unused_b = 0;
3413 	unsigned long tmp_cbm;
3414 	enum rdtgrp_mode mode;
3415 	u32 peer_ctl, ctrl_val;
3416 	int i;
3417 
3418 	cfg = &d->staged_config[t];
3419 	cfg->have_new_ctrl = false;
3420 	cfg->new_ctrl = r->cache.shareable_bits;
3421 	used_b = r->cache.shareable_bits;
3422 	for (i = 0; i < closids_supported(); i++) {
3423 		if (closid_allocated(i) && i != closid) {
3424 			mode = rdtgroup_mode_by_closid(i);
3425 			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
3426 				/*
3427 				 * ctrl values for locksetup aren't relevant
3428 				 * until the schemata is written, and the mode
3429 				 * becomes RDT_MODE_PSEUDO_LOCKED.
3430 				 */
3431 				continue;
3432 			/*
3433 			 * If CDP is active include peer domain's
3434 			 * usage to ensure there is no overlap
3435 			 * with an exclusive group.
3436 			 */
3437 			if (resctrl_arch_get_cdp_enabled(r->rid))
3438 				peer_ctl = resctrl_arch_get_config(r, d, i,
3439 								   peer_type);
3440 			else
3441 				peer_ctl = 0;
3442 			ctrl_val = resctrl_arch_get_config(r, d, i,
3443 							   s->conf_type);
3444 			used_b |= ctrl_val | peer_ctl;
3445 			if (mode == RDT_MODE_SHAREABLE)
3446 				cfg->new_ctrl |= ctrl_val | peer_ctl;
3447 		}
3448 	}
3449 	if (d->plr && d->plr->cbm > 0)
3450 		used_b |= d->plr->cbm;
3451 	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
3452 	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
3453 	cfg->new_ctrl |= unused_b;
3454 	/*
3455 	 * Force the initial CBM to be valid, user can
3456 	 * modify the CBM based on system availability.
3457 	 */
3458 	cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
3459 	/*
3460 	 * Assign the u32 CBM to an unsigned long to ensure that
3461 	 * bitmap_weight() does not access out-of-bound memory.
3462 	 */
3463 	tmp_cbm = cfg->new_ctrl;
3464 	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
3465 		rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id);
3466 		return -ENOSPC;
3467 	}
3468 	cfg->have_new_ctrl = true;
3469 
3470 	return 0;
3471 }
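/*
 * Worked example (editor's illustration, hypothetical values): with
 * cbm_len == 8, shareable_bits == 0x81 and one other allocated CLOSID
 * holding 0x70 in exclusive mode, used_b ends up 0xF1, unused_b 0x0E and
 * new_ctrl becomes 0x81 | 0x0E == 0x8F. cbm_ensure_valid() then keeps only
 * the first contiguous run, so the domain's default CBM is 0x0F.
 */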
3472 
3473 /*
3474  * Initialize cache resources with default values.
3475  *
3476  * A new RDT group is being created on an allocation capable (CAT)
3477  * supporting system. Set this group up to start off with all usable
3478  * allocations.
3479  *
3480  * If there are no more shareable bits available on any domain then
3481  * the entire allocation will fail.
3482  */
rdtgroup_init_cat(struct resctrl_schema * s,u32 closid)3483 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
3484 {
3485 	struct rdt_ctrl_domain *d;
3486 	int ret;
3487 
3488 	list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) {
3489 		ret = __init_one_rdt_domain(d, s, closid);
3490 		if (ret < 0)
3491 			return ret;
3492 	}
3493 
3494 	return 0;
3495 }
3496 
3497 /* Initialize MBA resource with default values. */
rdtgroup_init_mba(struct rdt_resource * r,u32 closid)3498 static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
3499 {
3500 	struct resctrl_staged_config *cfg;
3501 	struct rdt_ctrl_domain *d;
3502 
3503 	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
3504 		if (is_mba_sc(r)) {
3505 			d->mbps_val[closid] = MBA_MAX_MBPS;
3506 			continue;
3507 		}
3508 
3509 		cfg = &d->staged_config[CDP_NONE];
3510 		cfg->new_ctrl = resctrl_get_default_ctrl(r);
3511 		cfg->have_new_ctrl = true;
3512 	}
3513 }
3514 
3515 /* Initialize the RDT group's allocations. */
rdtgroup_init_alloc(struct rdtgroup * rdtgrp)3516 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
3517 {
3518 	struct resctrl_schema *s;
3519 	struct rdt_resource *r;
3520 	int ret = 0;
3521 
3522 	rdt_staged_configs_clear();
3523 
3524 	list_for_each_entry(s, &resctrl_schema_all, list) {
3525 		r = s->res;
3526 		if (r->rid == RDT_RESOURCE_MBA ||
3527 		    r->rid == RDT_RESOURCE_SMBA) {
3528 			rdtgroup_init_mba(r, rdtgrp->closid);
3529 			if (is_mba_sc(r))
3530 				continue;
3531 		} else {
3532 			ret = rdtgroup_init_cat(s, rdtgrp->closid);
3533 			if (ret < 0)
3534 				goto out;
3535 		}
3536 
3537 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
3538 		if (ret < 0) {
3539 			rdt_last_cmd_puts("Failed to initialize allocations\n");
3540 			goto out;
3541 		}
3542 	}
3543 
3544 	rdtgrp->mode = RDT_MODE_SHAREABLE;
3545 
3546 out:
3547 	rdt_staged_configs_clear();
3548 	return ret;
3549 }
3550 
mkdir_rdt_prepare_rmid_alloc(struct rdtgroup * rdtgrp)3551 static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
3552 {
3553 	int ret;
3554 
3555 	if (!resctrl_arch_mon_capable())
3556 		return 0;
3557 
3558 	ret = alloc_rmid(rdtgrp->closid);
3559 	if (ret < 0) {
3560 		rdt_last_cmd_puts("Out of RMIDs\n");
3561 		return ret;
3562 	}
3563 	rdtgrp->mon.rmid = ret;
3564 
3565 	rdtgroup_assign_cntrs(rdtgrp);
3566 
3567 	ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
3568 	if (ret) {
3569 		rdt_last_cmd_puts("kernfs subdir error\n");
3570 		rdtgroup_unassign_cntrs(rdtgrp);
3571 		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3572 		return ret;
3573 	}
3574 
3575 	return 0;
3576 }
3577 
mkdir_rdt_prepare_rmid_free(struct rdtgroup * rgrp)3578 static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
3579 {
3580 	if (resctrl_arch_mon_capable()) {
3581 		rdtgroup_unassign_cntrs(rgrp);
3582 		free_rmid(rgrp->closid, rgrp->mon.rmid);
3583 	}
3584 }
3585 
3586 /*
3587  * We allow creating mon groups only within a directory called "mon_groups"
3588  * which is present in every ctrl_mon group. Check if this is a valid
3589  * "mon_groups" directory.
3590  *
3591  * 1. The directory should be named "mon_groups".
3592  * 2. The mon group itself should "not" be named "mon_groups".
3593  *   This makes sure the "mon_groups" directory always has a ctrl_mon group
3594  *   as parent.
3595  */
is_mon_groups(struct kernfs_node * kn,const char * name)3596 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
3597 {
3598 	return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
3599 		strcmp(name, "mon_groups"));
3600 }
3601 
3602 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
3603 			     const char *name, umode_t mode,
3604 			     enum rdt_group_type rtype, struct rdtgroup **r)
3605 {
3606 	struct rdtgroup *prdtgrp, *rdtgrp;
3607 	unsigned long files = 0;
3608 	struct kernfs_node *kn;
3609 	int ret;
3610 
3611 	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
3612 	if (!prdtgrp) {
3613 		ret = -ENODEV;
3614 		goto out_unlock;
3615 	}
3616 
3617 	rdt_last_cmd_clear();
3618 
3619 	/*
3620 	 * Check that the parent directory for a monitor group is a "mon_groups"
3621 	 * directory.
3622 	 */
3623 	if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
3624 		ret = -EPERM;
3625 		goto out_unlock;
3626 	}
3627 
3628 	if (rtype == RDTMON_GROUP &&
3629 	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3630 	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
3631 		ret = -EINVAL;
3632 		rdt_last_cmd_puts("Pseudo-locking in progress\n");
3633 		goto out_unlock;
3634 	}
3635 
3636 	/* allocate the rdtgroup. */
3637 	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
3638 	if (!rdtgrp) {
3639 		ret = -ENOSPC;
3640 		rdt_last_cmd_puts("Kernel out of memory\n");
3641 		goto out_unlock;
3642 	}
3643 	*r = rdtgrp;
3644 	rdtgrp->mon.parent = prdtgrp;
3645 	rdtgrp->type = rtype;
3646 	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
3647 
3648 	/* kernfs creates the directory for rdtgrp */
3649 	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
3650 	if (IS_ERR(kn)) {
3651 		ret = PTR_ERR(kn);
3652 		rdt_last_cmd_puts("kernfs create error\n");
3653 		goto out_free_rgrp;
3654 	}
3655 	rdtgrp->kn = kn;
3656 
3657 	/*
3658 	 * kernfs_remove() will drop the reference count on "kn" which
3659 	 * will free it. But we still need it to stick around for the
3660 	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
3661 	 * which will be dropped by kernfs_put() in rdtgroup_remove().
3662 	 */
3663 	kernfs_get(kn);
3664 
3665 	ret = rdtgroup_kn_set_ugid(kn);
3666 	if (ret) {
3667 		rdt_last_cmd_puts("kernfs perm error\n");
3668 		goto out_destroy;
3669 	}
3670 
3671 	if (rtype == RDTCTRL_GROUP) {
3672 		files = RFTYPE_BASE | RFTYPE_CTRL;
3673 		if (resctrl_arch_mon_capable())
3674 			files |= RFTYPE_MON;
3675 	} else {
3676 		files = RFTYPE_BASE | RFTYPE_MON;
3677 	}
3678 
3679 	ret = rdtgroup_add_files(kn, files);
3680 	if (ret) {
3681 		rdt_last_cmd_puts("kernfs fill error\n");
3682 		goto out_destroy;
3683 	}
3684 
3685 	/*
3686 	 * The caller unlocks the parent_kn upon success.
3687 	 */
3688 	return 0;
3689 
3690 out_destroy:
3691 	kernfs_put(rdtgrp->kn);
3692 	kernfs_remove(rdtgrp->kn);
3693 out_free_rgrp:
3694 	kfree(rdtgrp);
3695 out_unlock:
3696 	rdtgroup_kn_unlock(parent_kn);
3697 	return ret;
3698 }
3699 
3700 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
3701 {
3702 	kernfs_remove(rgrp->kn);
3703 	rdtgroup_remove(rgrp);
3704 }
3705 
3706 /*
3707  * Create a monitor group under the "mon_groups" directory of a control
3708  * and monitor group (ctrl_mon). This is a resource group used to monitor
3709  * a subset of tasks and CPUs in its parent ctrl_mon group.
3710  */
3711 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
3712 			      const char *name, umode_t mode)
3713 {
3714 	struct rdtgroup *rdtgrp, *prgrp;
3715 	int ret;
3716 
3717 	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
3718 	if (ret)
3719 		return ret;
3720 
3721 	prgrp = rdtgrp->mon.parent;
3722 	rdtgrp->closid = prgrp->closid;
3723 
3724 	ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
3725 	if (ret) {
3726 		mkdir_rdt_prepare_clean(rdtgrp);
3727 		goto out_unlock;
3728 	}
3729 
3730 	kernfs_activate(rdtgrp->kn);
3731 
3732 	/*
3733 	 * Add the rdtgrp to the list of rdtgrps the parent
3734 	 * ctrl_mon group has to track.
3735 	 */
3736 	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
3737 
3738 out_unlock:
3739 	rdtgroup_kn_unlock(parent_kn);
3740 	return ret;
3741 }
3742 
3743 /*
3744  * These are rdtgroups created under the root directory. They can be used
3745  * to allocate and monitor resources.
3746  */
3747 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
3748 				   const char *name, umode_t mode)
3749 {
3750 	struct rdtgroup *rdtgrp;
3751 	struct kernfs_node *kn;
3752 	u32 closid;
3753 	int ret;
3754 
3755 	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
3756 	if (ret)
3757 		return ret;
3758 
3759 	kn = rdtgrp->kn;
3760 	ret = closid_alloc();
3761 	if (ret < 0) {
3762 		rdt_last_cmd_puts("Out of CLOSIDs\n");
3763 		goto out_common_fail;
3764 	}
3765 	closid = ret;
3766 	ret = 0;
3767 
3768 	rdtgrp->closid = closid;
3769 
3770 	ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
3771 	if (ret)
3772 		goto out_closid_free;
3773 
3774 	kernfs_activate(rdtgrp->kn);
3775 
3776 	ret = rdtgroup_init_alloc(rdtgrp);
3777 	if (ret < 0)
3778 		goto out_rmid_free;
3779 
3780 	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
3781 
3782 	if (resctrl_arch_mon_capable()) {
3783 		/*
3784 		 * Create an empty mon_groups directory to hold the subset
3785 		 * of tasks and cpus to monitor.
3786 		 */
3787 		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
3788 		if (ret) {
3789 			rdt_last_cmd_puts("kernfs subdir error\n");
3790 			goto out_del_list;
3791 		}
3792 		if (is_mba_sc(NULL))
3793 			rdtgrp->mba_mbps_event = mba_mbps_default_event;
3794 	}
3795 
3796 	goto out_unlock;
3797 
3798 out_del_list:
3799 	list_del(&rdtgrp->rdtgroup_list);
3800 out_rmid_free:
3801 	mkdir_rdt_prepare_rmid_free(rdtgrp);
3802 out_closid_free:
3803 	closid_free(closid);
3804 out_common_fail:
3805 	mkdir_rdt_prepare_clean(rdtgrp);
3806 out_unlock:
3807 	rdtgroup_kn_unlock(parent_kn);
3808 	return ret;
3809 }
3810 
3811 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
3812 			  umode_t mode)
3813 {
3814 	/* Do not accept '\n' to avoid an unparsable situation. */
3815 	if (strchr(name, '\n'))
3816 		return -EINVAL;
3817 
3818 	/*
3819 	 * If the parent directory is the root directory and RDT
3820 	 * allocation is supported, add a control and monitoring
3821 	 * subdirectory.
3822 	 */
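	/* e.g. "mkdir /sys/fs/resctrl/grp1" (illustrative path). */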
3823 	if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
3824 		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3825 
3826 	/* Else, attempt to add a monitoring subdirectory. */
3827 	if (resctrl_arch_mon_capable())
3828 		return rdtgroup_mkdir_mon(parent_kn, name, mode);
3829 
3830 	return -EPERM;
3831 }
3832 
3833 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3834 {
3835 	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3836 	u32 closid, rmid;
3837 	int cpu;
3838 
3839 	/* Give any tasks back to the parent group */
3840 	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3841 
3842 	/*
3843 	 * Update per cpu closid/rmid of the moved CPUs first.
3844 	 * Note: the closid will not change, but the arch code still needs it.
3845 	 */
3846 	closid = prdtgrp->closid;
3847 	rmid = prdtgrp->mon.rmid;
3848 	for_each_cpu(cpu, &rdtgrp->cpu_mask)
3849 		resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
3850 
3851 	/*
3852 	 * Update the MSR on the moved CPUs and on CPUs which have a moved
3853 	 * task running on them.
3854 	 */
3855 	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3856 	update_closid_rmid(tmpmask, NULL);
3857 
3858 	rdtgrp->flags = RDT_DELETED;
3859 
3860 	rdtgroup_unassign_cntrs(rdtgrp);
3861 
3862 	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3863 
3864 	/*
3865 	 * Remove the rdtgrp from the parent ctrl_mon group's list
3866 	 */
3867 	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3868 	list_del(&rdtgrp->mon.crdtgrp_list);
3869 
3870 	kernfs_remove(rdtgrp->kn);
3871 
3872 	return 0;
3873 }
3874 
3875 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
3876 {
3877 	rdtgrp->flags = RDT_DELETED;
3878 	list_del(&rdtgrp->rdtgroup_list);
3879 
3880 	kernfs_remove(rdtgrp->kn);
3881 	return 0;
3882 }
3883 
3884 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3885 {
3886 	u32 closid, rmid;
3887 	int cpu;
3888 
3889 	/* Give any tasks back to the default group */
3890 	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
3891 
3892 	/* Give any CPUs back to the default group */
3893 	cpumask_or(&rdtgroup_default.cpu_mask,
3894 		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3895 
3896 	/* Update per cpu closid and rmid of the moved CPUs first */
3897 	closid = rdtgroup_default.closid;
3898 	rmid = rdtgroup_default.mon.rmid;
3899 	for_each_cpu(cpu, &rdtgrp->cpu_mask)
3900 		resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
3901 
3902 	/*
3903 	 * Update the MSR on the moved CPUs and on CPUs which have a moved
3904 	 * task running on them.
3905 	 */
3906 	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3907 	update_closid_rmid(tmpmask, NULL);
3908 
3909 	rdtgroup_unassign_cntrs(rdtgrp);
3910 
3911 	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3912 	closid_free(rdtgrp->closid);
3913 
3914 	rdtgroup_ctrl_remove(rdtgrp);
3915 
3916 	/*
3917 	 * Free all the child monitor group rmids.
3918 	 */
3919 	free_all_child_rdtgrp(rdtgrp);
3920 
3921 	return 0;
3922 }
3923 
3924 static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn)
3925 {
3926 	/*
3927 	 * Valid within the RCU section it was obtained in, or while
3928 	 * rdtgroup_mutex is held.
3929 	 */
3930 	return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex));
3931 }
3932 
3933 static int rdtgroup_rmdir(struct kernfs_node *kn)
3934 {
3935 	struct kernfs_node *parent_kn;
3936 	struct rdtgroup *rdtgrp;
3937 	cpumask_var_t tmpmask;
3938 	int ret = 0;
3939 
3940 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
3941 		return -ENOMEM;
3942 
3943 	rdtgrp = rdtgroup_kn_lock_live(kn);
3944 	if (!rdtgrp) {
3945 		ret = -EPERM;
3946 		goto out;
3947 	}
3948 	parent_kn = rdt_kn_parent(kn);
3949 
3950 	/*
3951 	 * If the rdtgroup is a ctrl_mon group and parent directory
3952 	 * is the root directory, remove the ctrl_mon group.
3953 	 *
3954 	 * If the rdtgroup is a mon group and parent directory
3955 	 * is a valid "mon_groups" directory, remove the mon group.
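	 *
	 * For example (illustrative paths):
	 *   rmdir /sys/fs/resctrl/grp0                - removes a ctrl_mon group
	 *   rmdir /sys/fs/resctrl/grp0/mon_groups/m0  - removes a mon group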
3956 	 */
3957 	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
3958 	    rdtgrp != &rdtgroup_default) {
3959 		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3960 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
3961 			ret = rdtgroup_ctrl_remove(rdtgrp);
3962 		} else {
3963 			ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
3964 		}
3965 	} else if (rdtgrp->type == RDTMON_GROUP &&
3966 		 is_mon_groups(parent_kn, rdt_kn_name(kn))) {
3967 		ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
3968 	} else {
3969 		ret = -EPERM;
3970 	}
3971 
3972 out:
3973 	rdtgroup_kn_unlock(kn);
3974 	free_cpumask_var(tmpmask);
3975 	return ret;
3976 }
3977 
3978 /**
3979  * mongrp_reparent() - replace parent CTRL_MON group of a MON group
3980  * @rdtgrp:		the MON group whose parent should be replaced
3981  * @new_prdtgrp:	replacement parent CTRL_MON group for @rdtgrp
3982  * @cpus:		cpumask provided by the caller for use during this call
3983  *
3984  * Replaces the parent CTRL_MON group for a MON group, resulting in all member
3985  * tasks' CLOSID immediately changing to that of the new parent group.
3986  * Monitoring data for the group is unaffected by this operation.
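 *
 * Reached from user space via rename(2) of a MON group directory, for
 * example (illustrative paths):
 *   mv /sys/fs/resctrl/grp0/mon_groups/m0 /sys/fs/resctrl/grp1/mon_groups/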
3987  */
3988 static void mongrp_reparent(struct rdtgroup *rdtgrp,
3989 			    struct rdtgroup *new_prdtgrp,
3990 			    cpumask_var_t cpus)
3991 {
3992 	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3993 
3994 	WARN_ON(rdtgrp->type != RDTMON_GROUP);
3995 	WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP);
3996 
3997 	/* Nothing to do when simply renaming a MON group. */
3998 	if (prdtgrp == new_prdtgrp)
3999 		return;
4000 
4001 	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
4002 	list_move_tail(&rdtgrp->mon.crdtgrp_list,
4003 		       &new_prdtgrp->mon.crdtgrp_list);
4004 
4005 	rdtgrp->mon.parent = new_prdtgrp;
4006 	rdtgrp->closid = new_prdtgrp->closid;
4007 
4008 	/* Propagate updated closid to all tasks in this group. */
4009 	rdt_move_group_tasks(rdtgrp, rdtgrp, cpus);
4010 
4011 	update_closid_rmid(cpus, NULL);
4012 }
4013 
4014 static int rdtgroup_rename(struct kernfs_node *kn,
4015 			   struct kernfs_node *new_parent, const char *new_name)
4016 {
4017 	struct kernfs_node *kn_parent;
4018 	struct rdtgroup *new_prdtgrp;
4019 	struct rdtgroup *rdtgrp;
4020 	cpumask_var_t tmpmask;
4021 	int ret;
4022 
4023 	rdtgrp = kernfs_to_rdtgroup(kn);
4024 	new_prdtgrp = kernfs_to_rdtgroup(new_parent);
4025 	if (!rdtgrp || !new_prdtgrp)
4026 		return -ENOENT;
4027 
4028 	/* Release both kernfs active_refs before obtaining rdtgroup mutex. */
4029 	rdtgroup_kn_get(rdtgrp, kn);
4030 	rdtgroup_kn_get(new_prdtgrp, new_parent);
4031 
4032 	mutex_lock(&rdtgroup_mutex);
4033 
4034 	rdt_last_cmd_clear();
4035 
4036 	/*
4037 	 * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if
4038 	 * either kernfs_node is a file.
4039 	 */
4040 	if (kernfs_type(kn) != KERNFS_DIR ||
4041 	    kernfs_type(new_parent) != KERNFS_DIR) {
4042 		rdt_last_cmd_puts("Source and destination must be directories\n");
4043 		ret = -EPERM;
4044 		goto out;
4045 	}
4046 
4047 	if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) {
4048 		ret = -ENOENT;
4049 		goto out;
4050 	}
4051 
4052 	kn_parent = rdt_kn_parent(kn);
4053 	if (rdtgrp->type != RDTMON_GROUP || !kn_parent ||
4054 	    !is_mon_groups(kn_parent, rdt_kn_name(kn))) {
4055 		rdt_last_cmd_puts("Source must be a MON group\n");
4056 		ret = -EPERM;
4057 		goto out;
4058 	}
4059 
4060 	if (!is_mon_groups(new_parent, new_name)) {
4061 		rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n");
4062 		ret = -EPERM;
4063 		goto out;
4064 	}
4065 
4066 	/*
4067 	 * If the MON group is monitoring CPUs, the CPUs must be assigned to the
4068 	 * current parent CTRL_MON group and therefore cannot be assigned to
4069 	 * the new parent, making the move illegal.
4070 	 */
4071 	if (!cpumask_empty(&rdtgrp->cpu_mask) &&
4072 	    rdtgrp->mon.parent != new_prdtgrp) {
4073 		rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n");
4074 		ret = -EPERM;
4075 		goto out;
4076 	}
4077 
4078 	/*
4079 	 * Allocate the cpumask for use in mongrp_reparent() to avoid the
4080 	 * possibility of failing to allocate it after kernfs_rename() has
4081 	 * succeeded.
4082 	 */
4083 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) {
4084 		ret = -ENOMEM;
4085 		goto out;
4086 	}
4087 
4088 	/*
4089 	 * Perform all input validation and allocations needed to ensure
4090 	 * mongrp_reparent() will succeed before calling kernfs_rename(),
4091 	 * otherwise it would be necessary to revert this call if
4092 	 * mongrp_reparent() failed.
4093 	 */
4094 	ret = kernfs_rename(kn, new_parent, new_name);
4095 	if (!ret)
4096 		mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask);
4097 
4098 	free_cpumask_var(tmpmask);
4099 
4100 out:
4101 	mutex_unlock(&rdtgroup_mutex);
4102 	rdtgroup_kn_put(rdtgrp, kn);
4103 	rdtgroup_kn_put(new_prdtgrp, new_parent);
4104 	return ret;
4105 }
4106 
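/*
 * Show the active mount options, e.g. a filesystem mounted with
 * "mount -t resctrl -o cdp,mba_MBps resctrl /sys/fs/resctrl" (illustrative)
 * reports ",cdp,mba_MBps" here.
 */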
4107 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
4108 {
4109 	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
4110 		seq_puts(seq, ",cdp");
4111 
4112 	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
4113 		seq_puts(seq, ",cdpl2");
4114 
4115 	if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA)))
4116 		seq_puts(seq, ",mba_MBps");
4117 
4118 	if (resctrl_debug)
4119 		seq_puts(seq, ",debug");
4120 
4121 	return 0;
4122 }
4123 
4124 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
4125 	.mkdir		= rdtgroup_mkdir,
4126 	.rmdir		= rdtgroup_rmdir,
4127 	.rename		= rdtgroup_rename,
4128 	.show_options	= rdtgroup_show_options,
4129 };
4130 
4131 static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
4132 {
4133 	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
4134 				      KERNFS_ROOT_CREATE_DEACTIVATED |
4135 				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
4136 				      &rdtgroup_default);
4137 	if (IS_ERR(rdt_root))
4138 		return PTR_ERR(rdt_root);
4139 
4140 	ctx->kfc.root = rdt_root;
4141 	rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
4142 
4143 	return 0;
4144 }
4145 
4146 static void rdtgroup_destroy_root(void)
4147 {
4148 	lockdep_assert_held(&rdtgroup_mutex);
4149 
4150 	kernfs_destroy_root(rdt_root);
4151 	rdtgroup_default.kn = NULL;
4152 }
4153 
4154 static void rdtgroup_setup_default(void)
4155 {
4156 	mutex_lock(&rdtgroup_mutex);
4157 
4158 	rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID;
4159 	rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID;
4160 	rdtgroup_default.type = RDTCTRL_GROUP;
4161 	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
4162 
4163 	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
4164 
4165 	mutex_unlock(&rdtgroup_mutex);
4166 }
4167 
4168 static void domain_destroy_mon_state(struct rdt_mon_domain *d)
4169 {
4170 	int idx;
4171 
4172 	kfree(d->cntr_cfg);
4173 	bitmap_free(d->rmid_busy_llc);
4174 	for_each_mbm_idx(idx) {
4175 		kfree(d->mbm_states[idx]);
4176 		d->mbm_states[idx] = NULL;
4177 	}
4178 }
4179 
4180 void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
4181 {
4182 	mutex_lock(&rdtgroup_mutex);
4183 
4184 	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
4185 		mba_sc_domain_destroy(r, d);
4186 
4187 	mutex_unlock(&rdtgroup_mutex);
4188 }
4189 
4190 void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
4191 {
4192 	mutex_lock(&rdtgroup_mutex);
4193 
4194 	/*
4195 	 * If resctrl is mounted, remove all the
4196 	 * per domain monitor data directories.
4197 	 */
4198 	if (resctrl_mounted && resctrl_arch_mon_capable())
4199 		rmdir_mondata_subdir_allrdtgrp(r, d);
4200 
4201 	if (resctrl_is_mbm_enabled())
4202 		cancel_delayed_work(&d->mbm_over);
4203 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) && has_busy_rmid(d)) {
4204 		/*
4205 		 * When a package is going down, forcefully
4206 		 * decrement rmid->ebusy. There is no way to know
4207 		 * that the L3 was flushed and hence may lead to
4208 		 * incorrect counts in rare scenarios, but leaving
4209 		 * the RMID as busy creates RMID leaks if the
4210 		 * package never comes back.
4211 		 */
4212 		__check_limbo(d, true);
4213 		cancel_delayed_work(&d->cqm_limbo);
4214 	}
4215 
4216 	domain_destroy_mon_state(d);
4217 
4218 	mutex_unlock(&rdtgroup_mutex);
4219 }
4220 
4221 /**
4222  * domain_setup_mon_state() - Initialise domain monitoring structures.
4223  * @r:	The resource for the newly online domain.
4224  * @d:	The newly online domain.
4225  *
4226  * Allocate monitor resources that belong to this domain.
4227  * Called when the first CPU of a domain comes online, regardless of whether
4228  * the filesystem is mounted.
4229  * During boot this may be called before global allocations have been made by
4230  * resctrl_mon_resource_init().
4231  *
4232  * Returns 0 for success, or -ENOMEM.
4233  */
4234 static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
4235 {
4236 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
4237 	size_t tsize = sizeof(*d->mbm_states[0]);
4238 	enum resctrl_event_id eventid;
4239 	int idx;
4240 
4241 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
4242 		d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
4243 		if (!d->rmid_busy_llc)
4244 			return -ENOMEM;
4245 	}
4246 
4247 	for_each_mbm_event_id(eventid) {
4248 		if (!resctrl_is_mon_event_enabled(eventid))
4249 			continue;
4250 		idx = MBM_STATE_IDX(eventid);
4251 		d->mbm_states[idx] = kcalloc(idx_limit, tsize, GFP_KERNEL);
4252 		if (!d->mbm_states[idx])
4253 			goto cleanup;
4254 	}
4255 
4256 	if (resctrl_is_mbm_enabled() && r->mon.mbm_cntr_assignable) {
4257 		tsize = sizeof(*d->cntr_cfg);
4258 		d->cntr_cfg = kcalloc(r->mon.num_mbm_cntrs, tsize, GFP_KERNEL);
4259 		if (!d->cntr_cfg)
4260 			goto cleanup;
4261 	}
4262 
4263 	return 0;
4264 cleanup:
4265 	bitmap_free(d->rmid_busy_llc);
4266 	for_each_mbm_idx(idx) {
4267 		kfree(d->mbm_states[idx]);
4268 		d->mbm_states[idx] = NULL;
4269 	}
4270 
4271 	return -ENOMEM;
4272 }
4273 
4274 int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
4275 {
4276 	int err = 0;
4277 
4278 	mutex_lock(&rdtgroup_mutex);
4279 
4280 	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
4281 		/* RDT_RESOURCE_MBA is never mon_capable */
4282 		err = mba_sc_domain_allocate(r, d);
4283 	}
4284 
4285 	mutex_unlock(&rdtgroup_mutex);
4286 
4287 	return err;
4288 }
4289 
4290 int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
4291 {
4292 	int err;
4293 
4294 	mutex_lock(&rdtgroup_mutex);
4295 
4296 	err = domain_setup_mon_state(r, d);
4297 	if (err)
4298 		goto out_unlock;
4299 
4300 	if (resctrl_is_mbm_enabled()) {
4301 		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
4302 		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
4303 					   RESCTRL_PICK_ANY_CPU);
4304 	}
4305 
4306 	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
4307 		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
4308 
4309 	/*
4310 	 * If the filesystem is not mounted then only the default resource group
4311 	 * exists. Creation of its directories is deferred until mount time
4312 	 * by rdt_get_tree() calling mkdir_mondata_all().
4313 	 * If resctrl is mounted, add per domain monitor data directories.
4314 	 */
4315 	if (resctrl_mounted && resctrl_arch_mon_capable())
4316 		mkdir_mondata_subdir_allrdtgrp(r, d);
4317 
4318 out_unlock:
4319 	mutex_unlock(&rdtgroup_mutex);
4320 
4321 	return err;
4322 }
4323 
4324 void resctrl_online_cpu(unsigned int cpu)
4325 {
4326 	mutex_lock(&rdtgroup_mutex);
4327 	/* The CPU is added to the default rdtgroup when it comes online. */
4328 	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
4329 	mutex_unlock(&rdtgroup_mutex);
4330 }
4331 
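/* Clear @cpu from the first child MON group of @r (if any) that has it set. */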
4332 static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
4333 {
4334 	struct rdtgroup *cr;
4335 
4336 	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
4337 		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
4338 			break;
4339 	}
4340 }
4341 
4342 static struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu,
4343 						      struct rdt_resource *r)
4344 {
4345 	struct rdt_mon_domain *d;
4346 
4347 	lockdep_assert_cpus_held();
4348 
4349 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
4350 		/* Find the domain that contains this CPU */
4351 		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
4352 			return d;
4353 	}
4354 
4355 	return NULL;
4356 }
4357 
4358 void resctrl_offline_cpu(unsigned int cpu)
4359 {
4360 	struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
4361 	struct rdt_mon_domain *d;
4362 	struct rdtgroup *rdtgrp;
4363 
4364 	mutex_lock(&rdtgroup_mutex);
4365 	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
4366 		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
4367 			clear_childcpus(rdtgrp, cpu);
4368 			break;
4369 		}
4370 	}
4371 
4372 	if (!l3->mon_capable)
4373 		goto out_unlock;
4374 
4375 	d = get_mon_domain_from_cpu(cpu, l3);
4376 	if (d) {
4377 		if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
4378 			cancel_delayed_work(&d->mbm_over);
4379 			mbm_setup_overflow_handler(d, 0, cpu);
4380 		}
4381 		if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) &&
4382 		    cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
4383 			cancel_delayed_work(&d->cqm_limbo);
4384 			cqm_setup_limbo_handler(d, 0, cpu);
4385 		}
4386 	}
4387 
4388 out_unlock:
4389 	mutex_unlock(&rdtgroup_mutex);
4390 }
4391 
4392 /*
4393  * resctrl_init - resctrl filesystem initialization
4394  *
4395  * Set up the resctrl filesystem: set up the root, create the mount point,
4396  * register the resctrl filesystem, and initialize files under the root directory.
4397  *
4398  * Return: 0 on success or -errno
4399  */
4400 int resctrl_init(void)
4401 {
4402 	int ret = 0;
4403 
4404 	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
4405 		     sizeof(last_cmd_status_buf));
4406 
4407 	rdtgroup_setup_default();
4408 
4409 	thread_throttle_mode_init();
4410 
4411 	ret = resctrl_mon_resource_init();
4412 	if (ret)
4413 		return ret;
4414 
4415 	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
4416 	if (ret) {
4417 		resctrl_mon_resource_exit();
4418 		return ret;
4419 	}
4420 
4421 	ret = register_filesystem(&rdt_fs_type);
4422 	if (ret)
4423 		goto cleanup_mountpoint;
4424 
4425 	/*
4426 	 * Adding the resctrl debugfs directory here may not be ideal since
4427 	 * it would let the resctrl debugfs directory appear on the debugfs
4428 	 * filesystem before the resctrl filesystem is mounted.
4429 	 * It may also be ok since that would enable debugging of RDT before
4430 	 * resctrl is mounted.
4431 	 * The reason why the debugfs directory is created here and not in
4432 	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
4433 	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
4434 	 * (the lockdep class of inode->i_rwsem). Other filesystem
4435 	 * interactions (e.g. SyS_getdents) have the lock ordering:
4436 	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
4437 	 * During mmap(), called with &mm->mmap_lock held, the rdtgroup_mutex
4438 	 * is taken, thus creating the dependency:
4439 	 * &mm->mmap_lock --> rdtgroup_mutex
4440 	 * which, together with the other two lock dependencies, can cause deadlock.
4441 	 * By creating the debugfs directory here we avoid a dependency
4442 	 * that may cause deadlock (even though file operations cannot
4443 	 * occur until the filesystem is mounted, but I do not know how to
4444 	 * tell lockdep that).
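	 *
	 * Informal summary of the cycle being avoided:
	 *   rdtgroup_mutex --> i_mutex_key --> mmap_lock --> rdtgroup_mutex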
4445 	 */
4446 	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
4447 
4448 	return 0;
4449 
4450 cleanup_mountpoint:
4451 	sysfs_remove_mount_point(fs_kobj, "resctrl");
4452 	resctrl_mon_resource_exit();
4453 
4454 	return ret;
4455 }
4456 
4457 static bool resctrl_online_domains_exist(void)
4458 {
4459 	struct rdt_resource *r;
4460 
4461 	/*
4462 	 * Only walk capable resources to allow resctrl_arch_get_resource()
4463 	 * to return dummy 'not capable' resources.
4464 	 */
4465 	for_each_alloc_capable_rdt_resource(r) {
4466 		if (!list_empty(&r->ctrl_domains))
4467 			return true;
4468 	}
4469 
4470 	for_each_mon_capable_rdt_resource(r) {
4471 		if (!list_empty(&r->mon_domains))
4472 			return true;
4473 	}
4474 
4475 	return false;
4476 }
4477 
4478 /**
4479  * resctrl_exit() - Remove the resctrl filesystem and free resources.
4480  *
4481  * Called by the architecture code in response to a fatal error.
4482  * Removes resctrl files and structures from kernfs to prevent further
4483  * configuration.
4484  *
4485  * When called by the architecture code, all CPUs and resctrl domains must be
4486  * offline. This ensures the limbo and overflow handlers are not scheduled to
4487  * run, meaning the data structures they access can be freed by
4488  * resctrl_mon_resource_exit().
4489  *
4490  * After resctrl_exit() returns, the architecture code should return an
4491  * error from all resctrl_arch_ functions that can do this.
4492  * resctrl_arch_get_resource() must continue to return struct rdt_resources
4493  * with the correct rid field to ensure the filesystem can be unmounted.
4494  */
4495 void resctrl_exit(void)
4496 {
4497 	cpus_read_lock();
4498 	WARN_ON_ONCE(resctrl_online_domains_exist());
4499 
4500 	mutex_lock(&rdtgroup_mutex);
4501 	resctrl_fs_teardown();
4502 	mutex_unlock(&rdtgroup_mutex);
4503 
4504 	cpus_read_unlock();
4505 
4506 	debugfs_remove_recursive(debugfs_resctrl);
4507 	debugfs_resctrl = NULL;
4508 	unregister_filesystem(&rdt_fs_type);
4509 
4510 	/*
4511 	 * Do not remove the sysfs mount point added by resctrl_init() so that
4512 	 * it can be used to umount resctrl.
4513 	 */
4514 
4515 	resctrl_mon_resource_exit();
4516 }
4517