1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
* User interface for Resource Allocation in Resource Director Technology (RDT)
4 *
5 * Copyright (C) 2016 Intel Corporation
6 *
7 * Author: Fenghua Yu <fenghua.yu@intel.com>
8 *
* More information about RDT can be found in the Intel (R) x86 Architecture
10 * Software Developer Manual.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/cpu.h>
16 #include <linux/debugfs.h>
17 #include <linux/fs.h>
18 #include <linux/fs_parser.h>
19 #include <linux/sysfs.h>
20 #include <linux/kernfs.h>
21 #include <linux/once.h>
22 #include <linux/resctrl.h>
23 #include <linux/seq_buf.h>
24 #include <linux/seq_file.h>
25 #include <linux/sched/task.h>
26 #include <linux/slab.h>
27 #include <linux/user_namespace.h>
28
29 #include <uapi/linux/magic.h>
30
31 #include "internal.h"
32
33 /* Mutex to protect rdtgroup access. */
34 DEFINE_MUTEX(rdtgroup_mutex);
35
36 static struct kernfs_root *rdt_root;
37
38 struct rdtgroup rdtgroup_default;
39
40 LIST_HEAD(rdt_all_groups);
41
42 /* list of entries for the schemata file */
43 LIST_HEAD(resctrl_schema_all);
44
45 /*
46 * List of struct mon_data containing private data of event files for use by
47 * rdtgroup_mondata_show(). Protected by rdtgroup_mutex.
48 */
49 static LIST_HEAD(mon_data_kn_priv_list);
50
51 /* The filesystem can only be mounted once. */
52 bool resctrl_mounted;
53
54 /* Kernel fs node for "info" directory under root */
55 static struct kernfs_node *kn_info;
56
57 /* Kernel fs node for "mon_groups" directory under root */
58 static struct kernfs_node *kn_mongrp;
59
60 /* Kernel fs node for "mon_data" directory under root */
61 static struct kernfs_node *kn_mondata;
62
63 /*
64 * Used to store the max resource name width to display the schemata names in
65 * a tabular format.
66 */
67 int max_name_width;
68
69 static struct seq_buf last_cmd_status;
70
71 static char last_cmd_status_buf[512];
72
73 static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
74
75 static void rdtgroup_destroy_root(void);
76
77 struct dentry *debugfs_resctrl;
78
79 /*
80 * Memory bandwidth monitoring event to use for the default CTRL_MON group
81 * and each new CTRL_MON group created by the user. Only relevant when
82 * the filesystem is mounted with the "mba_MBps" option so it does not
83 * matter that it remains uninitialized on systems that do not support
84 * the "mba_MBps" option.
85 */
86 enum resctrl_event_id mba_mbps_default_event;
87
88 static bool resctrl_debug;
89
void rdt_last_cmd_clear(void)
91 {
92 lockdep_assert_held(&rdtgroup_mutex);
93 seq_buf_clear(&last_cmd_status);
94 }
95
void rdt_last_cmd_puts(const char *s)
97 {
98 lockdep_assert_held(&rdtgroup_mutex);
99 seq_buf_puts(&last_cmd_status, s);
100 }
101
void rdt_last_cmd_printf(const char *fmt, ...)
103 {
104 va_list ap;
105
106 va_start(ap, fmt);
107 lockdep_assert_held(&rdtgroup_mutex);
108 seq_buf_vprintf(&last_cmd_status, fmt, ap);
109 va_end(ap);
110 }
111
void rdt_staged_configs_clear(void)
113 {
114 struct rdt_ctrl_domain *dom;
115 struct rdt_resource *r;
116
117 lockdep_assert_held(&rdtgroup_mutex);
118
119 for_each_alloc_capable_rdt_resource(r) {
120 list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
121 memset(dom->staged_config, 0, sizeof(dom->staged_config));
122 }
123 }
124
static bool resctrl_is_mbm_enabled(void)
126 {
127 return (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID) ||
128 resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID));
129 }
130
131 /*
132 * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap
133 * of free CLOSIDs.
134 *
135 * Using a global CLOSID across all resources has some advantages and
136 * some drawbacks:
137 * + We can simply set current's closid to assign a task to a resource
138 * group.
139 * + Context switch code can avoid extra memory references deciding which
140 * CLOSID to load into the PQR_ASSOC MSR
141 * - We give up some options in configuring resource groups across multi-socket
142 * systems.
143 * - Our choices on how to configure each resource become progressively more
144 * limited as the number of resources grows.
145 */
146 static unsigned long *closid_free_map;
147
148 static int closid_free_map_len;
149
int closids_supported(void)
151 {
152 return closid_free_map_len;
153 }
154
static int closid_init(void)
156 {
157 struct resctrl_schema *s;
158 u32 rdt_min_closid = ~0;
159
160 /* Monitor only platforms still call closid_init() */
161 if (list_empty(&resctrl_schema_all))
162 return 0;
163
164 /* Compute rdt_min_closid across all resources */
165 list_for_each_entry(s, &resctrl_schema_all, list)
166 rdt_min_closid = min(rdt_min_closid, s->num_closid);
167
168 closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL);
169 if (!closid_free_map)
170 return -ENOMEM;
171 bitmap_fill(closid_free_map, rdt_min_closid);
172
173 /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
174 __clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map);
175 closid_free_map_len = rdt_min_closid;
176
177 return 0;
178 }
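
/*
 * Illustrative example (assuming RESCTRL_RESERVED_CLOSID is 0, not part of
 * the build): if the L3 schema reports 16 CLOSIDs and the MB schema reports
 * 8, rdt_min_closid is 8 so that any allocated CLOSID is valid for every
 * resource. After closid_init() the allocator state is then:
 *
 *	closid_free_map     = 0b11111110  (bit 0 reserved for the default group)
 *	closids_supported() = 8
 */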
179
static void closid_exit(void)
181 {
182 bitmap_free(closid_free_map);
183 closid_free_map = NULL;
184 }
185
static int closid_alloc(void)
187 {
188 int cleanest_closid;
189 u32 closid;
190
191 lockdep_assert_held(&rdtgroup_mutex);
192
193 if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
194 resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
195 cleanest_closid = resctrl_find_cleanest_closid();
196 if (cleanest_closid < 0)
197 return cleanest_closid;
198 closid = cleanest_closid;
199 } else {
200 closid = find_first_bit(closid_free_map, closid_free_map_len);
201 if (closid == closid_free_map_len)
202 return -ENOSPC;
203 }
204 __clear_bit(closid, closid_free_map);
205
206 return closid;
207 }
208
void closid_free(int closid)
210 {
211 lockdep_assert_held(&rdtgroup_mutex);
212
213 __set_bit(closid, closid_free_map);
214 }
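
/*
 * Sketch of the expected calling pattern (illustrative only, not part of
 * the build): group creation allocates a CLOSID under rdtgroup_mutex and
 * group removal returns it:
 *
 *	lockdep_assert_held(&rdtgroup_mutex);
 *	closid = closid_alloc();
 *	if (closid < 0)
 *		return closid;		// negative errno, e.g. -ENOSPC
 *	rdtgrp->closid = closid;
 *	...
 *	closid_free(rdtgrp->closid);	// on group removal
 */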
215
216 /**
217 * closid_allocated - test if provided closid is in use
218 * @closid: closid to be tested
219 *
220 * Return: true if @closid is currently associated with a resource group,
221 * false if @closid is free
222 */
bool closid_allocated(unsigned int closid)
224 {
225 lockdep_assert_held(&rdtgroup_mutex);
226
227 return !test_bit(closid, closid_free_map);
228 }
229
bool closid_alloc_fixed(u32 closid)
231 {
232 return __test_and_clear_bit(closid, closid_free_map);
233 }
234
235 /**
236 * rdtgroup_mode_by_closid - Return mode of resource group with closid
* @closid: closid of the resource group
238 *
239 * Each resource group is associated with a @closid. Here the mode
240 * of a resource group can be queried by searching for it using its closid.
241 *
242 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
243 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
245 {
246 struct rdtgroup *rdtgrp;
247
248 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
249 if (rdtgrp->closid == closid)
250 return rdtgrp->mode;
251 }
252
253 return RDT_NUM_MODES;
254 }
255
256 static const char * const rdt_mode_str[] = {
257 [RDT_MODE_SHAREABLE] = "shareable",
258 [RDT_MODE_EXCLUSIVE] = "exclusive",
259 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
260 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
261 };
262
263 /**
264 * rdtgroup_mode_str - Return the string representation of mode
265 * @mode: the resource group mode as &enum rdtgroup_mode
266 *
267 * Return: string representation of valid mode, "unknown" otherwise
268 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
270 {
271 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
272 return "unknown";
273
274 return rdt_mode_str[mode];
275 }
276
277 /* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
279 {
280 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
281 .ia_uid = current_fsuid(),
282 .ia_gid = current_fsgid(), };
283
284 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
285 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
286 return 0;
287
288 return kernfs_setattr(kn, &iattr);
289 }
290
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
292 {
293 struct kernfs_node *kn;
294 int ret;
295
296 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
297 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
298 0, rft->kf_ops, rft, NULL, NULL);
299 if (IS_ERR(kn))
300 return PTR_ERR(kn);
301
302 ret = rdtgroup_kn_set_ugid(kn);
303 if (ret) {
304 kernfs_remove(kn);
305 return ret;
306 }
307
308 return 0;
309 }
310
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
312 {
313 struct kernfs_open_file *of = m->private;
314 struct rftype *rft = of->kn->priv;
315
316 if (rft->seq_show)
317 return rft->seq_show(of, m, arg);
318 return 0;
319 }
320
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
322 size_t nbytes, loff_t off)
323 {
324 struct rftype *rft = of->kn->priv;
325
326 if (rft->write)
327 return rft->write(of, buf, nbytes, off);
328
329 return -EINVAL;
330 }
331
332 static const struct kernfs_ops rdtgroup_kf_single_ops = {
333 .atomic_write_len = PAGE_SIZE,
334 .write = rdtgroup_file_write,
335 .seq_show = rdtgroup_seqfile_show,
336 };
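
/*
 * Example of how a read flows through these ops (illustrative, using the
 * "last_cmd_status" entry of res_common_files[] defined later in this file):
 *
 *	kernfs seq_show callback
 *	  -> rdtgroup_seqfile_show(m, arg)
 *	       of  = m->private;
 *	       rft = of->kn->priv;	  // stored by rdtgroup_add_file()
 *	       rft->seq_show(of, m, arg); // rdt_last_cmd_status_show() here
 */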
337
338 static const struct kernfs_ops kf_mondata_ops = {
339 .atomic_write_len = PAGE_SIZE,
340 .seq_show = rdtgroup_mondata_show,
341 };
342
static bool is_cpu_list(struct kernfs_open_file *of)
344 {
345 struct rftype *rft = of->kn->priv;
346
347 return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
348 }
349
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
351 struct seq_file *s, void *v)
352 {
353 struct rdtgroup *rdtgrp;
354 struct cpumask *mask;
355 int ret = 0;
356
357 rdtgrp = rdtgroup_kn_lock_live(of->kn);
358
359 if (rdtgrp) {
360 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
361 if (!rdtgrp->plr->d) {
362 rdt_last_cmd_clear();
363 rdt_last_cmd_puts("Cache domain offline\n");
364 ret = -ENODEV;
365 } else {
366 mask = &rdtgrp->plr->d->hdr.cpu_mask;
367 seq_printf(s, is_cpu_list(of) ?
368 "%*pbl\n" : "%*pb\n",
369 cpumask_pr_args(mask));
370 }
371 } else {
372 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
373 cpumask_pr_args(&rdtgrp->cpu_mask));
374 }
375 } else {
376 ret = -ENOENT;
377 }
378 rdtgroup_kn_unlock(of->kn);
379
380 return ret;
381 }
382
383 /*
* Update the PQR_ASSOC MSR on all CPUs in @cpu_mask.
385 *
386 * Per task closids/rmids must have been set up before calling this function.
387 * @r may be NULL.
388 */
389 static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
391 {
392 struct resctrl_cpu_defaults defaults, *p = NULL;
393
394 if (r) {
395 defaults.closid = r->closid;
396 defaults.rmid = r->mon.rmid;
397 p = &defaults;
398 }
399
400 on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
401 }
402
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
404 cpumask_var_t tmpmask)
405 {
406 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
407 struct list_head *head;
408
409 /* Check whether cpus belong to parent ctrl group */
410 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
411 if (!cpumask_empty(tmpmask)) {
412 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
413 return -EINVAL;
414 }
415
416 /* Check whether cpus are dropped from this group */
417 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
418 if (!cpumask_empty(tmpmask)) {
419 /* Give any dropped cpus to parent rdtgroup */
420 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
421 update_closid_rmid(tmpmask, prgrp);
422 }
423
424 /*
425 * If we added cpus, remove them from previous group that owned them
426 * and update per-cpu rmid
427 */
428 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
429 if (!cpumask_empty(tmpmask)) {
430 head = &prgrp->mon.crdtgrp_list;
431 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
432 if (crgrp == rdtgrp)
433 continue;
434 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
435 tmpmask);
436 }
437 update_closid_rmid(tmpmask, rdtgrp);
438 }
439
440 /* Done pushing/pulling - update this group with new mask */
441 cpumask_copy(&rdtgrp->cpu_mask, newmask);
442
443 return 0;
444 }
445
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
447 {
448 struct rdtgroup *crgrp;
449
450 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
/* update the child mon group masks as well */
452 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
453 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
454 }
455
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
457 cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
458 {
459 struct rdtgroup *r, *crgrp;
460 struct list_head *head;
461
462 /* Check whether cpus are dropped from this group */
463 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
464 if (!cpumask_empty(tmpmask)) {
465 /* Can't drop from default group */
466 if (rdtgrp == &rdtgroup_default) {
467 rdt_last_cmd_puts("Can't drop CPUs from default group\n");
468 return -EINVAL;
469 }
470
471 /* Give any dropped cpus to rdtgroup_default */
472 cpumask_or(&rdtgroup_default.cpu_mask,
473 &rdtgroup_default.cpu_mask, tmpmask);
474 update_closid_rmid(tmpmask, &rdtgroup_default);
475 }
476
477 /*
478 * If we added cpus, remove them from previous group and
479 * the prev group's child groups that owned them
480 * and update per-cpu closid/rmid.
481 */
482 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
483 if (!cpumask_empty(tmpmask)) {
484 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
485 if (r == rdtgrp)
486 continue;
487 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
488 if (!cpumask_empty(tmpmask1))
489 cpumask_rdtgrp_clear(r, tmpmask1);
490 }
491 update_closid_rmid(tmpmask, rdtgrp);
492 }
493
494 /* Done pushing/pulling - update this group with new mask */
495 cpumask_copy(&rdtgrp->cpu_mask, newmask);
496
497 /*
498 * Clear child mon group masks since there is a new parent mask
499 * now and update the rmid for the cpus the child lost.
500 */
501 head = &rdtgrp->mon.crdtgrp_list;
502 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
503 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
504 update_closid_rmid(tmpmask, rdtgrp);
505 cpumask_clear(&crgrp->cpu_mask);
506 }
507
508 return 0;
509 }
510
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
512 char *buf, size_t nbytes, loff_t off)
513 {
514 cpumask_var_t tmpmask, newmask, tmpmask1;
515 struct rdtgroup *rdtgrp;
516 int ret;
517
518 if (!buf)
519 return -EINVAL;
520
521 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
522 return -ENOMEM;
523 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
524 free_cpumask_var(tmpmask);
525 return -ENOMEM;
526 }
527 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
528 free_cpumask_var(tmpmask);
529 free_cpumask_var(newmask);
530 return -ENOMEM;
531 }
532
533 rdtgrp = rdtgroup_kn_lock_live(of->kn);
534 if (!rdtgrp) {
535 ret = -ENOENT;
536 goto unlock;
537 }
538
539 rdt_last_cmd_clear();
540
541 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
542 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
543 ret = -EINVAL;
544 rdt_last_cmd_puts("Pseudo-locking in progress\n");
545 goto unlock;
546 }
547
548 if (is_cpu_list(of))
549 ret = cpulist_parse(buf, newmask);
550 else
551 ret = cpumask_parse(buf, newmask);
552
553 if (ret) {
554 rdt_last_cmd_puts("Bad CPU list/mask\n");
555 goto unlock;
556 }
557
558 /* check that user didn't specify any offline cpus */
559 cpumask_andnot(tmpmask, newmask, cpu_online_mask);
560 if (!cpumask_empty(tmpmask)) {
561 ret = -EINVAL;
562 rdt_last_cmd_puts("Can only assign online CPUs\n");
563 goto unlock;
564 }
565
566 if (rdtgrp->type == RDTCTRL_GROUP)
567 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
568 else if (rdtgrp->type == RDTMON_GROUP)
569 ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
570 else
571 ret = -EINVAL;
572
573 unlock:
574 rdtgroup_kn_unlock(of->kn);
575 free_cpumask_var(tmpmask);
576 free_cpumask_var(newmask);
577 free_cpumask_var(tmpmask1);
578
579 return ret ?: nbytes;
580 }
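
/*
 * Illustrative example of the push/pull semantics above: if control group
 * "g1" currently owns CPUs 0-3 and "2-5" is written to its cpus_list file:
 *
 *  - CPUs 0-1 are dropped from g1 and returned to rdtgroup_default, with
 *    their per-CPU CLOSID/RMID refreshed via update_closid_rmid();
 *  - CPUs 4-5 are removed from whichever groups (and their child monitor
 *    groups) previously owned them and start using g1's CLOSID/RMID.
 *
 * Writing offline CPUs, or CPUs outside the parent control group for a
 * monitor group, is rejected with -EINVAL.
 */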
581
582 /**
583 * rdtgroup_remove - the helper to remove resource group safely
584 * @rdtgrp: resource group to remove
585 *
586 * On resource group creation via a mkdir, an extra kernfs_node reference is
587 * taken to ensure that the rdtgroup structure remains accessible for the
588 * rdtgroup_kn_unlock() calls where it is removed.
589 *
590 * Drop the extra reference here, then free the rdtgroup structure.
591 *
592 * Return: void
593 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
595 {
596 kernfs_put(rdtgrp->kn);
597 kfree(rdtgrp);
598 }
599
static void _update_task_closid_rmid(void *task)
601 {
602 /*
603 * If the task is still current on this CPU, update PQR_ASSOC MSR.
604 * Otherwise, the MSR is updated when the task is scheduled in.
605 */
606 if (task == current)
607 resctrl_arch_sched_in(task);
608 }
609
static void update_task_closid_rmid(struct task_struct *t)
611 {
612 if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
613 smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
614 else
615 _update_task_closid_rmid(t);
616 }
617
static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
619 {
620 u32 closid, rmid = rdtgrp->mon.rmid;
621
622 if (rdtgrp->type == RDTCTRL_GROUP)
623 closid = rdtgrp->closid;
624 else if (rdtgrp->type == RDTMON_GROUP)
625 closid = rdtgrp->mon.parent->closid;
626 else
627 return false;
628
629 return resctrl_arch_match_closid(tsk, closid) &&
630 resctrl_arch_match_rmid(tsk, closid, rmid);
631 }
632
static int __rdtgroup_move_task(struct task_struct *tsk,
634 struct rdtgroup *rdtgrp)
635 {
636 /* If the task is already in rdtgrp, no need to move the task. */
637 if (task_in_rdtgroup(tsk, rdtgrp))
638 return 0;
639
640 /*
641 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
* updated with them.
643 *
644 * For ctrl_mon groups, move both closid and rmid.
645 * For monitor groups, can move the tasks only from
646 * their parent CTRL group.
647 */
648 if (rdtgrp->type == RDTMON_GROUP &&
649 !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
650 rdt_last_cmd_puts("Can't move task to different control group\n");
651 return -EINVAL;
652 }
653
654 if (rdtgrp->type == RDTMON_GROUP)
655 resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
656 rdtgrp->mon.rmid);
657 else
658 resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
659 rdtgrp->mon.rmid);
660
661 /*
662 * Ensure the task's closid and rmid are written before determining if
* the task is current, which decides whether it needs to be interrupted.
664 * This pairs with the full barrier between the rq->curr update and
665 * resctrl_arch_sched_in() during context switch.
666 */
667 smp_mb();
668
669 /*
670 * By now, the task's closid and rmid are set. If the task is current
671 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
672 * group go into effect. If the task is not current, the MSR will be
673 * updated when the task is scheduled in.
674 */
675 update_task_closid_rmid(tsk);
676
677 return 0;
678 }
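
/*
 * Rough sketch of the race the smp_mb() above closes (illustrative): without
 * the barrier the CPU could order the task_curr() read in
 * update_task_closid_rmid() before the closid/rmid stores become visible.
 * The mover would then see the task as not running and skip the IPI, while
 * the CPU switching the task in still reads the old closid/rmid, leaving
 * PQR_ASSOC stale until the next context switch.
 */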
679
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
681 {
682 return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
683 resctrl_arch_match_closid(t, r->closid));
684 }
685
static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
687 {
688 return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
689 resctrl_arch_match_rmid(t, r->mon.parent->closid,
690 r->mon.rmid));
691 }
692
693 /**
694 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
695 * @r: Resource group
696 *
697 * Return: 1 if tasks have been assigned to @r, 0 otherwise
698 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
700 {
701 struct task_struct *p, *t;
702 int ret = 0;
703
704 lockdep_assert_held(&rdtgroup_mutex);
705
706 rcu_read_lock();
707 for_each_process_thread(p, t) {
708 if (is_closid_match(t, r) || is_rmid_match(t, r)) {
709 ret = 1;
710 break;
711 }
712 }
713 rcu_read_unlock();
714
715 return ret;
716 }
717
static int rdtgroup_task_write_permission(struct task_struct *task,
719 struct kernfs_open_file *of)
720 {
721 const struct cred *tcred = get_task_cred(task);
722 const struct cred *cred = current_cred();
723 int ret = 0;
724
725 /*
726 * Even if we're attaching all tasks in the thread group, we only
727 * need to check permissions on one of them.
728 */
729 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
730 !uid_eq(cred->euid, tcred->uid) &&
731 !uid_eq(cred->euid, tcred->suid)) {
732 rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
733 ret = -EPERM;
734 }
735
736 put_cred(tcred);
737 return ret;
738 }
739
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
741 struct kernfs_open_file *of)
742 {
743 struct task_struct *tsk;
744 int ret;
745
746 rcu_read_lock();
747 if (pid) {
748 tsk = find_task_by_vpid(pid);
749 if (!tsk) {
750 rcu_read_unlock();
751 rdt_last_cmd_printf("No task %d\n", pid);
752 return -ESRCH;
753 }
754 } else {
755 tsk = current;
756 }
757
758 get_task_struct(tsk);
759 rcu_read_unlock();
760
761 ret = rdtgroup_task_write_permission(tsk, of);
762 if (!ret)
763 ret = __rdtgroup_move_task(tsk, rdtgrp);
764
765 put_task_struct(tsk);
766 return ret;
767 }
768
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
770 char *buf, size_t nbytes, loff_t off)
771 {
772 struct rdtgroup *rdtgrp;
773 char *pid_str;
774 int ret = 0;
775 pid_t pid;
776
777 rdtgrp = rdtgroup_kn_lock_live(of->kn);
778 if (!rdtgrp) {
779 rdtgroup_kn_unlock(of->kn);
780 return -ENOENT;
781 }
782 rdt_last_cmd_clear();
783
784 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
785 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
786 ret = -EINVAL;
787 rdt_last_cmd_puts("Pseudo-locking in progress\n");
788 goto unlock;
789 }
790
791 while (buf && buf[0] != '\0' && buf[0] != '\n') {
792 pid_str = strim(strsep(&buf, ","));
793
794 if (kstrtoint(pid_str, 0, &pid)) {
795 rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
796 ret = -EINVAL;
797 break;
798 }
799
800 if (pid < 0) {
801 rdt_last_cmd_printf("Invalid pid %d\n", pid);
802 ret = -EINVAL;
803 break;
804 }
805
806 ret = rdtgroup_move_task(pid, rdtgrp, of);
807 if (ret) {
808 rdt_last_cmd_printf("Error while processing task %d\n", pid);
809 break;
810 }
811 }
812
813 unlock:
814 rdtgroup_kn_unlock(of->kn);
815
816 return ret ?: nbytes;
817 }
818
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
820 {
821 struct task_struct *p, *t;
822 pid_t pid;
823
824 rcu_read_lock();
825 for_each_process_thread(p, t) {
826 if (is_closid_match(t, r) || is_rmid_match(t, r)) {
827 pid = task_pid_vnr(t);
828 if (pid)
829 seq_printf(s, "%d\n", pid);
830 }
831 }
832 rcu_read_unlock();
833 }
834
static int rdtgroup_tasks_show(struct kernfs_open_file *of,
836 struct seq_file *s, void *v)
837 {
838 struct rdtgroup *rdtgrp;
839 int ret = 0;
840
841 rdtgrp = rdtgroup_kn_lock_live(of->kn);
842 if (rdtgrp)
843 show_rdt_tasks(rdtgrp, s);
844 else
845 ret = -ENOENT;
846 rdtgroup_kn_unlock(of->kn);
847
848 return ret;
849 }
850
static int rdtgroup_closid_show(struct kernfs_open_file *of,
852 struct seq_file *s, void *v)
853 {
854 struct rdtgroup *rdtgrp;
855 int ret = 0;
856
857 rdtgrp = rdtgroup_kn_lock_live(of->kn);
858 if (rdtgrp)
859 seq_printf(s, "%u\n", rdtgrp->closid);
860 else
861 ret = -ENOENT;
862 rdtgroup_kn_unlock(of->kn);
863
864 return ret;
865 }
866
static int rdtgroup_rmid_show(struct kernfs_open_file *of,
868 struct seq_file *s, void *v)
869 {
870 struct rdtgroup *rdtgrp;
871 int ret = 0;
872
873 rdtgrp = rdtgroup_kn_lock_live(of->kn);
874 if (rdtgrp)
875 seq_printf(s, "%u\n", rdtgrp->mon.rmid);
876 else
877 ret = -ENOENT;
878 rdtgroup_kn_unlock(of->kn);
879
880 return ret;
881 }
882
883 #ifdef CONFIG_PROC_CPU_RESCTRL
884 /*
885 * A task can only be part of one resctrl control group and of one monitor
* group which is associated with that control group.
887 *
888 * 1) res:
889 * mon:
890 *
891 * resctrl is not available.
892 *
893 * 2) res:/
894 * mon:
895 *
896 * Task is part of the root resctrl control group, and it is not associated
* with any monitor group.
898 *
899 * 3) res:/
900 * mon:mon0
901 *
902 * Task is part of the root resctrl control group and monitor group mon0.
903 *
904 * 4) res:group0
905 * mon:
906 *
907 * Task is part of resctrl control group group0, and it is not associated
* with any monitor group.
909 *
910 * 5) res:group0
911 * mon:mon1
912 *
913 * Task is part of resctrl control group group0 and monitor group mon1.
914 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
916 struct pid *pid, struct task_struct *tsk)
917 {
918 struct rdtgroup *rdtg;
919 int ret = 0;
920
921 mutex_lock(&rdtgroup_mutex);
922
923 /* Return empty if resctrl has not been mounted. */
924 if (!resctrl_mounted) {
925 seq_puts(s, "res:\nmon:\n");
926 goto unlock;
927 }
928
929 list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
930 struct rdtgroup *crg;
931
932 /*
933 * Task information is only relevant for shareable
934 * and exclusive groups.
935 */
936 if (rdtg->mode != RDT_MODE_SHAREABLE &&
937 rdtg->mode != RDT_MODE_EXCLUSIVE)
938 continue;
939
940 if (!resctrl_arch_match_closid(tsk, rdtg->closid))
941 continue;
942
943 seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
944 rdt_kn_name(rdtg->kn));
945 seq_puts(s, "mon:");
946 list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
947 mon.crdtgrp_list) {
948 if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
949 crg->mon.rmid))
950 continue;
951 seq_printf(s, "%s", rdt_kn_name(crg->kn));
952 break;
953 }
954 seq_putc(s, '\n');
955 goto unlock;
956 }
957 /*
958 * The above search should succeed. Otherwise return
959 * with an error.
960 */
961 ret = -ENOENT;
962 unlock:
963 mutex_unlock(&rdtgroup_mutex);
964
965 return ret;
966 }
967 #endif
968
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
970 struct seq_file *seq, void *v)
971 {
972 int len;
973
974 mutex_lock(&rdtgroup_mutex);
975 len = seq_buf_used(&last_cmd_status);
976 if (len)
977 seq_printf(seq, "%.*s", len, last_cmd_status_buf);
978 else
979 seq_puts(seq, "ok\n");
980 mutex_unlock(&rdtgroup_mutex);
981 return 0;
982 }
983
void *rdt_kn_parent_priv(struct kernfs_node *kn)
985 {
986 /*
987 * The parent pointer is only valid within RCU section since it can be
988 * replaced.
989 */
990 guard(rcu)();
991 return rcu_dereference(kn->__parent)->priv;
992 }
993
static int rdt_num_closids_show(struct kernfs_open_file *of,
995 struct seq_file *seq, void *v)
996 {
997 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
998
999 seq_printf(seq, "%u\n", s->num_closid);
1000 return 0;
1001 }
1002
static int rdt_default_ctrl_show(struct kernfs_open_file *of,
1004 struct seq_file *seq, void *v)
1005 {
1006 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1007 struct rdt_resource *r = s->res;
1008
1009 seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
1010 return 0;
1011 }
1012
static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
1014 struct seq_file *seq, void *v)
1015 {
1016 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1017 struct rdt_resource *r = s->res;
1018
1019 seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
1020 return 0;
1021 }
1022
static int rdt_shareable_bits_show(struct kernfs_open_file *of,
1024 struct seq_file *seq, void *v)
1025 {
1026 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1027 struct rdt_resource *r = s->res;
1028
1029 seq_printf(seq, "%x\n", r->cache.shareable_bits);
1030 return 0;
1031 }
1032
1033 /*
1034 * rdt_bit_usage_show - Display current usage of resources
1035 *
* A domain is a shared resource that can now be allocated differently. Here
* we display the current regions of the domain as an annotated bitmask.
* For each domain of this resource, its allocation bitmask is annotated as
* below to indicate the current usage of the corresponding bit:
1040 * 0 - currently unused
1041 * X - currently available for sharing and used by software and hardware
1042 * H - currently used by hardware only but available for software use
1043 * S - currently used and shareable by software only
1044 * E - currently used exclusively by one resource group
1045 * P - currently pseudo-locked by one resource group
1046 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
1048 struct seq_file *seq, void *v)
1049 {
1050 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1051 /*
1052 * Use unsigned long even though only 32 bits are used to ensure
1053 * test_bit() is used safely.
1054 */
1055 unsigned long sw_shareable = 0, hw_shareable = 0;
1056 unsigned long exclusive = 0, pseudo_locked = 0;
1057 struct rdt_resource *r = s->res;
1058 struct rdt_ctrl_domain *dom;
1059 int i, hwb, swb, excl, psl;
1060 enum rdtgrp_mode mode;
1061 bool sep = false;
1062 u32 ctrl_val;
1063
1064 cpus_read_lock();
1065 mutex_lock(&rdtgroup_mutex);
1066 list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
1067 if (sep)
1068 seq_putc(seq, ';');
1069 hw_shareable = r->cache.shareable_bits;
1070 sw_shareable = 0;
1071 exclusive = 0;
1072 seq_printf(seq, "%d=", dom->hdr.id);
1073 for (i = 0; i < closids_supported(); i++) {
1074 if (!closid_allocated(i) ||
1075 (resctrl_arch_get_io_alloc_enabled(r) &&
1076 i == resctrl_io_alloc_closid(r)))
1077 continue;
1078 ctrl_val = resctrl_arch_get_config(r, dom, i,
1079 s->conf_type);
1080 mode = rdtgroup_mode_by_closid(i);
1081 switch (mode) {
1082 case RDT_MODE_SHAREABLE:
1083 sw_shareable |= ctrl_val;
1084 break;
1085 case RDT_MODE_EXCLUSIVE:
1086 exclusive |= ctrl_val;
1087 break;
1088 case RDT_MODE_PSEUDO_LOCKSETUP:
1089 /*
1090 * RDT_MODE_PSEUDO_LOCKSETUP is possible
1091 * here but not included since the CBM
1092 * associated with this CLOSID in this mode
1093 * is not initialized and no task or cpu can be
1094 * assigned this CLOSID.
1095 */
1096 break;
1097 case RDT_MODE_PSEUDO_LOCKED:
1098 case RDT_NUM_MODES:
1099 WARN(1,
1100 "invalid mode for closid %d\n", i);
1101 break;
1102 }
1103 }
1104
1105 /*
1106 * When the "io_alloc" feature is enabled, a portion of the cache
1107 * is configured for shared use between hardware and software.
1108 * Also, when CDP is enabled the CBMs of CDP_CODE and CDP_DATA
1109 * resources are kept in sync. So, the CBMs for "io_alloc" can
1110 * be accessed through either resource.
1111 */
1112 if (resctrl_arch_get_io_alloc_enabled(r)) {
1113 ctrl_val = resctrl_arch_get_config(r, dom,
1114 resctrl_io_alloc_closid(r),
1115 s->conf_type);
1116 hw_shareable |= ctrl_val;
1117 }
1118
1119 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
1120 pseudo_locked = dom->plr ? dom->plr->cbm : 0;
1121 hwb = test_bit(i, &hw_shareable);
1122 swb = test_bit(i, &sw_shareable);
1123 excl = test_bit(i, &exclusive);
1124 psl = test_bit(i, &pseudo_locked);
1125 if (hwb && swb)
1126 seq_putc(seq, 'X');
1127 else if (hwb && !swb)
1128 seq_putc(seq, 'H');
1129 else if (!hwb && swb)
1130 seq_putc(seq, 'S');
1131 else if (excl)
1132 seq_putc(seq, 'E');
1133 else if (psl)
1134 seq_putc(seq, 'P');
1135 else /* Unused bits remain */
1136 seq_putc(seq, '0');
1137 }
1138 sep = true;
1139 }
1140 seq_putc(seq, '\n');
1141 mutex_unlock(&rdtgroup_mutex);
1142 cpus_read_unlock();
1143 return 0;
1144 }
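
/*
 * Example "bit_usage" output (illustrative): an L3 resource with a 16-bit
 * CBM and two cache domains, where hardware uses the low four bits of each
 * domain and an exclusive group owns the top eight bits of domain 1, could
 * read:
 *
 *	0=SSSSSSSSSSSSXXXX;1=EEEEEEEESSSSXXXX
 *
 * Bits are printed from the most significant CBM bit down to bit 0, with
 * domains separated by ';'.
 */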
1145
static int rdt_min_bw_show(struct kernfs_open_file *of,
1147 struct seq_file *seq, void *v)
1148 {
1149 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1150 struct rdt_resource *r = s->res;
1151
1152 seq_printf(seq, "%u\n", r->membw.min_bw);
1153 return 0;
1154 }
1155
static int rdt_num_rmids_show(struct kernfs_open_file *of,
1157 struct seq_file *seq, void *v)
1158 {
1159 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1160
1161 seq_printf(seq, "%u\n", r->mon.num_rmid);
1162
1163 return 0;
1164 }
1165
static int rdt_mon_features_show(struct kernfs_open_file *of,
1167 struct seq_file *seq, void *v)
1168 {
1169 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1170 struct mon_evt *mevt;
1171
1172 for_each_mon_event(mevt) {
1173 if (mevt->rid != r->rid || !mevt->enabled)
1174 continue;
1175 seq_printf(seq, "%s\n", mevt->name);
1176 if (mevt->configurable &&
1177 !resctrl_arch_mbm_cntr_assign_enabled(r))
1178 seq_printf(seq, "%s_config\n", mevt->name);
1179 }
1180
1181 return 0;
1182 }
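
/*
 * Example contents of "info/L3_MON/mon_features" (illustrative): with all
 * three L3 events enabled, BMEC supported and counter assignment not in
 * use, a read would list:
 *
 *	llc_occupancy
 *	mbm_total_bytes
 *	mbm_total_bytes_config
 *	mbm_local_bytes
 *	mbm_local_bytes_config
 */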
1183
static int rdt_bw_gran_show(struct kernfs_open_file *of,
1185 struct seq_file *seq, void *v)
1186 {
1187 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1188 struct rdt_resource *r = s->res;
1189
1190 seq_printf(seq, "%u\n", r->membw.bw_gran);
1191 return 0;
1192 }
1193
static int rdt_delay_linear_show(struct kernfs_open_file *of,
1195 struct seq_file *seq, void *v)
1196 {
1197 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1198 struct rdt_resource *r = s->res;
1199
1200 seq_printf(seq, "%u\n", r->membw.delay_linear);
1201 return 0;
1202 }
1203
static int max_threshold_occ_show(struct kernfs_open_file *of,
1205 struct seq_file *seq, void *v)
1206 {
1207 seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);
1208
1209 return 0;
1210 }
1211
static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
1213 struct seq_file *seq, void *v)
1214 {
1215 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1216 struct rdt_resource *r = s->res;
1217
1218 switch (r->membw.throttle_mode) {
1219 case THREAD_THROTTLE_PER_THREAD:
1220 seq_puts(seq, "per-thread\n");
1221 return 0;
1222 case THREAD_THROTTLE_MAX:
1223 seq_puts(seq, "max\n");
1224 return 0;
1225 case THREAD_THROTTLE_UNDEFINED:
1226 seq_puts(seq, "undefined\n");
1227 return 0;
1228 }
1229
1230 WARN_ON_ONCE(1);
1231
1232 return 0;
1233 }
1234
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
1236 char *buf, size_t nbytes, loff_t off)
1237 {
1238 unsigned int bytes;
1239 int ret;
1240
1241 ret = kstrtouint(buf, 0, &bytes);
1242 if (ret)
1243 return ret;
1244
1245 if (bytes > resctrl_rmid_realloc_limit)
1246 return -EINVAL;
1247
1248 resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);
1249
1250 return nbytes;
1251 }
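
/*
 * Example (illustrative): writing "32768" to
 * "info/L3_MON/max_threshold_occupancy" is accepted only if it does not
 * exceed resctrl_rmid_realloc_limit; the value is then rounded by
 * resctrl_arch_round_mon_val() to what the monitoring counters can
 * represent, so reading the file back may show a slightly different number.
 */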
1252
1253 /*
1254 * rdtgroup_mode_show - Display mode of this resource group
1255 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
1257 struct seq_file *s, void *v)
1258 {
1259 struct rdtgroup *rdtgrp;
1260
1261 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1262 if (!rdtgrp) {
1263 rdtgroup_kn_unlock(of->kn);
1264 return -ENOENT;
1265 }
1266
1267 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
1268
1269 rdtgroup_kn_unlock(of->kn);
1270 return 0;
1271 }
1272
enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
1274 {
1275 switch (my_type) {
1276 case CDP_CODE:
1277 return CDP_DATA;
1278 case CDP_DATA:
1279 return CDP_CODE;
1280 default:
1281 case CDP_NONE:
1282 return CDP_NONE;
1283 }
1284 }
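
/*
 * Example (illustrative): with CDP enabled on L3, the L3CODE schema has
 * conf_type CDP_CODE and its peer is CDP_DATA (and vice versa), so
 * rdtgroup_cbm_overlaps() below also checks a CBM written through the code
 * schema against configurations of the data schema sharing the same cache.
 */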
1285
static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
1287 struct seq_file *seq, void *v)
1288 {
1289 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
1290 struct rdt_resource *r = s->res;
1291
1292 seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
1293
1294 return 0;
1295 }
1296
1297 /**
1298 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
1299 * @r: Resource to which domain instance @d belongs.
1300 * @d: The domain instance for which @closid is being tested.
1301 * @cbm: Capacity bitmask being tested.
1302 * @closid: Intended closid for @cbm.
1303 * @type: CDP type of @r.
1304 * @exclusive: Only check if overlaps with exclusive resource groups
1305 *
1306 * Checks if provided @cbm intended to be used for @closid on domain
1307 * @d overlaps with any other closids or other hardware usage associated
1308 * with this domain. If @exclusive is true then only overlaps with
1309 * resource groups in exclusive mode will be considered. If @exclusive
1310 * is false then overlaps with any resource group or hardware entities
1311 * will be considered.
1312 *
1313 * @cbm is unsigned long, even if only 32 bits are used, to make the
1314 * bitmap functions work correctly.
1315 *
1316 * Return: false if CBM does not overlap, true if it does.
1317 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
1319 unsigned long cbm, int closid,
1320 enum resctrl_conf_type type, bool exclusive)
1321 {
1322 enum rdtgrp_mode mode;
1323 unsigned long ctrl_b;
1324 int i;
1325
1326 /* Check for any overlap with regions used by hardware directly */
1327 if (!exclusive) {
1328 ctrl_b = r->cache.shareable_bits;
1329 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
1330 return true;
1331 }
1332
1333 /* Check for overlap with other resource groups */
1334 for (i = 0; i < closids_supported(); i++) {
1335 ctrl_b = resctrl_arch_get_config(r, d, i, type);
1336 mode = rdtgroup_mode_by_closid(i);
1337 if (closid_allocated(i) && i != closid &&
1338 mode != RDT_MODE_PSEUDO_LOCKSETUP) {
1339 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
1340 if (exclusive) {
1341 if (mode == RDT_MODE_EXCLUSIVE)
1342 return true;
1343 continue;
1344 }
1345 return true;
1346 }
1347 }
1348 }
1349
1350 return false;
1351 }
1352
1353 /**
1354 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
1355 * @s: Schema for the resource to which domain instance @d belongs.
1356 * @d: The domain instance for which @closid is being tested.
1357 * @cbm: Capacity bitmask being tested.
1358 * @closid: Intended closid for @cbm.
1359 * @exclusive: Only check if overlaps with exclusive resource groups
1360 *
1361 * Resources that can be allocated using a CBM can use the CBM to control
* the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
1363 * for overlap. Overlap test is not limited to the specific resource for
1364 * which the CBM is intended though - when dealing with CDP resources that
1365 * share the underlying hardware the overlap check should be performed on
1366 * the CDP resource sharing the hardware also.
1367 *
1368 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
1369 * overlap test.
1370 *
1371 * Return: true if CBM overlap detected, false if there is no overlap
1372 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
1374 unsigned long cbm, int closid, bool exclusive)
1375 {
1376 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
1377 struct rdt_resource *r = s->res;
1378
1379 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
1380 exclusive))
1381 return true;
1382
1383 if (!resctrl_arch_get_cdp_enabled(r->rid))
1384 return false;
1385 return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
1386 }
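
/*
 * Worked example (illustrative): on a domain where an exclusive group
 * already owns CBM 0x00ff, testing a new CBM of 0x0ff0 for another CLOSID
 * reports an overlap (bits 4-7 intersect) while 0xff00 does not. With
 * @exclusive == false the test additionally flags any intersection with
 * r->cache.shareable_bits or with shareable groups.
 */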
1387
1388 /**
1389 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1390 * @rdtgrp: Resource group identified through its closid.
1391 *
1392 * An exclusive resource group implies that there should be no sharing of
1393 * its allocated resources. At the time this group is considered to be
1394 * exclusive this test can determine if its current schemata supports this
1395 * setting by testing for overlap with all other resource groups.
1396 *
1397 * Return: true if resource group can be exclusive, false if there is overlap
1398 * with allocations of other resource groups and thus this resource group
1399 * cannot be exclusive.
1400 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1402 {
1403 int closid = rdtgrp->closid;
1404 struct rdt_ctrl_domain *d;
1405 struct resctrl_schema *s;
1406 struct rdt_resource *r;
1407 bool has_cache = false;
1408 u32 ctrl;
1409
1410 /* Walking r->domains, ensure it can't race with cpuhp */
1411 lockdep_assert_cpus_held();
1412
1413 list_for_each_entry(s, &resctrl_schema_all, list) {
1414 r = s->res;
1415 if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
1416 continue;
1417 has_cache = true;
1418 list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
1419 ctrl = resctrl_arch_get_config(r, d, closid,
1420 s->conf_type);
1421 if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
1422 rdt_last_cmd_puts("Schemata overlaps\n");
1423 return false;
1424 }
1425 }
1426 }
1427
1428 if (!has_cache) {
1429 rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
1430 return false;
1431 }
1432
1433 return true;
1434 }
1435
1436 /*
1437 * rdtgroup_mode_write - Modify the resource group's mode
1438 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1440 char *buf, size_t nbytes, loff_t off)
1441 {
1442 struct rdtgroup *rdtgrp;
1443 enum rdtgrp_mode mode;
1444 int ret = 0;
1445
1446 /* Valid input requires a trailing newline */
1447 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1448 return -EINVAL;
1449 buf[nbytes - 1] = '\0';
1450
1451 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1452 if (!rdtgrp) {
1453 rdtgroup_kn_unlock(of->kn);
1454 return -ENOENT;
1455 }
1456
1457 rdt_last_cmd_clear();
1458
1459 mode = rdtgrp->mode;
1460
1461 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
1462 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
1463 (!strcmp(buf, "pseudo-locksetup") &&
1464 mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
1465 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
1466 goto out;
1467
1468 if (mode == RDT_MODE_PSEUDO_LOCKED) {
1469 rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
1470 ret = -EINVAL;
1471 goto out;
1472 }
1473
1474 if (!strcmp(buf, "shareable")) {
1475 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1476 ret = rdtgroup_locksetup_exit(rdtgrp);
1477 if (ret)
1478 goto out;
1479 }
1480 rdtgrp->mode = RDT_MODE_SHAREABLE;
1481 } else if (!strcmp(buf, "exclusive")) {
1482 if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1483 ret = -EINVAL;
1484 goto out;
1485 }
1486 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1487 ret = rdtgroup_locksetup_exit(rdtgrp);
1488 if (ret)
1489 goto out;
1490 }
1491 rdtgrp->mode = RDT_MODE_EXCLUSIVE;
1492 } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
1493 !strcmp(buf, "pseudo-locksetup")) {
1494 ret = rdtgroup_locksetup_enter(rdtgrp);
1495 if (ret)
1496 goto out;
1497 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
1498 } else {
1499 rdt_last_cmd_puts("Unknown or unsupported mode\n");
1500 ret = -EINVAL;
1501 }
1502
1503 out:
1504 rdtgroup_kn_unlock(of->kn);
1505 return ret ?: nbytes;
1506 }
1507
1508 /**
1509 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1510 * @r: RDT resource to which @d belongs.
1511 * @d: RDT domain instance.
1512 * @cbm: bitmask for which the size should be computed.
1513 *
1514 * The bitmask provided associated with the RDT domain instance @d will be
1515 * translated into how many bytes it represents. The size in bytes is
1516 * computed by first dividing the total cache size by the CBM length to
1517 * determine how many bytes each bit in the bitmask represents. The result
1518 * is multiplied with the number of bits set in the bitmask.
1519 *
1520 * @cbm is unsigned long, even if only 32 bits are used to make the
1521 * bitmap functions work correctly.
1522 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
1524 struct rdt_ctrl_domain *d, unsigned long cbm)
1525 {
1526 unsigned int size = 0;
1527 struct cacheinfo *ci;
1528 int num_b;
1529
1530 if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
1531 return size;
1532
1533 num_b = bitmap_weight(&cbm, r->cache.cbm_len);
1534 ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
1535 if (ci)
1536 size = ci->size / r->cache.cbm_len * num_b;
1537
1538 return size;
1539 }
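
/*
 * Worked example (illustrative): for a 32 MB L3 cache with a 16-bit CBM,
 * each CBM bit represents 32 MB / 16 = 2 MB, so a CBM of 0x00ff (8 bits
 * set) is reported as 16 MB (16777216 bytes) in the "size" file.
 */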
1540
bool is_mba_sc(struct rdt_resource *r)
1542 {
1543 if (!r)
1544 r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
1545
1546 /*
1547 * The software controller support is only applicable to MBA resource.
1548 * Make sure to check for resource type.
1549 */
1550 if (r->rid != RDT_RESOURCE_MBA)
1551 return false;
1552
1553 return r->membw.mba_sc;
1554 }
1555
1556 /*
1557 * rdtgroup_size_show - Display size in bytes of allocated regions
1558 *
1559 * The "size" file mirrors the layout of the "schemata" file, printing the
1560 * size in bytes of each region instead of the capacity bitmask.
1561 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
1563 struct seq_file *s, void *v)
1564 {
1565 struct resctrl_schema *schema;
1566 enum resctrl_conf_type type;
1567 struct rdt_ctrl_domain *d;
1568 struct rdtgroup *rdtgrp;
1569 struct rdt_resource *r;
1570 unsigned int size;
1571 int ret = 0;
1572 u32 closid;
1573 bool sep;
1574 u32 ctrl;
1575
1576 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1577 if (!rdtgrp) {
1578 rdtgroup_kn_unlock(of->kn);
1579 return -ENOENT;
1580 }
1581
1582 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1583 if (!rdtgrp->plr->d) {
1584 rdt_last_cmd_clear();
1585 rdt_last_cmd_puts("Cache domain offline\n");
1586 ret = -ENODEV;
1587 } else {
1588 seq_printf(s, "%*s:", max_name_width,
1589 rdtgrp->plr->s->name);
1590 size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
1591 rdtgrp->plr->d,
1592 rdtgrp->plr->cbm);
1593 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
1594 }
1595 goto out;
1596 }
1597
1598 closid = rdtgrp->closid;
1599
1600 list_for_each_entry(schema, &resctrl_schema_all, list) {
1601 r = schema->res;
1602 type = schema->conf_type;
1603 sep = false;
1604 seq_printf(s, "%*s:", max_name_width, schema->name);
1605 list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
1606 if (sep)
1607 seq_putc(s, ';');
1608 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1609 size = 0;
1610 } else {
1611 if (is_mba_sc(r))
1612 ctrl = d->mbps_val[closid];
1613 else
1614 ctrl = resctrl_arch_get_config(r, d,
1615 closid,
1616 type);
1617 if (r->rid == RDT_RESOURCE_MBA ||
1618 r->rid == RDT_RESOURCE_SMBA)
1619 size = ctrl;
1620 else
1621 size = rdtgroup_cbm_to_size(r, d, ctrl);
1622 }
1623 seq_printf(s, "%d=%u", d->hdr.id, size);
1624 sep = true;
1625 }
1626 seq_putc(s, '\n');
1627 }
1628
1629 out:
1630 rdtgroup_kn_unlock(of->kn);
1631
1632 return ret;
1633 }
1634
static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
1636 {
1637 smp_call_function_any(&mon_info->d->hdr.cpu_mask,
1638 resctrl_arch_mon_event_config_read, mon_info, 1);
1639 }
1640
static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
1642 {
1643 struct resctrl_mon_config_info mon_info;
1644 struct rdt_l3_mon_domain *dom;
1645 bool sep = false;
1646
1647 cpus_read_lock();
1648 mutex_lock(&rdtgroup_mutex);
1649
1650 list_for_each_entry(dom, &r->mon_domains, hdr.list) {
1651 if (sep)
1652 seq_puts(s, ";");
1653
1654 memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
1655 mon_info.r = r;
1656 mon_info.d = dom;
1657 mon_info.evtid = evtid;
1658 mondata_config_read(&mon_info);
1659
1660 seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
1661 sep = true;
1662 }
1663 seq_puts(s, "\n");
1664
1665 mutex_unlock(&rdtgroup_mutex);
1666 cpus_read_unlock();
1667
1668 return 0;
1669 }
1670
static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
1672 struct seq_file *seq, void *v)
1673 {
1674 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1675
1676 mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
1677
1678 return 0;
1679 }
1680
static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
1682 struct seq_file *seq, void *v)
1683 {
1684 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1685
1686 mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
1687
1688 return 0;
1689 }
1690
static void mbm_config_write_domain(struct rdt_resource *r,
1692 struct rdt_l3_mon_domain *d, u32 evtid, u32 val)
1693 {
1694 struct resctrl_mon_config_info mon_info = {0};
1695
1696 /*
* Read the current config value first. If it already matches the new
* value then there is no need to write it again.
1699 */
1700 mon_info.r = r;
1701 mon_info.d = d;
1702 mon_info.evtid = evtid;
1703 mondata_config_read(&mon_info);
1704 if (mon_info.mon_config == val)
1705 return;
1706
1707 mon_info.mon_config = val;
1708
1709 /*
1710 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
* domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
1712 * are scoped at the domain level. Writing any of these MSRs
1713 * on one CPU is observed by all the CPUs in the domain.
1714 */
1715 smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
1716 &mon_info, 1);
1717
1718 /*
1719 * When an Event Configuration is changed, the bandwidth counters
1720 * for all RMIDs and Events will be cleared by the hardware. The
1721 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
1722 * every RMID on the next read to any event for every RMID.
1723 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
1724 * cleared while it is tracked by the hardware. Clear the
1725 * mbm_local and mbm_total counts for all the RMIDs.
1726 */
1727 resctrl_arch_reset_rmid_all(r, d);
1728 }
1729
static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
1731 {
1732 char *dom_str = NULL, *id_str;
1733 struct rdt_l3_mon_domain *d;
1734 unsigned long dom_id, val;
1735
1736 /* Walking r->domains, ensure it can't race with cpuhp */
1737 lockdep_assert_cpus_held();
1738
1739 next:
1740 if (!tok || tok[0] == '\0')
1741 return 0;
1742
1743 /* Start processing the strings for each domain */
1744 dom_str = strim(strsep(&tok, ";"));
1745 id_str = strsep(&dom_str, "=");
1746
1747 if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
1748 rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
1749 return -EINVAL;
1750 }
1751
1752 if (!dom_str || kstrtoul(dom_str, 16, &val)) {
1753 rdt_last_cmd_puts("Non-numeric event configuration value\n");
1754 return -EINVAL;
1755 }
1756
1757 /* Value from user cannot be more than the supported set of events */
1758 if ((val & r->mon.mbm_cfg_mask) != val) {
1759 rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
1760 r->mon.mbm_cfg_mask);
1761 return -EINVAL;
1762 }
1763
1764 list_for_each_entry(d, &r->mon_domains, hdr.list) {
1765 if (d->hdr.id == dom_id) {
1766 mbm_config_write_domain(r, d, evtid, val);
1767 goto next;
1768 }
1769 }
1770
1771 return -EINVAL;
1772 }
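
/*
 * Example write (illustrative): "0=0x7f;1=0x7f" written to
 * "info/L3_MON/mbm_total_bytes_config" applies configuration 0x7f to
 * monitor domains 0 and 1. Domain ids are parsed as decimal, values as
 * hex, and a value with bits outside r->mon.mbm_cfg_mask is rejected.
 */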
1773
static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
1775 char *buf, size_t nbytes,
1776 loff_t off)
1777 {
1778 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1779 int ret;
1780
1781 /* Valid input requires a trailing newline */
1782 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1783 return -EINVAL;
1784
1785 cpus_read_lock();
1786 mutex_lock(&rdtgroup_mutex);
1787
1788 rdt_last_cmd_clear();
1789
1790 buf[nbytes - 1] = '\0';
1791
1792 ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);
1793
1794 mutex_unlock(&rdtgroup_mutex);
1795 cpus_read_unlock();
1796
1797 return ret ?: nbytes;
1798 }
1799
static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
1801 char *buf, size_t nbytes,
1802 loff_t off)
1803 {
1804 struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
1805 int ret;
1806
1807 /* Valid input requires a trailing newline */
1808 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1809 return -EINVAL;
1810
1811 cpus_read_lock();
1812 mutex_lock(&rdtgroup_mutex);
1813
1814 rdt_last_cmd_clear();
1815
1816 buf[nbytes - 1] = '\0';
1817
1818 ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);
1819
1820 mutex_unlock(&rdtgroup_mutex);
1821 cpus_read_unlock();
1822
1823 return ret ?: nbytes;
1824 }
1825
1826 /*
* resctrl_bmec_files_show() - Controls the visibility of BMEC-related resctrl
1828 * files. When @show is true, the files are displayed; when false, the files
1829 * are hidden.
1830 * Don't treat kernfs_find_and_get failure as an error, since this function may
1831 * be called regardless of whether BMEC is supported or the event is enabled.
1832 */
1833 void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_kn,
1834 bool show)
1835 {
1836 struct kernfs_node *kn_config, *mon_kn = NULL;
1837 char name[32];
1838
1839 if (!l3_mon_kn) {
1840 sprintf(name, "%s_MON", r->name);
1841 mon_kn = kernfs_find_and_get(kn_info, name);
1842 if (!mon_kn)
1843 return;
1844 l3_mon_kn = mon_kn;
1845 }
1846
1847 kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_total_bytes_config");
1848 if (kn_config) {
1849 kernfs_show(kn_config, show);
1850 kernfs_put(kn_config);
1851 }
1852
1853 kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_local_bytes_config");
1854 if (kn_config) {
1855 kernfs_show(kn_config, show);
1856 kernfs_put(kn_config);
1857 }
1858
1859 /* Release the reference only if it was acquired */
1860 if (mon_kn)
1861 kernfs_put(mon_kn);
1862 }
1863
1864 const char *rdtgroup_name_by_closid(u32 closid)
1865 {
1866 struct rdtgroup *rdtgrp;
1867
1868 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
1869 if (rdtgrp->closid == closid)
1870 return rdt_kn_name(rdtgrp->kn);
1871 }
1872
1873 return NULL;
1874 }
1875
1876 /* rdtgroup information files for one cache resource. */
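/*
 * Entries that leave .fflags zero here (for example "thread_throttle_mode"
 * and "io_alloc") only become visible once resctrl_file_fflags_init()
 * assigns their flags after the relevant capability has been discovered.
 */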
1877 static struct rftype res_common_files[] = {
1878 {
1879 .name = "last_cmd_status",
1880 .mode = 0444,
1881 .kf_ops = &rdtgroup_kf_single_ops,
1882 .seq_show = rdt_last_cmd_status_show,
1883 .fflags = RFTYPE_TOP_INFO,
1884 },
1885 {
1886 .name = "mbm_assign_on_mkdir",
1887 .mode = 0644,
1888 .kf_ops = &rdtgroup_kf_single_ops,
1889 .seq_show = resctrl_mbm_assign_on_mkdir_show,
1890 .write = resctrl_mbm_assign_on_mkdir_write,
1891 },
1892 {
1893 .name = "num_closids",
1894 .mode = 0444,
1895 .kf_ops = &rdtgroup_kf_single_ops,
1896 .seq_show = rdt_num_closids_show,
1897 .fflags = RFTYPE_CTRL_INFO,
1898 },
1899 {
1900 .name = "mon_features",
1901 .mode = 0444,
1902 .kf_ops = &rdtgroup_kf_single_ops,
1903 .seq_show = rdt_mon_features_show,
1904 .fflags = RFTYPE_MON_INFO,
1905 },
1906 {
1907 .name = "available_mbm_cntrs",
1908 .mode = 0444,
1909 .kf_ops = &rdtgroup_kf_single_ops,
1910 .seq_show = resctrl_available_mbm_cntrs_show,
1911 },
1912 {
1913 .name = "num_rmids",
1914 .mode = 0444,
1915 .kf_ops = &rdtgroup_kf_single_ops,
1916 .seq_show = rdt_num_rmids_show,
1917 .fflags = RFTYPE_MON_INFO,
1918 },
1919 {
1920 .name = "cbm_mask",
1921 .mode = 0444,
1922 .kf_ops = &rdtgroup_kf_single_ops,
1923 .seq_show = rdt_default_ctrl_show,
1924 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1925 },
1926 {
1927 .name = "num_mbm_cntrs",
1928 .mode = 0444,
1929 .kf_ops = &rdtgroup_kf_single_ops,
1930 .seq_show = resctrl_num_mbm_cntrs_show,
1931 },
1932 {
1933 .name = "min_cbm_bits",
1934 .mode = 0444,
1935 .kf_ops = &rdtgroup_kf_single_ops,
1936 .seq_show = rdt_min_cbm_bits_show,
1937 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1938 },
1939 {
1940 .name = "shareable_bits",
1941 .mode = 0444,
1942 .kf_ops = &rdtgroup_kf_single_ops,
1943 .seq_show = rdt_shareable_bits_show,
1944 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1945 },
1946 {
1947 .name = "bit_usage",
1948 .mode = 0444,
1949 .kf_ops = &rdtgroup_kf_single_ops,
1950 .seq_show = rdt_bit_usage_show,
1951 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
1952 },
1953 {
1954 .name = "min_bandwidth",
1955 .mode = 0444,
1956 .kf_ops = &rdtgroup_kf_single_ops,
1957 .seq_show = rdt_min_bw_show,
1958 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1959 },
1960 {
1961 .name = "bandwidth_gran",
1962 .mode = 0444,
1963 .kf_ops = &rdtgroup_kf_single_ops,
1964 .seq_show = rdt_bw_gran_show,
1965 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1966 },
1967 {
1968 .name = "delay_linear",
1969 .mode = 0444,
1970 .kf_ops = &rdtgroup_kf_single_ops,
1971 .seq_show = rdt_delay_linear_show,
1972 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
1973 },
1974 /*
1975 	 * It is platform specific which (if any) capabilities are provided
1976 	 * by thread_throttle_mode. Defer "fflags" initialization to
1977 	 * platform discovery.
1978 */
1979 {
1980 .name = "thread_throttle_mode",
1981 .mode = 0444,
1982 .kf_ops = &rdtgroup_kf_single_ops,
1983 .seq_show = rdt_thread_throttle_mode_show,
1984 },
1985 {
1986 .name = "io_alloc",
1987 .mode = 0644,
1988 .kf_ops = &rdtgroup_kf_single_ops,
1989 .seq_show = resctrl_io_alloc_show,
1990 .write = resctrl_io_alloc_write,
1991 },
1992 {
1993 .name = "io_alloc_cbm",
1994 .mode = 0644,
1995 .kf_ops = &rdtgroup_kf_single_ops,
1996 .seq_show = resctrl_io_alloc_cbm_show,
1997 .write = resctrl_io_alloc_cbm_write,
1998 },
1999 {
2000 .name = "max_threshold_occupancy",
2001 .mode = 0644,
2002 .kf_ops = &rdtgroup_kf_single_ops,
2003 .write = max_threshold_occ_write,
2004 .seq_show = max_threshold_occ_show,
2005 .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
2006 },
2007 {
2008 .name = "mbm_total_bytes_config",
2009 .mode = 0644,
2010 .kf_ops = &rdtgroup_kf_single_ops,
2011 .seq_show = mbm_total_bytes_config_show,
2012 .write = mbm_total_bytes_config_write,
2013 },
2014 {
2015 .name = "mbm_local_bytes_config",
2016 .mode = 0644,
2017 .kf_ops = &rdtgroup_kf_single_ops,
2018 .seq_show = mbm_local_bytes_config_show,
2019 .write = mbm_local_bytes_config_write,
2020 },
2021 {
2022 .name = "event_filter",
2023 .mode = 0644,
2024 .kf_ops = &rdtgroup_kf_single_ops,
2025 .seq_show = event_filter_show,
2026 .write = event_filter_write,
2027 },
2028 {
2029 .name = "mbm_L3_assignments",
2030 .mode = 0644,
2031 .kf_ops = &rdtgroup_kf_single_ops,
2032 .seq_show = mbm_L3_assignments_show,
2033 .write = mbm_L3_assignments_write,
2034 },
2035 {
2036 .name = "mbm_assign_mode",
2037 .mode = 0644,
2038 .kf_ops = &rdtgroup_kf_single_ops,
2039 .seq_show = resctrl_mbm_assign_mode_show,
2040 .write = resctrl_mbm_assign_mode_write,
2041 .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
2042 },
2043 {
2044 .name = "cpus",
2045 .mode = 0644,
2046 .kf_ops = &rdtgroup_kf_single_ops,
2047 .write = rdtgroup_cpus_write,
2048 .seq_show = rdtgroup_cpus_show,
2049 .fflags = RFTYPE_BASE,
2050 },
2051 {
2052 .name = "cpus_list",
2053 .mode = 0644,
2054 .kf_ops = &rdtgroup_kf_single_ops,
2055 .write = rdtgroup_cpus_write,
2056 .seq_show = rdtgroup_cpus_show,
2057 .flags = RFTYPE_FLAGS_CPUS_LIST,
2058 .fflags = RFTYPE_BASE,
2059 },
2060 {
2061 .name = "tasks",
2062 .mode = 0644,
2063 .kf_ops = &rdtgroup_kf_single_ops,
2064 .write = rdtgroup_tasks_write,
2065 .seq_show = rdtgroup_tasks_show,
2066 .fflags = RFTYPE_BASE,
2067 },
2068 {
2069 .name = "mon_hw_id",
2070 .mode = 0444,
2071 .kf_ops = &rdtgroup_kf_single_ops,
2072 .seq_show = rdtgroup_rmid_show,
2073 .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG,
2074 },
2075 {
2076 .name = "schemata",
2077 .mode = 0644,
2078 .kf_ops = &rdtgroup_kf_single_ops,
2079 .write = rdtgroup_schemata_write,
2080 .seq_show = rdtgroup_schemata_show,
2081 .fflags = RFTYPE_CTRL_BASE,
2082 },
2083 {
2084 .name = "mba_MBps_event",
2085 .mode = 0644,
2086 .kf_ops = &rdtgroup_kf_single_ops,
2087 .write = rdtgroup_mba_mbps_event_write,
2088 .seq_show = rdtgroup_mba_mbps_event_show,
2089 },
2090 {
2091 .name = "mode",
2092 .mode = 0644,
2093 .kf_ops = &rdtgroup_kf_single_ops,
2094 .write = rdtgroup_mode_write,
2095 .seq_show = rdtgroup_mode_show,
2096 .fflags = RFTYPE_CTRL_BASE,
2097 },
2098 {
2099 .name = "size",
2100 .mode = 0444,
2101 .kf_ops = &rdtgroup_kf_single_ops,
2102 .seq_show = rdtgroup_size_show,
2103 .fflags = RFTYPE_CTRL_BASE,
2104 },
2105 {
2106 .name = "sparse_masks",
2107 .mode = 0444,
2108 .kf_ops = &rdtgroup_kf_single_ops,
2109 .seq_show = rdt_has_sparse_bitmasks_show,
2110 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
2111 },
2112 {
2113 .name = "ctrl_hw_id",
2114 .mode = 0444,
2115 .kf_ops = &rdtgroup_kf_single_ops,
2116 .seq_show = rdtgroup_closid_show,
2117 .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
2118 },
2119 };
2120
2121 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
2122 {
2123 struct rftype *rfts, *rft;
2124 int ret, len;
2125
2126 rfts = res_common_files;
2127 len = ARRAY_SIZE(res_common_files);
2128
2129 lockdep_assert_held(&rdtgroup_mutex);
2130
2131 if (resctrl_debug)
2132 fflags |= RFTYPE_DEBUG;
2133
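	/*
	 * A file is created only when all of its .fflags bits are present in
	 * the caller's @fflags; entries whose .fflags are still zero are
	 * skipped here.
	 */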
2134 for (rft = rfts; rft < rfts + len; rft++) {
2135 if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
2136 ret = rdtgroup_add_file(kn, rft);
2137 if (ret)
2138 goto error;
2139 }
2140 }
2141
2142 return 0;
2143 error:
2144 pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
2145 while (--rft >= rfts) {
2146 if ((fflags & rft->fflags) == rft->fflags)
2147 kernfs_remove_by_name(kn, rft->name);
2148 }
2149 return ret;
2150 }
2151
2152 static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
2153 {
2154 struct rftype *rfts, *rft;
2155 int len;
2156
2157 rfts = res_common_files;
2158 len = ARRAY_SIZE(res_common_files);
2159
2160 for (rft = rfts; rft < rfts + len; rft++) {
2161 if (!strcmp(rft->name, name))
2162 return rft;
2163 }
2164
2165 return NULL;
2166 }
2167
2168 static void thread_throttle_mode_init(void)
2169 {
2170 enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED;
2171 struct rdt_resource *r_mba, *r_smba;
2172
2173 r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2174 if (r_mba->alloc_capable &&
2175 r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
2176 throttle_mode = r_mba->membw.throttle_mode;
2177
2178 r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA);
2179 if (r_smba->alloc_capable &&
2180 r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
2181 throttle_mode = r_smba->membw.throttle_mode;
2182
2183 if (throttle_mode == THREAD_THROTTLE_UNDEFINED)
2184 return;
2185
2186 resctrl_file_fflags_init("thread_throttle_mode",
2187 RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
2188 }
2189
2190 /*
2191  * The resctrl file "io_alloc" is added using the L3 resource. However, this
2192  * results in the file being visible for *all* cache resources (e.g. L2 cache),
2193  * whether or not they support "io_alloc".
2194 */
2195 static void io_alloc_init(void)
2196 {
2197 struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
2198
2199 if (r->cache.io_alloc_capable) {
2200 resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
2201 RFTYPE_RES_CACHE);
2202 resctrl_file_fflags_init("io_alloc_cbm",
2203 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
2204 }
2205 }
2206
2207 void resctrl_file_fflags_init(const char *config, unsigned long fflags)
2208 {
2209 struct rftype *rft;
2210
2211 rft = rdtgroup_get_rftype_by_name(config);
2212 if (rft)
2213 rft->fflags = fflags;
2214 }
2215
2216 /**
2217 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
2218 * @r: The resource group with which the file is associated.
2219 * @name: Name of the file
2220 *
2221 * The permissions of named resctrl file, directory, or link are modified
2222 * to not allow read, write, or execute by any user.
2223 *
2224 * WARNING: This function is intended to communicate to the user that the
2225 * resctrl file has been locked down - that it is not relevant to the
2226 * particular state the system finds itself in. It should not be relied
2227 * on to protect from user access because after the file's permissions
2228 * are restricted the user can still change the permissions using chmod
2229 * from the command line.
2230 *
2231 * Return: 0 on success, <0 on failure.
2232 */
2233 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
2234 {
2235 struct iattr iattr = {.ia_valid = ATTR_MODE,};
2236 struct kernfs_node *kn;
2237 int ret = 0;
2238
2239 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
2240 if (!kn)
2241 return -ENOENT;
2242
2243 switch (kernfs_type(kn)) {
2244 case KERNFS_DIR:
2245 iattr.ia_mode = S_IFDIR;
2246 break;
2247 case KERNFS_FILE:
2248 iattr.ia_mode = S_IFREG;
2249 break;
2250 case KERNFS_LINK:
2251 iattr.ia_mode = S_IFLNK;
2252 break;
2253 }
2254
2255 ret = kernfs_setattr(kn, &iattr);
2256 kernfs_put(kn);
2257 return ret;
2258 }
2259
2260 /**
2261 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
2262 * @r: The resource group with which the file is associated.
2263 * @name: Name of the file
2264 * @mask: Mask of permissions that should be restored
2265 *
2266 * Restore the permissions of the named file. If @name is a directory the
2267 * permissions of its parent will be used.
2268 *
2269 * Return: 0 on success, <0 on failure.
2270 */
2271 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
2272 umode_t mask)
2273 {
2274 struct iattr iattr = {.ia_valid = ATTR_MODE,};
2275 struct kernfs_node *kn, *parent;
2276 struct rftype *rfts, *rft;
2277 int ret, len;
2278
2279 rfts = res_common_files;
2280 len = ARRAY_SIZE(res_common_files);
2281
2282 for (rft = rfts; rft < rfts + len; rft++) {
2283 if (!strcmp(rft->name, name))
2284 iattr.ia_mode = rft->mode & mask;
2285 }
2286
2287 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
2288 if (!kn)
2289 return -ENOENT;
2290
2291 switch (kernfs_type(kn)) {
2292 case KERNFS_DIR:
2293 parent = kernfs_get_parent(kn);
2294 if (parent) {
2295 iattr.ia_mode |= parent->mode;
2296 kernfs_put(parent);
2297 }
2298 iattr.ia_mode |= S_IFDIR;
2299 break;
2300 case KERNFS_FILE:
2301 iattr.ia_mode |= S_IFREG;
2302 break;
2303 case KERNFS_LINK:
2304 iattr.ia_mode |= S_IFLNK;
2305 break;
2306 }
2307
2308 ret = kernfs_setattr(kn, &iattr);
2309 kernfs_put(kn);
2310 return ret;
2311 }
2312
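/*
 * Create the "event_configs" directory under the resource's info/<res>_MON
 * directory, with one subdirectory per enabled MBM event of @r. Each event
 * subdirectory is populated with the RFTYPE_ASSIGN_CONFIG files.
 */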
2313 static int resctrl_mkdir_event_configs(struct rdt_resource *r, struct kernfs_node *l3_mon_kn)
2314 {
2315 struct kernfs_node *kn_subdir, *kn_subdir2;
2316 struct mon_evt *mevt;
2317 int ret;
2318
2319 kn_subdir = kernfs_create_dir(l3_mon_kn, "event_configs", l3_mon_kn->mode, NULL);
2320 if (IS_ERR(kn_subdir))
2321 return PTR_ERR(kn_subdir);
2322
2323 ret = rdtgroup_kn_set_ugid(kn_subdir);
2324 if (ret)
2325 return ret;
2326
2327 for_each_mon_event(mevt) {
2328 if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid))
2329 continue;
2330
2331 kn_subdir2 = kernfs_create_dir(kn_subdir, mevt->name, kn_subdir->mode, mevt);
2332 if (IS_ERR(kn_subdir2)) {
2333 ret = PTR_ERR(kn_subdir2);
2334 goto out;
2335 }
2336
2337 ret = rdtgroup_kn_set_ugid(kn_subdir2);
2338 if (ret)
2339 goto out;
2340
2341 ret = rdtgroup_add_files(kn_subdir2, RFTYPE_ASSIGN_CONFIG);
2342 if (ret)
2343 break;
2344 }
2345
2346 out:
2347 return ret;
2348 }
2349
2350 static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
2351 unsigned long fflags)
2352 {
2353 struct kernfs_node *kn_subdir;
2354 struct rdt_resource *r;
2355 int ret;
2356
2357 kn_subdir = kernfs_create_dir(kn_info, name,
2358 kn_info->mode, priv);
2359 if (IS_ERR(kn_subdir))
2360 return PTR_ERR(kn_subdir);
2361
2362 ret = rdtgroup_kn_set_ugid(kn_subdir);
2363 if (ret)
2364 return ret;
2365
2366 ret = rdtgroup_add_files(kn_subdir, fflags);
2367 if (ret)
2368 return ret;
2369
2370 if ((fflags & RFTYPE_MON_INFO) == RFTYPE_MON_INFO) {
2371 r = priv;
2372 if (r->mon.mbm_cntr_assignable) {
2373 ret = resctrl_mkdir_event_configs(r, kn_subdir);
2374 if (ret)
2375 return ret;
2376 /*
2377 * Hide BMEC related files if mbm_event mode
2378 * is enabled.
2379 */
2380 if (resctrl_arch_mbm_cntr_assign_enabled(r))
2381 resctrl_bmec_files_show(r, kn_subdir, false);
2382 }
2383 }
2384
2385 kernfs_activate(kn_subdir);
2386
2387 return ret;
2388 }
2389
2390 static unsigned long fflags_from_resource(struct rdt_resource *r)
2391 {
2392 switch (r->rid) {
2393 case RDT_RESOURCE_L3:
2394 case RDT_RESOURCE_L2:
2395 return RFTYPE_RES_CACHE;
2396 case RDT_RESOURCE_MBA:
2397 case RDT_RESOURCE_SMBA:
2398 return RFTYPE_RES_MB;
2399 case RDT_RESOURCE_PERF_PKG:
2400 return RFTYPE_RES_PERF_PKG;
2401 }
2402
2403 return WARN_ON_ONCE(1);
2404 }
2405
2406 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
2407 {
2408 struct resctrl_schema *s;
2409 struct rdt_resource *r;
2410 unsigned long fflags;
2411 char name[32];
2412 int ret;
2413
2414 /* create the directory */
2415 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
2416 if (IS_ERR(kn_info))
2417 return PTR_ERR(kn_info);
2418
2419 ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO);
2420 if (ret)
2421 goto out_destroy;
2422
2423 /* loop over enabled controls, these are all alloc_capable */
2424 list_for_each_entry(s, &resctrl_schema_all, list) {
2425 r = s->res;
2426 fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO;
2427 ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
2428 if (ret)
2429 goto out_destroy;
2430 }
2431
2432 for_each_mon_capable_rdt_resource(r) {
2433 fflags = fflags_from_resource(r) | RFTYPE_MON_INFO;
2434 sprintf(name, "%s_MON", r->name);
2435 ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
2436 if (ret)
2437 goto out_destroy;
2438 }
2439
2440 ret = rdtgroup_kn_set_ugid(kn_info);
2441 if (ret)
2442 goto out_destroy;
2443
2444 kernfs_activate(kn_info);
2445
2446 return 0;
2447
2448 out_destroy:
2449 kernfs_remove(kn_info);
2450 return ret;
2451 }
2452
2453 static int
2454 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
2455 char *name, struct kernfs_node **dest_kn)
2456 {
2457 struct kernfs_node *kn;
2458 int ret;
2459
2460 /* create the directory */
2461 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2462 if (IS_ERR(kn))
2463 return PTR_ERR(kn);
2464
2465 if (dest_kn)
2466 *dest_kn = kn;
2467
2468 ret = rdtgroup_kn_set_ugid(kn);
2469 if (ret)
2470 goto out_destroy;
2471
2472 kernfs_activate(kn);
2473
2474 return 0;
2475
2476 out_destroy:
2477 kernfs_remove(kn);
2478 return ret;
2479 }
2480
2481 static inline bool is_mba_linear(void)
2482 {
2483 return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear;
2484 }
2485
2486 static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d)
2487 {
2488 u32 num_closid = resctrl_arch_get_num_closid(r);
2489 int cpu = cpumask_any(&d->hdr.cpu_mask);
2490 int i;
2491
2492 d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
2493 GFP_KERNEL, cpu_to_node(cpu));
2494 if (!d->mbps_val)
2495 return -ENOMEM;
2496
2497 for (i = 0; i < num_closid; i++)
2498 d->mbps_val[i] = MBA_MAX_MBPS;
2499
2500 return 0;
2501 }
2502
2503 static void mba_sc_domain_destroy(struct rdt_resource *r,
2504 struct rdt_ctrl_domain *d)
2505 {
2506 kfree(d->mbps_val);
2507 d->mbps_val = NULL;
2508 }
2509
2510 /*
2511 * MBA software controller is supported only if
2512 * MBM is supported and MBA is in linear scale,
2513 * and the MBM monitor scope is the same as MBA
2514 * control scope.
2515 */
2516 static bool supports_mba_mbps(void)
2517 {
2518 struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3);
2519 struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2520
2521 return (resctrl_is_mbm_enabled() &&
2522 r->alloc_capable && is_mba_linear() &&
2523 r->ctrl_scope == rmbm->mon_scope);
2524 }
2525
2526 /*
2527 * Enable or disable the MBA software controller
2528 * which helps user specify bandwidth in MBps.
2529 */
2530 static int set_mba_sc(bool mba_sc)
2531 {
2532 struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
2533 u32 num_closid = resctrl_arch_get_num_closid(r);
2534 struct rdt_ctrl_domain *d;
2535 unsigned long fflags;
2536 int i;
2537
2538 if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
2539 return -EINVAL;
2540
2541 r->membw.mba_sc = mba_sc;
2542
2543 rdtgroup_default.mba_mbps_event = mba_mbps_default_event;
2544
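	/*
	 * When toggling the software controller, reset every domain's
	 * per-CLOSID MBps target to the "no limit" value.
	 */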
2545 list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
2546 for (i = 0; i < num_closid; i++)
2547 d->mbps_val[i] = MBA_MAX_MBPS;
2548 }
2549
2550 fflags = mba_sc ? RFTYPE_CTRL_BASE | RFTYPE_MON_BASE : 0;
2551 resctrl_file_fflags_init("mba_MBps_event", fflags);
2552
2553 return 0;
2554 }
2555
2556 /*
2557 * We don't allow rdtgroup directories to be created anywhere
2558 * except the root directory. Thus when looking for the rdtgroup
2559 * structure for a kernfs node we are either looking at a directory,
2560 * in which case the rdtgroup structure is pointed at by the "priv"
2561  * field, or at a file, in which case we need only look to the parent
2562 * to find the rdtgroup.
2563 */
2564 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
2565 {
2566 if (kernfs_type(kn) == KERNFS_DIR) {
2567 /*
2568 * All the resource directories use "kn->priv"
2569 * to point to the "struct rdtgroup" for the
2570 * resource. "info" and its subdirectories don't
2571 * have rdtgroup structures, so return NULL here.
2572 */
2573 if (kn == kn_info ||
2574 rcu_access_pointer(kn->__parent) == kn_info)
2575 return NULL;
2576 else
2577 return kn->priv;
2578 } else {
2579 return rdt_kn_parent_priv(kn);
2580 }
2581 }
2582
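/*
 * Pin @rdtgrp across a sleep on rdtgroup_mutex: take a waitcount reference
 * and drop kernfs active protection so that the group's directory can be
 * removed concurrently. rdtgroup_kn_put() completes a pending removal if
 * the group was deleted while the reference was held.
 */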
2583 static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
2584 {
2585 atomic_inc(&rdtgrp->waitcount);
2586 kernfs_break_active_protection(kn);
2587 }
2588
2589 static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
2590 {
2591 if (atomic_dec_and_test(&rdtgrp->waitcount) &&
2592 (rdtgrp->flags & RDT_DELETED)) {
2593 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2594 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2595 rdtgroup_pseudo_lock_remove(rdtgrp);
2596 kernfs_unbreak_active_protection(kn);
2597 rdtgroup_remove(rdtgrp);
2598 } else {
2599 kernfs_unbreak_active_protection(kn);
2600 }
2601 }
2602
2603 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
2604 {
2605 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2606
2607 if (!rdtgrp)
2608 return NULL;
2609
2610 rdtgroup_kn_get(rdtgrp, kn);
2611
2612 cpus_read_lock();
2613 mutex_lock(&rdtgroup_mutex);
2614
2615 /* Was this group deleted while we waited? */
2616 if (rdtgrp->flags & RDT_DELETED)
2617 return NULL;
2618
2619 return rdtgrp;
2620 }
2621
2622 void rdtgroup_kn_unlock(struct kernfs_node *kn)
2623 {
2624 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
2625
2626 if (!rdtgrp)
2627 return;
2628
2629 mutex_unlock(&rdtgroup_mutex);
2630 cpus_read_unlock();
2631
2632 rdtgroup_kn_put(rdtgrp, kn);
2633 }
2634
2635 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2636 struct rdtgroup *prgrp,
2637 struct kernfs_node **mon_data_kn);
2638
2639 static void rdt_disable_ctx(void)
2640 {
2641 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
2642 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
2643 set_mba_sc(false);
2644
2645 resctrl_debug = false;
2646 }
2647
2648 static int rdt_enable_ctx(struct rdt_fs_context *ctx)
2649 {
2650 int ret = 0;
2651
2652 if (ctx->enable_cdpl2) {
2653 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
2654 if (ret)
2655 goto out_done;
2656 }
2657
2658 if (ctx->enable_cdpl3) {
2659 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
2660 if (ret)
2661 goto out_cdpl2;
2662 }
2663
2664 if (ctx->enable_mba_mbps) {
2665 ret = set_mba_sc(true);
2666 if (ret)
2667 goto out_cdpl3;
2668 }
2669
2670 if (ctx->enable_debug)
2671 resctrl_debug = true;
2672
2673 return 0;
2674
2675 out_cdpl3:
2676 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
2677 out_cdpl2:
2678 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
2679 out_done:
2680 return ret;
2681 }
2682
2683 static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
2684 {
2685 struct resctrl_schema *s;
2686 const char *suffix = "";
2687 int ret, cl;
2688
2689 s = kzalloc_obj(*s, GFP_KERNEL);
2690 if (!s)
2691 return -ENOMEM;
2692
2693 s->res = r;
2694 s->num_closid = resctrl_arch_get_num_closid(r);
2695 if (resctrl_arch_get_cdp_enabled(r->rid))
2696 s->num_closid /= 2;
2697
2698 s->conf_type = type;
2699 switch (type) {
2700 case CDP_CODE:
2701 suffix = "CODE";
2702 break;
2703 case CDP_DATA:
2704 suffix = "DATA";
2705 break;
2706 case CDP_NONE:
2707 suffix = "";
2708 break;
2709 }
2710
2711 ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
2712 if (ret >= sizeof(s->name)) {
2713 kfree(s);
2714 return -EINVAL;
2715 }
2716
2717 cl = strlen(s->name);
2718
2719 /*
2720 * If CDP is supported by this resource, but not enabled,
2721 * include the suffix. This ensures the tabular format of the
2722 * schemata file does not change between mounts of the filesystem.
2723 */
2724 if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
2725 		cl += 4;	/* strlen("CODE") == strlen("DATA") == 4 */
2726
2727 if (cl > max_name_width)
2728 max_name_width = cl;
2729
2730 switch (r->schema_fmt) {
2731 case RESCTRL_SCHEMA_BITMAP:
2732 s->fmt_str = "%d=%x";
2733 break;
2734 case RESCTRL_SCHEMA_RANGE:
2735 s->fmt_str = "%d=%u";
2736 break;
2737 }
2738
2739 if (WARN_ON_ONCE(!s->fmt_str)) {
2740 kfree(s);
2741 return -EINVAL;
2742 }
2743
2744 INIT_LIST_HEAD(&s->list);
2745 list_add(&s->list, &resctrl_schema_all);
2746
2747 return 0;
2748 }
2749
2750 static int schemata_list_create(void)
2751 {
2752 struct rdt_resource *r;
2753 int ret = 0;
2754
2755 for_each_alloc_capable_rdt_resource(r) {
2756 if (resctrl_arch_get_cdp_enabled(r->rid)) {
2757 ret = schemata_list_add(r, CDP_CODE);
2758 if (ret)
2759 break;
2760
2761 ret = schemata_list_add(r, CDP_DATA);
2762 } else {
2763 ret = schemata_list_add(r, CDP_NONE);
2764 }
2765
2766 if (ret)
2767 break;
2768 }
2769
2770 return ret;
2771 }
2772
2773 static void schemata_list_destroy(void)
2774 {
2775 struct resctrl_schema *s, *tmp;
2776
2777 list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
2778 list_del(&s->list);
2779 kfree(s);
2780 }
2781 }
2782
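/*
 * Mount-time construction of the resctrl hierarchy: set up the default
 * group's root directory, apply the mount options, build the schemata list,
 * then create the "info", "mon_groups" and "mon_data" directories. Each
 * out_* label below unwinds the steps completed before the failure, in
 * reverse order.
 */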
2783 static int rdt_get_tree(struct fs_context *fc)
2784 {
2785 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2786 unsigned long flags = RFTYPE_CTRL_BASE;
2787 struct rdt_l3_mon_domain *dom;
2788 struct rdt_resource *r;
2789 int ret;
2790
2791 DO_ONCE_SLEEPABLE(resctrl_arch_pre_mount);
2792
2793 cpus_read_lock();
2794 mutex_lock(&rdtgroup_mutex);
2795 /*
2796 * resctrl file system can only be mounted once.
2797 */
2798 if (resctrl_mounted) {
2799 ret = -EBUSY;
2800 goto out;
2801 }
2802
2803 ret = setup_rmid_lru_list();
2804 if (ret)
2805 goto out;
2806
2807 ret = rdtgroup_setup_root(ctx);
2808 if (ret)
2809 goto out;
2810
2811 ret = rdt_enable_ctx(ctx);
2812 if (ret)
2813 goto out_root;
2814
2815 ret = schemata_list_create();
2816 if (ret)
2817 goto out_schemata_free;
2818
2819 ret = closid_init();
2820 if (ret)
2821 goto out_schemata_free;
2822
2823 if (resctrl_arch_mon_capable())
2824 flags |= RFTYPE_MON;
2825
2826 ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
2827 if (ret)
2828 goto out_closid_exit;
2829
2830 kernfs_activate(rdtgroup_default.kn);
2831
2832 ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
2833 if (ret < 0)
2834 goto out_closid_exit;
2835
2836 if (resctrl_arch_mon_capable()) {
2837 ret = mongroup_create_dir(rdtgroup_default.kn,
2838 &rdtgroup_default, "mon_groups",
2839 &kn_mongrp);
2840 if (ret < 0)
2841 goto out_info;
2842
2843 rdtgroup_assign_cntrs(&rdtgroup_default);
2844
2845 ret = mkdir_mondata_all(rdtgroup_default.kn,
2846 &rdtgroup_default, &kn_mondata);
2847 if (ret < 0)
2848 goto out_mongrp;
2849 rdtgroup_default.mon.mon_data_kn = kn_mondata;
2850 }
2851
2852 ret = rdt_pseudo_lock_init();
2853 if (ret)
2854 goto out_mondata;
2855
2856 ret = kernfs_get_tree(fc);
2857 if (ret < 0)
2858 goto out_psl;
2859
2860 if (resctrl_arch_alloc_capable())
2861 resctrl_arch_enable_alloc();
2862 if (resctrl_arch_mon_capable())
2863 resctrl_arch_enable_mon();
2864
2865 if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
2866 resctrl_mounted = true;
2867
2868 if (resctrl_is_mbm_enabled()) {
2869 r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
2870 list_for_each_entry(dom, &r->mon_domains, hdr.list)
2871 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
2872 RESCTRL_PICK_ANY_CPU);
2873 }
2874
2875 goto out;
2876
2877 out_psl:
2878 rdt_pseudo_lock_release();
2879 out_mondata:
2880 if (resctrl_arch_mon_capable())
2881 kernfs_remove(kn_mondata);
2882 out_mongrp:
2883 if (resctrl_arch_mon_capable()) {
2884 rdtgroup_unassign_cntrs(&rdtgroup_default);
2885 kernfs_remove(kn_mongrp);
2886 }
2887 out_info:
2888 kernfs_remove(kn_info);
2889 out_closid_exit:
2890 closid_exit();
2891 out_schemata_free:
2892 schemata_list_destroy();
2893 rdt_disable_ctx();
2894 out_root:
2895 rdtgroup_destroy_root();
2896 out:
2897 rdt_last_cmd_clear();
2898 mutex_unlock(&rdtgroup_mutex);
2899 cpus_read_unlock();
2900 return ret;
2901 }
2902
2903 enum rdt_param {
2904 Opt_cdp,
2905 Opt_cdpl2,
2906 Opt_mba_mbps,
2907 Opt_debug,
2908 nr__rdt_params
2909 };
2910
2911 static const struct fs_parameter_spec rdt_fs_parameters[] = {
2912 fsparam_flag("cdp", Opt_cdp),
2913 fsparam_flag("cdpl2", Opt_cdpl2),
2914 fsparam_flag("mba_MBps", Opt_mba_mbps),
2915 fsparam_flag("debug", Opt_debug),
2916 {}
2917 };
2918
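/*
 * Mount options are simple flags, e.g. (assuming the conventional mount
 * point):
 *   mount -t resctrl resctrl -o cdp,mba_MBps /sys/fs/resctrl
 */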
2919 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
2920 {
2921 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2922 struct fs_parse_result result;
2923 const char *msg;
2924 int opt;
2925
2926 opt = fs_parse(fc, rdt_fs_parameters, param, &result);
2927 if (opt < 0)
2928 return opt;
2929
2930 switch (opt) {
2931 case Opt_cdp:
2932 ctx->enable_cdpl3 = true;
2933 return 0;
2934 case Opt_cdpl2:
2935 ctx->enable_cdpl2 = true;
2936 return 0;
2937 case Opt_mba_mbps:
2938 msg = "mba_MBps requires MBM and linear scale MBA at L3 scope";
2939 if (!supports_mba_mbps())
2940 return invalfc(fc, msg);
2941 ctx->enable_mba_mbps = true;
2942 return 0;
2943 case Opt_debug:
2944 ctx->enable_debug = true;
2945 return 0;
2946 }
2947
2948 return -EINVAL;
2949 }
2950
2951 static void rdt_fs_context_free(struct fs_context *fc)
2952 {
2953 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2954
2955 kernfs_free_fs_context(fc);
2956 kfree(ctx);
2957 }
2958
2959 static const struct fs_context_operations rdt_fs_context_ops = {
2960 .free = rdt_fs_context_free,
2961 .parse_param = rdt_parse_param,
2962 .get_tree = rdt_get_tree,
2963 };
2964
2965 static int rdt_init_fs_context(struct fs_context *fc)
2966 {
2967 struct rdt_fs_context *ctx;
2968
2969 ctx = kzalloc_obj(*ctx, GFP_KERNEL);
2970 if (!ctx)
2971 return -ENOMEM;
2972
2973 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
2974 fc->fs_private = &ctx->kfc;
2975 fc->ops = &rdt_fs_context_ops;
2976 put_user_ns(fc->user_ns);
2977 fc->user_ns = get_user_ns(&init_user_ns);
2978 fc->global = true;
2979 return 0;
2980 }
2981
2982 /*
2983  * Move tasks from one group to the other. If @from is NULL, then all tasks
2984  * in the system are moved unconditionally (used for teardown).
2985 *
2986 * If @mask is not NULL the cpus on which moved tasks are running are set
2987 * in that mask so the update smp function call is restricted to affected
2988 * cpus.
2989 */
2990 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2991 struct cpumask *mask)
2992 {
2993 struct task_struct *p, *t;
2994
2995 read_lock(&tasklist_lock);
2996 for_each_process_thread(p, t) {
2997 if (!from || is_closid_match(t, from) ||
2998 is_rmid_match(t, from)) {
2999 resctrl_arch_set_closid_rmid(t, to->closid,
3000 to->mon.rmid);
3001
3002 /*
3003 * Order the closid/rmid stores above before the loads
3004 * in task_curr(). This pairs with the full barrier
3005 * between the rq->curr update and
3006 * resctrl_arch_sched_in() during context switch.
3007 */
3008 smp_mb();
3009
3010 /*
3011 * If the task is on a CPU, set the CPU in the mask.
3012 * The detection is inaccurate as tasks might move or
3013 * schedule before the smp function call takes place.
3014 * In such a case the function call is pointless, but
3015 * there is no other side effect.
3016 */
3017 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
3018 cpumask_set_cpu(task_cpu(t), mask);
3019 }
3020 }
3021 read_unlock(&tasklist_lock);
3022 }
3023
3024 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
3025 {
3026 struct rdtgroup *sentry, *stmp;
3027 struct list_head *head;
3028
3029 head = &rdtgrp->mon.crdtgrp_list;
3030 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
3031 rdtgroup_unassign_cntrs(sentry);
3032 free_rmid(sentry->closid, sentry->mon.rmid);
3033 list_del(&sentry->mon.crdtgrp_list);
3034
3035 if (atomic_read(&sentry->waitcount) != 0)
3036 sentry->flags = RDT_DELETED;
3037 else
3038 rdtgroup_remove(sentry);
3039 }
3040 }
3041
3042 /*
3043 * Forcibly remove all of subdirectories under root.
3044 */
3045 static void rmdir_all_sub(void)
3046 {
3047 struct rdtgroup *rdtgrp, *tmp;
3048
3049 /* Move all tasks to the default resource group */
3050 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
3051
3052 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
3053 /* Free any child rmids */
3054 free_all_child_rdtgrp(rdtgrp);
3055
3056 /* Remove each rdtgroup other than root */
3057 if (rdtgrp == &rdtgroup_default)
3058 continue;
3059
3060 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3061 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
3062 rdtgroup_pseudo_lock_remove(rdtgrp);
3063
3064 /*
3065 * Give any CPUs back to the default group. We cannot copy
3066 * cpu_online_mask because a CPU might have executed the
3067 * offline callback already, but is still marked online.
3068 */
3069 cpumask_or(&rdtgroup_default.cpu_mask,
3070 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3071
3072 rdtgroup_unassign_cntrs(rdtgrp);
3073
3074 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3075
3076 kernfs_remove(rdtgrp->kn);
3077 list_del(&rdtgrp->rdtgroup_list);
3078
3079 if (atomic_read(&rdtgrp->waitcount) != 0)
3080 rdtgrp->flags = RDT_DELETED;
3081 else
3082 rdtgroup_remove(rdtgrp);
3083 }
3084 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
3085 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
3086
3087 kernfs_remove(kn_info);
3088 kernfs_remove(kn_mongrp);
3089 kernfs_remove(kn_mondata);
3090 }
3091
3092 /**
3093 * mon_get_kn_priv() - Get the mon_data priv data for this event.
3094 *
3095 * The same values are used across the mon_data directories of all control and
3096 * monitor groups for the same event in the same domain. Keep a list of
3097 * allocated structures and re-use an existing one with the same values for
3098 * @rid, @domid, etc.
3099 *
3100 * @rid: The resource id for the event file being created.
3101 * @domid: The domain id for the event file being created.
3102 * @mevt: The type of event file being created.
3103 * @do_sum: Whether SNC summing monitors are being created. Only set
3104 * when @rid == RDT_RESOURCE_L3.
3105 */
3106 static struct mon_data *mon_get_kn_priv(enum resctrl_res_level rid, int domid,
3107 struct mon_evt *mevt,
3108 bool do_sum)
3109 {
3110 struct mon_data *priv;
3111
3112 lockdep_assert_held(&rdtgroup_mutex);
3113
3114 list_for_each_entry(priv, &mon_data_kn_priv_list, list) {
3115 if (priv->rid == rid && priv->domid == domid &&
3116 priv->sum == do_sum && priv->evt == mevt)
3117 return priv;
3118 }
3119
3120 priv = kzalloc_obj(*priv, GFP_KERNEL);
3121 if (!priv)
3122 return NULL;
3123
3124 priv->rid = rid;
3125 priv->domid = domid;
3126 priv->sum = do_sum;
3127 priv->evt = mevt;
3128 list_add_tail(&priv->list, &mon_data_kn_priv_list);
3129
3130 return priv;
3131 }
3132
3133 /**
3134 * mon_put_kn_priv() - Free all allocated mon_data structures.
3135 *
3136 * Called when resctrl file system is unmounted.
3137 */
3138 static void mon_put_kn_priv(void)
3139 {
3140 struct mon_data *priv, *tmp;
3141
3142 lockdep_assert_held(&rdtgroup_mutex);
3143
3144 list_for_each_entry_safe(priv, tmp, &mon_data_kn_priv_list, list) {
3145 list_del(&priv->list);
3146 kfree(priv);
3147 }
3148 }
3149
3150 static void resctrl_fs_teardown(void)
3151 {
3152 lockdep_assert_held(&rdtgroup_mutex);
3153
3154 /* Cleared by rdtgroup_destroy_root() */
3155 if (!rdtgroup_default.kn)
3156 return;
3157
3158 rmdir_all_sub();
3159 rdtgroup_unassign_cntrs(&rdtgroup_default);
3160 mon_put_kn_priv();
3161 rdt_pseudo_lock_release();
3162 rdtgroup_default.mode = RDT_MODE_SHAREABLE;
3163 closid_exit();
3164 schemata_list_destroy();
3165 rdtgroup_destroy_root();
3166 }
3167
3168 static void rdt_kill_sb(struct super_block *sb)
3169 {
3170 struct rdt_resource *r;
3171
3172 cpus_read_lock();
3173 mutex_lock(&rdtgroup_mutex);
3174
3175 rdt_disable_ctx();
3176
3177 /* Put everything back to default values. */
3178 for_each_alloc_capable_rdt_resource(r)
3179 resctrl_arch_reset_all_ctrls(r);
3180
3181 resctrl_fs_teardown();
3182 if (resctrl_arch_alloc_capable())
3183 resctrl_arch_disable_alloc();
3184 if (resctrl_arch_mon_capable())
3185 resctrl_arch_disable_mon();
3186 resctrl_mounted = false;
3187 kernfs_kill_sb(sb);
3188 mutex_unlock(&rdtgroup_mutex);
3189 cpus_read_unlock();
3190 }
3191
3192 static struct file_system_type rdt_fs_type = {
3193 .name = "resctrl",
3194 .init_fs_context = rdt_init_fs_context,
3195 .parameters = rdt_fs_parameters,
3196 .kill_sb = rdt_kill_sb,
3197 };
3198
3199 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
3200 void *priv)
3201 {
3202 struct kernfs_node *kn;
3203 int ret = 0;
3204
3205 kn = __kernfs_create_file(parent_kn, name, 0444,
3206 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
3207 &kf_mondata_ops, priv, NULL, NULL);
3208 if (IS_ERR(kn))
3209 return PTR_ERR(kn);
3210
3211 ret = rdtgroup_kn_set_ugid(kn);
3212 if (ret) {
3213 kernfs_remove(kn);
3214 return ret;
3215 }
3216
3217 return ret;
3218 }
3219
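/*
 * Remove the SNC sub-directory @subname from the domain directory @name.
 * If @subname is the only remaining sub-directory, remove the whole @name
 * directory (including its summing files) instead.
 */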
3220 static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname)
3221 {
3222 struct kernfs_node *kn;
3223
3224 kn = kernfs_find_and_get(pkn, name);
3225 if (!kn)
3226 return;
3227 kernfs_put(kn);
3228
3229 if (kn->dir.subdirs <= 1)
3230 kernfs_remove(kn);
3231 else
3232 kernfs_remove_by_name(kn, subname);
3233 }
3234
3235 /*
3236 * Remove files and directories for one SNC node. If it is the last node
3237 * sharing an L3 cache, then remove the upper level directory containing
3238 * the "sum" files too.
3239 */
3240 static void rmdir_mondata_subdir_allrdtgrp_snc(struct rdt_resource *r,
3241 struct rdt_domain_hdr *hdr)
3242 {
3243 struct rdtgroup *prgrp, *crgrp;
3244 struct rdt_l3_mon_domain *d;
3245 char subname[32];
3246 char name[32];
3247
3248 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
3249 return;
3250
3251 d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
3252 sprintf(name, "mon_%s_%02d", r->name, d->ci_id);
3253 sprintf(subname, "mon_sub_%s_%02d", r->name, hdr->id);
3254
3255 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3256 mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname);
3257
3258 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
3259 mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname);
3260 }
3261 }
3262
3263 /*
3264 * Remove all subdirectories of mon_data of ctrl_mon groups
3265 * and monitor groups for the given domain.
3266 */
3267 static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
3268 struct rdt_domain_hdr *hdr)
3269 {
3270 struct rdtgroup *prgrp, *crgrp;
3271 char name[32];
3272
3273 if (r->rid == RDT_RESOURCE_L3 && r->mon_scope == RESCTRL_L3_NODE) {
3274 rmdir_mondata_subdir_allrdtgrp_snc(r, hdr);
3275 return;
3276 }
3277
3278 sprintf(name, "mon_%s_%02d", r->name, hdr->id);
3279 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3280 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
3281
3282 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
3283 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
3284 }
3285 }
3286
3287 /*
3288 * Create a directory for a domain and populate it with monitor files. Create
3289 * summing monitors when @hdr is NULL. No need to initialize summing monitors.
3290 */
3291 static struct kernfs_node *_mkdir_mondata_subdir(struct kernfs_node *parent_kn, char *name,
3292 struct rdt_domain_hdr *hdr,
3293 struct rdt_resource *r,
3294 struct rdtgroup *prgrp, int domid)
3295 {
3296 struct rmid_read rr = {0};
3297 struct kernfs_node *kn;
3298 struct mon_data *priv;
3299 struct mon_evt *mevt;
3300 int ret;
3301
3302 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
3303 if (IS_ERR(kn))
3304 return kn;
3305
3306 ret = rdtgroup_kn_set_ugid(kn);
3307 if (ret)
3308 goto out_destroy;
3309
3310 for_each_mon_event(mevt) {
3311 if (mevt->rid != r->rid || !mevt->enabled)
3312 continue;
3313 priv = mon_get_kn_priv(r->rid, domid, mevt, !hdr);
3314 if (WARN_ON_ONCE(!priv)) {
3315 ret = -EINVAL;
3316 goto out_destroy;
3317 }
3318
3319 ret = mon_addfile(kn, mevt->name, priv);
3320 if (ret)
3321 goto out_destroy;
3322
3323 if (hdr && resctrl_is_mbm_event(mevt->evtid))
3324 mon_event_read(&rr, r, hdr, prgrp, &hdr->cpu_mask, mevt, true);
3325 }
3326
3327 return kn;
3328 out_destroy:
3329 kernfs_remove(kn);
3330 return ERR_PTR(ret);
3331 }
3332
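/*
 * SNC layout: one "mon_L3_<cache_id>" directory per L3 cache holding the
 * summing event files, with a "mon_sub_L3_<node_id>" sub-directory for each
 * SNC node that shares the cache. The parent directory is created on first
 * use and reused for subsequent nodes.
 */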
3333 static int mkdir_mondata_subdir_snc(struct kernfs_node *parent_kn,
3334 struct rdt_domain_hdr *hdr,
3335 struct rdt_resource *r, struct rdtgroup *prgrp)
3336 {
3337 struct kernfs_node *ckn, *kn;
3338 struct rdt_l3_mon_domain *d;
3339 char name[32];
3340
3341 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
3342 return -EINVAL;
3343
3344 d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
3345 sprintf(name, "mon_%s_%02d", r->name, d->ci_id);
3346 kn = kernfs_find_and_get(parent_kn, name);
3347 if (kn) {
3348 /*
3349 * rdtgroup_mutex will prevent this directory from being
3350 * removed. No need to keep this hold.
3351 */
3352 kernfs_put(kn);
3353 } else {
3354 kn = _mkdir_mondata_subdir(parent_kn, name, NULL, r, prgrp, d->ci_id);
3355 if (IS_ERR(kn))
3356 return PTR_ERR(kn);
3357 }
3358
3359 sprintf(name, "mon_sub_%s_%02d", r->name, hdr->id);
3360 ckn = _mkdir_mondata_subdir(kn, name, hdr, r, prgrp, hdr->id);
3361 if (IS_ERR(ckn)) {
3362 kernfs_remove(kn);
3363 return PTR_ERR(ckn);
3364 }
3365
3366 kernfs_activate(kn);
3367 return 0;
3368 }
3369
3370 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
3371 struct rdt_domain_hdr *hdr,
3372 struct rdt_resource *r, struct rdtgroup *prgrp)
3373 {
3374 struct kernfs_node *kn;
3375 char name[32];
3376
3377 lockdep_assert_held(&rdtgroup_mutex);
3378
3379 if (r->rid == RDT_RESOURCE_L3 && r->mon_scope == RESCTRL_L3_NODE)
3380 return mkdir_mondata_subdir_snc(parent_kn, hdr, r, prgrp);
3381
3382 sprintf(name, "mon_%s_%02d", r->name, hdr->id);
3383 kn = _mkdir_mondata_subdir(parent_kn, name, hdr, r, prgrp, hdr->id);
3384 if (IS_ERR(kn))
3385 return PTR_ERR(kn);
3386
3387 kernfs_activate(kn);
3388 return 0;
3389 }
3390
3391 /*
3392 * Add all subdirectories of mon_data for "ctrl_mon" groups
3393 * and "monitor" groups with given domain id.
3394 */
3395 static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
3396 struct rdt_domain_hdr *hdr)
3397 {
3398 struct kernfs_node *parent_kn;
3399 struct rdtgroup *prgrp, *crgrp;
3400 struct list_head *head;
3401
3402 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3403 parent_kn = prgrp->mon.mon_data_kn;
3404 mkdir_mondata_subdir(parent_kn, hdr, r, prgrp);
3405
3406 head = &prgrp->mon.crdtgrp_list;
3407 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
3408 parent_kn = crgrp->mon.mon_data_kn;
3409 mkdir_mondata_subdir(parent_kn, hdr, r, crgrp);
3410 }
3411 }
3412 }
3413
3414 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
3415 struct rdt_resource *r,
3416 struct rdtgroup *prgrp)
3417 {
3418 struct rdt_domain_hdr *hdr;
3419 int ret;
3420
3421 /* Walking r->domains, ensure it can't race with cpuhp */
3422 lockdep_assert_cpus_held();
3423
3424 list_for_each_entry(hdr, &r->mon_domains, list) {
3425 ret = mkdir_mondata_subdir(parent_kn, hdr, r, prgrp);
3426 if (ret)
3427 return ret;
3428 }
3429
3430 return 0;
3431 }
3432
3433 /*
3434 * This creates a directory mon_data which contains the monitored data.
3435 *
3436 * mon_data has one directory for each domain which are named
3437  * in the format mon_<domain_name>_<domain_id>. For example, a mon_data
3438 * with L3 domain looks as below:
3439 * ./mon_data:
3440 * mon_L3_00
3441 * mon_L3_01
3442 * mon_L3_02
3443 * ...
3444 *
3445 * Each domain directory has one file per event:
3446 * ./mon_L3_00/:
3447 * llc_occupancy
3448 *
3449 */
3450 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
3451 struct rdtgroup *prgrp,
3452 struct kernfs_node **dest_kn)
3453 {
3454 struct rdt_resource *r;
3455 struct kernfs_node *kn;
3456 int ret;
3457
3458 /*
3459 * Create the mon_data directory first.
3460 */
3461 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
3462 if (ret)
3463 return ret;
3464
3465 if (dest_kn)
3466 *dest_kn = kn;
3467
3468 /*
3469 * Create the subdirectories for each domain. Note that all events
3470 * in a domain like L3 are grouped into a resource whose domain is L3
3471 */
3472 for_each_mon_capable_rdt_resource(r) {
3473 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
3474 if (ret)
3475 goto out_destroy;
3476 }
3477
3478 return 0;
3479
3480 out_destroy:
3481 kernfs_remove(kn);
3482 return ret;
3483 }
3484
3485 /**
3486 * cbm_ensure_valid - Enforce validity on provided CBM
3487 * @_val: Candidate CBM
3488 * @r: RDT resource to which the CBM belongs
3489 *
3490 * The provided CBM represents all cache portions available for use. This
3491 * may be represented by a bitmap that does not consist of contiguous ones
3492 * and thus be an invalid CBM.
3493 * Here the provided CBM is forced to be a valid CBM by only considering
3494  * the first set of contiguous bits as valid and clearing all other bits.
3495 * The intention here is to provide a valid default CBM with which a new
3496 * resource group is initialized. The user can follow this with a
3497 * modification to the CBM if the default does not satisfy the
3498 * requirements.
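 *
 * For example, on a cache without sparse bitmask support, a candidate CBM
 * of 0xf0f is reduced to 0x00f: only the lowest run of contiguous set bits
 * is kept.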
3499 */
3500 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
3501 {
3502 unsigned int cbm_len = r->cache.cbm_len;
3503 unsigned long first_bit, zero_bit;
3504 unsigned long val;
3505
3506 if (!_val || r->cache.arch_has_sparse_bitmasks)
3507 return _val;
3508
3509 val = _val;
3510 first_bit = find_first_bit(&val, cbm_len);
3511 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
3512
3513 /* Clear any remaining bits to ensure contiguous region */
3514 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
3515 return (u32)val;
3516 }
3517
3518 /*
3519 * Initialize cache resources per RDT domain
3520 *
3521 * Set the RDT domain up to start off with all usable allocations. That is,
3522 * all shareable and unused bits. All-zero CBM is invalid.
3523 */
3524 static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
3525 u32 closid)
3526 {
3527 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
3528 enum resctrl_conf_type t = s->conf_type;
3529 struct resctrl_staged_config *cfg;
3530 struct rdt_resource *r = s->res;
3531 u32 used_b = 0, unused_b = 0;
3532 unsigned long tmp_cbm;
3533 enum rdtgrp_mode mode;
3534 u32 peer_ctl, ctrl_val;
3535 int i;
3536
3537 cfg = &d->staged_config[t];
3538 cfg->have_new_ctrl = false;
3539 cfg->new_ctrl = r->cache.shareable_bits;
3540 used_b = r->cache.shareable_bits;
3541 for (i = 0; i < closids_supported(); i++) {
3542 if (closid_allocated(i) && i != closid) {
3543 mode = rdtgroup_mode_by_closid(i);
3544 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
3545 /*
3546 * ctrl values for locksetup aren't relevant
3547 * until the schemata is written, and the mode
3548 * becomes RDT_MODE_PSEUDO_LOCKED.
3549 */
3550 continue;
3551 /*
3552 * If CDP is active include peer domain's
3553 * usage to ensure there is no overlap
3554 * with an exclusive group.
3555 */
3556 if (resctrl_arch_get_cdp_enabled(r->rid))
3557 peer_ctl = resctrl_arch_get_config(r, d, i,
3558 peer_type);
3559 else
3560 peer_ctl = 0;
3561 ctrl_val = resctrl_arch_get_config(r, d, i,
3562 s->conf_type);
3563 used_b |= ctrl_val | peer_ctl;
3564 if (mode == RDT_MODE_SHAREABLE)
3565 cfg->new_ctrl |= ctrl_val | peer_ctl;
3566 }
3567 }
3568 if (d->plr && d->plr->cbm > 0)
3569 used_b |= d->plr->cbm;
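	/*
	 * Bits not used by any other CLOSID (or a pseudo-locked region) are
	 * free for the new group; add them to the shareable bits gathered
	 * above.
	 */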
3570 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
3571 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
3572 cfg->new_ctrl |= unused_b;
3573 /*
3574 * Force the initial CBM to be valid, user can
3575 * modify the CBM based on system availability.
3576 */
3577 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
3578 /*
3579 * Assign the u32 CBM to an unsigned long to ensure that
3580 * bitmap_weight() does not access out-of-bound memory.
3581 */
3582 tmp_cbm = cfg->new_ctrl;
3583 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
3584 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id);
3585 return -ENOSPC;
3586 }
3587 cfg->have_new_ctrl = true;
3588
3589 return 0;
3590 }
3591
3592 /*
3593 * Initialize cache resources with default values.
3594 *
3595 * A new RDT group is being created on an allocation capable (CAT)
3596 * supporting system. Set this group up to start off with all usable
3597 * allocations.
3598 *
3599 * If there are no more shareable bits available on any domain then
3600 * the entire allocation will fail.
3601 */
3602 int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
3603 {
3604 struct rdt_ctrl_domain *d;
3605 int ret;
3606
3607 list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) {
3608 ret = __init_one_rdt_domain(d, s, closid);
3609 if (ret < 0)
3610 return ret;
3611 }
3612
3613 return 0;
3614 }
3615
3616 /* Initialize MBA resource with default values. */
3617 static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
3618 {
3619 struct resctrl_staged_config *cfg;
3620 struct rdt_ctrl_domain *d;
3621
3622 list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
3623 if (is_mba_sc(r)) {
3624 d->mbps_val[closid] = MBA_MAX_MBPS;
3625 continue;
3626 }
3627
3628 cfg = &d->staged_config[CDP_NONE];
3629 cfg->new_ctrl = resctrl_get_default_ctrl(r);
3630 cfg->have_new_ctrl = true;
3631 }
3632 }
3633
3634 /* Initialize the RDT group's allocations. */
3635 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
3636 {
3637 struct resctrl_schema *s;
3638 struct rdt_resource *r;
3639 int ret = 0;
3640
3641 rdt_staged_configs_clear();
3642
3643 list_for_each_entry(s, &resctrl_schema_all, list) {
3644 r = s->res;
3645 if (r->rid == RDT_RESOURCE_MBA ||
3646 r->rid == RDT_RESOURCE_SMBA) {
3647 rdtgroup_init_mba(r, rdtgrp->closid);
3648 if (is_mba_sc(r))
3649 continue;
3650 } else {
3651 ret = rdtgroup_init_cat(s, rdtgrp->closid);
3652 if (ret < 0)
3653 goto out;
3654 }
3655
3656 ret = resctrl_arch_update_domains(r, rdtgrp->closid);
3657 if (ret < 0) {
3658 rdt_last_cmd_puts("Failed to initialize allocations\n");
3659 goto out;
3660 }
3661 }
3662
3663 rdtgrp->mode = RDT_MODE_SHAREABLE;
3664
3665 out:
3666 rdt_staged_configs_clear();
3667 return ret;
3668 }
3669
3670 static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
3671 {
3672 int ret;
3673
3674 if (!resctrl_arch_mon_capable())
3675 return 0;
3676
3677 ret = alloc_rmid(rdtgrp->closid);
3678 if (ret < 0) {
3679 rdt_last_cmd_puts("Out of RMIDs\n");
3680 return ret;
3681 }
3682 rdtgrp->mon.rmid = ret;
3683
3684 rdtgroup_assign_cntrs(rdtgrp);
3685
3686 ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
3687 if (ret) {
3688 rdt_last_cmd_puts("kernfs subdir error\n");
3689 rdtgroup_unassign_cntrs(rdtgrp);
3690 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3691 return ret;
3692 }
3693
3694 return 0;
3695 }
3696
3697 static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
3698 {
3699 if (resctrl_arch_mon_capable()) {
3700 rdtgroup_unassign_cntrs(rgrp);
3701 free_rmid(rgrp->closid, rgrp->mon.rmid);
3702 }
3703 }
3704
3705 /*
3706  * We allow creating mon groups only within a directory called "mon_groups"
3707 * which is present in every ctrl_mon group. Check if this is a valid
3708 * "mon_groups" directory.
3709 *
3710 * 1. The directory should be named "mon_groups".
3711 * 2. The mon group itself should "not" be named "mon_groups".
3712 * This makes sure "mon_groups" directory always has a ctrl_mon group
3713 * as parent.
3714 */
3715 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
3716 {
3717 return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
3718 strcmp(name, "mon_groups"));
3719 }
3720
3721 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
3722 const char *name, umode_t mode,
3723 enum rdt_group_type rtype, struct rdtgroup **r)
3724 {
3725 struct rdtgroup *prdtgrp, *rdtgrp;
3726 unsigned long files = 0;
3727 struct kernfs_node *kn;
3728 int ret;
3729
3730 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
3731 if (!prdtgrp) {
3732 ret = -ENODEV;
3733 goto out_unlock;
3734 }
3735
3736 rdt_last_cmd_clear();
3737
3738 /*
3739 * Check that the parent directory for a monitor group is a "mon_groups"
3740 * directory.
3741 */
3742 if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
3743 ret = -EPERM;
3744 goto out_unlock;
3745 }
3746
3747 if (rtype == RDTMON_GROUP &&
3748 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3749 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
3750 ret = -EINVAL;
3751 rdt_last_cmd_puts("Pseudo-locking in progress\n");
3752 goto out_unlock;
3753 }
3754
3755 /* allocate the rdtgroup. */
3756 rdtgrp = kzalloc_obj(*rdtgrp, GFP_KERNEL);
3757 if (!rdtgrp) {
3758 ret = -ENOSPC;
3759 rdt_last_cmd_puts("Kernel out of memory\n");
3760 goto out_unlock;
3761 }
3762 *r = rdtgrp;
3763 rdtgrp->mon.parent = prdtgrp;
3764 rdtgrp->type = rtype;
3765 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
3766
3767 /* kernfs creates the directory for rdtgrp */
3768 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
3769 if (IS_ERR(kn)) {
3770 ret = PTR_ERR(kn);
3771 rdt_last_cmd_puts("kernfs create error\n");
3772 goto out_free_rgrp;
3773 }
3774 rdtgrp->kn = kn;
3775
3776 /*
3777 * kernfs_remove() will drop the reference count on "kn" which
3778 * will free it. But we still need it to stick around for the
3779 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
3780 * which will be dropped by kernfs_put() in rdtgroup_remove().
3781 */
3782 kernfs_get(kn);
3783
3784 ret = rdtgroup_kn_set_ugid(kn);
3785 if (ret) {
3786 rdt_last_cmd_puts("kernfs perm error\n");
3787 goto out_destroy;
3788 }
3789
3790 if (rtype == RDTCTRL_GROUP) {
3791 files = RFTYPE_BASE | RFTYPE_CTRL;
3792 if (resctrl_arch_mon_capable())
3793 files |= RFTYPE_MON;
3794 } else {
3795 files = RFTYPE_BASE | RFTYPE_MON;
3796 }
3797
3798 ret = rdtgroup_add_files(kn, files);
3799 if (ret) {
3800 rdt_last_cmd_puts("kernfs fill error\n");
3801 goto out_destroy;
3802 }
3803
3804 /*
3805 * The caller unlocks the parent_kn upon success.
3806 */
3807 return 0;
3808
3809 out_destroy:
3810 kernfs_put(rdtgrp->kn);
3811 kernfs_remove(rdtgrp->kn);
3812 out_free_rgrp:
3813 kfree(rdtgrp);
3814 out_unlock:
3815 rdtgroup_kn_unlock(parent_kn);
3816 return ret;
3817 }
3818
3819 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
3820 {
3821 kernfs_remove(rgrp->kn);
3822 rdtgroup_remove(rgrp);
3823 }
3824
3825 /*
3826 * Create a monitor group under "mon_groups" directory of a control
3827 * and monitor group (ctrl_mon). This is a resource group used
3828 * to monitor a subset of tasks and CPUs in its parent ctrl_mon group.
3829 */
3830 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
3831 const char *name, umode_t mode)
3832 {
3833 struct rdtgroup *rdtgrp, *prgrp;
3834 int ret;
3835
3836 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
3837 if (ret)
3838 return ret;
3839
3840 prgrp = rdtgrp->mon.parent;
3841 rdtgrp->closid = prgrp->closid;
3842
3843 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
3844 if (ret) {
3845 mkdir_rdt_prepare_clean(rdtgrp);
3846 goto out_unlock;
3847 }
3848
3849 kernfs_activate(rdtgrp->kn);
3850
3851 /*
3852 * Add the rdtgrp to the list of rdtgrps the parent
3853 * ctrl_mon group has to track.
3854 */
3855 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
3856
3857 out_unlock:
3858 rdtgroup_kn_unlock(parent_kn);
3859 return ret;
3860 }
3861
3862 /*
3863 * These are rdtgroups created under the root directory. Can be used
3864 * to allocate and monitor resources.
3865 */
3866 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
3867 const char *name, umode_t mode)
3868 {
3869 struct rdtgroup *rdtgrp;
3870 struct kernfs_node *kn;
3871 u32 closid;
3872 int ret;
3873
3874 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
3875 if (ret)
3876 return ret;
3877
3878 kn = rdtgrp->kn;
3879 ret = closid_alloc();
3880 if (ret < 0) {
3881 rdt_last_cmd_puts("Out of CLOSIDs\n");
3882 goto out_common_fail;
3883 }
3884 closid = ret;
3885 ret = 0;
3886
3887 rdtgrp->closid = closid;
3888
3889 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
3890 if (ret)
3891 goto out_closid_free;
3892
3893 kernfs_activate(rdtgrp->kn);
3894
3895 ret = rdtgroup_init_alloc(rdtgrp);
3896 if (ret < 0)
3897 goto out_rmid_free;
3898
3899 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
3900
3901 if (resctrl_arch_mon_capable()) {
3902 /*
3903 * Create an empty mon_groups directory to hold the subset
3904 * of tasks and cpus to monitor.
3905 */
3906 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
3907 if (ret) {
3908 rdt_last_cmd_puts("kernfs subdir error\n");
3909 goto out_del_list;
3910 }
3911 if (is_mba_sc(NULL))
3912 rdtgrp->mba_mbps_event = mba_mbps_default_event;
3913 }
3914
3915 goto out_unlock;
3916
3917 out_del_list:
3918 list_del(&rdtgrp->rdtgroup_list);
3919 out_rmid_free:
3920 mkdir_rdt_prepare_rmid_free(rdtgrp);
3921 out_closid_free:
3922 closid_free(closid);
3923 out_common_fail:
3924 mkdir_rdt_prepare_clean(rdtgrp);
3925 out_unlock:
3926 rdtgroup_kn_unlock(parent_kn);
3927 return ret;
3928 }
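
/*
 * Example (illustrative; the group name and exact file list are hypothetical
 * and depend on the platform): creating a group under the resctrl root gives
 * it its own allocation and monitoring files plus an empty "mon_groups"
 * directory when monitoring is supported:
 *
 *   mkdir /sys/fs/resctrl/p1
 *   ls /sys/fs/resctrl/p1
 *   cpus  cpus_list  mon_data  mon_groups  schemata  tasks  ...
 */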
3929
3930 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
3931 umode_t mode)
3932 {
3933 /* Do not accept '\n' to avoid an unparsable situation. */
3934 if (strchr(name, '\n'))
3935 return -EINVAL;
3936
3937 /*
3938 * If the parent directory is the root directory and RDT
3939 * allocation is supported, add a control and monitoring
3940 * subdirectory
3941 */
3942 if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
3943 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3944
3945 /* Else, attempt to add a monitoring subdirectory. */
3946 if (resctrl_arch_mon_capable())
3947 return rdtgroup_mkdir_mon(parent_kn, name, mode);
3948
3949 return -EPERM;
3950 }
3951
3952 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3953 {
3954 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3955 u32 closid, rmid;
3956 int cpu;
3957
3958 /* Give any tasks back to the parent group */
3959 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3960
3961 /*
3962 * Update per cpu closid/rmid of the moved CPUs first.
3963 * Note: the closid will not change, but the arch code still needs it.
3964 */
3965 closid = prdtgrp->closid;
3966 rmid = prdtgrp->mon.rmid;
3967 for_each_cpu(cpu, &rdtgrp->cpu_mask)
3968 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
3969
3970 /*
3971 * Update the MSR on the moved CPUs and on CPUs which have a
3972 * moved task running on them.
3973 */
3974 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3975 update_closid_rmid(tmpmask, NULL);
3976
3977 rdtgrp->flags = RDT_DELETED;
3978
3979 rdtgroup_unassign_cntrs(rdtgrp);
3980
3981 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
3982
3983 /*
3984 * Remove the rdtgrp from the parent ctrl_mon group's list
3985 */
3986 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3987 list_del(&rdtgrp->mon.crdtgrp_list);
3988
3989 kernfs_remove(rdtgrp->kn);
3990
3991 return 0;
3992 }
3993
3994 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
3995 {
3996 rdtgrp->flags = RDT_DELETED;
3997 list_del(&rdtgrp->rdtgroup_list);
3998
3999 kernfs_remove(rdtgrp->kn);
4000 return 0;
4001 }
4002
4003 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
4004 {
4005 u32 closid, rmid;
4006 int cpu;
4007
4008 /* Give any tasks back to the default group */
4009 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
4010
4011 /* Give any CPUs back to the default group */
4012 cpumask_or(&rdtgroup_default.cpu_mask,
4013 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
4014
4015 /* Update per cpu closid and rmid of the moved CPUs first */
4016 closid = rdtgroup_default.closid;
4017 rmid = rdtgroup_default.mon.rmid;
4018 for_each_cpu(cpu, &rdtgrp->cpu_mask)
4019 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
4020
4021 /*
4022 * Update the MSR on the moved CPUs and on CPUs which have a
4023 * moved task running on them.
4024 */
4025 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
4026 update_closid_rmid(tmpmask, NULL);
4027
4028 rdtgroup_unassign_cntrs(rdtgrp);
4029
4030 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
4031 closid_free(rdtgrp->closid);
4032
4033 rdtgroup_ctrl_remove(rdtgrp);
4034
4035 /*
4036 * Free all the child monitor group rmids.
4037 */
4038 free_all_child_rdtgrp(rdtgrp);
4039
4040 return 0;
4041 }
4042
4043 static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn)
4044 {
4045 /*
4046 * Valid within the RCU section in which it was obtained, or while
4047 * rdtgroup_mutex is held.
4048 */
4049 return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex));
4050 }
4051
4052 static int rdtgroup_rmdir(struct kernfs_node *kn)
4053 {
4054 struct kernfs_node *parent_kn;
4055 struct rdtgroup *rdtgrp;
4056 cpumask_var_t tmpmask;
4057 int ret = 0;
4058
4059 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
4060 return -ENOMEM;
4061
4062 rdtgrp = rdtgroup_kn_lock_live(kn);
4063 if (!rdtgrp) {
4064 ret = -EPERM;
4065 goto out;
4066 }
4067 parent_kn = rdt_kn_parent(kn);
4068
4069 /*
4070 * If the rdtgroup is a ctrl_mon group and parent directory
4071 * is the root directory, remove the ctrl_mon group.
4072 *
4073 * If the rdtgroup is a mon group and parent directory
4074 * is a valid "mon_groups" directory, remove the mon group.
4075 */
4076 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
4077 rdtgrp != &rdtgroup_default) {
4078 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
4079 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
4080 ret = rdtgroup_ctrl_remove(rdtgrp);
4081 } else {
4082 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
4083 }
4084 } else if (rdtgrp->type == RDTMON_GROUP &&
4085 is_mon_groups(parent_kn, rdt_kn_name(kn))) {
4086 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
4087 } else {
4088 ret = -EPERM;
4089 }
4090
4091 out:
4092 rdtgroup_kn_unlock(kn);
4093 free_cpumask_var(tmpmask);
4094 return ret;
4095 }
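
/*
 * Example (illustrative; group names are hypothetical): groups are removed
 * with rmdir. Removing a MON group hands its tasks back to the parent
 * ctrl_mon group; removing a ctrl_mon group hands its tasks and CPUs back
 * to the default group:
 *
 *   rmdir /sys/fs/resctrl/p1/mon_groups/m0
 *   rmdir /sys/fs/resctrl/p1
 */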
4096
4097 /**
4098 * mongrp_reparent() - replace parent CTRL_MON group of a MON group
4099 * @rdtgrp: the MON group whose parent should be replaced
4100 * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp
4101 * @cpus: cpumask provided by the caller for use during this call
4102 *
4103 * Replaces the parent CTRL_MON group for a MON group, resulting in all member
4104 * tasks' CLOSID immediately changing to that of the new parent group.
4105 * Monitoring data for the group is unaffected by this operation.
4106 */
4107 static void mongrp_reparent(struct rdtgroup *rdtgrp,
4108 struct rdtgroup *new_prdtgrp,
4109 cpumask_var_t cpus)
4110 {
4111 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
4112
4113 WARN_ON(rdtgrp->type != RDTMON_GROUP);
4114 WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP);
4115
4116 /* Nothing to do when simply renaming a MON group. */
4117 if (prdtgrp == new_prdtgrp)
4118 return;
4119
4120 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
4121 list_move_tail(&rdtgrp->mon.crdtgrp_list,
4122 &new_prdtgrp->mon.crdtgrp_list);
4123
4124 rdtgrp->mon.parent = new_prdtgrp;
4125 rdtgrp->closid = new_prdtgrp->closid;
4126
4127 /* Propagate updated closid to all tasks in this group. */
4128 rdt_move_group_tasks(rdtgrp, rdtgrp, cpus);
4129
4130 update_closid_rmid(cpus, NULL);
4131 }
4132
4133 static int rdtgroup_rename(struct kernfs_node *kn,
4134 struct kernfs_node *new_parent, const char *new_name)
4135 {
4136 struct kernfs_node *kn_parent;
4137 struct rdtgroup *new_prdtgrp;
4138 struct rdtgroup *rdtgrp;
4139 cpumask_var_t tmpmask;
4140 int ret;
4141
4142 rdtgrp = kernfs_to_rdtgroup(kn);
4143 new_prdtgrp = kernfs_to_rdtgroup(new_parent);
4144 if (!rdtgrp || !new_prdtgrp)
4145 return -ENOENT;
4146
4147 /* Release both kernfs active_refs before obtaining rdtgroup mutex. */
4148 rdtgroup_kn_get(rdtgrp, kn);
4149 rdtgroup_kn_get(new_prdtgrp, new_parent);
4150
4151 mutex_lock(&rdtgroup_mutex);
4152
4153 rdt_last_cmd_clear();
4154
4155 /*
4156 * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if
4157 * either kernfs_node is a file.
4158 */
4159 if (kernfs_type(kn) != KERNFS_DIR ||
4160 kernfs_type(new_parent) != KERNFS_DIR) {
4161 rdt_last_cmd_puts("Source and destination must be directories");
4162 ret = -EPERM;
4163 goto out;
4164 }
4165
4166 if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) {
4167 ret = -ENOENT;
4168 goto out;
4169 }
4170
4171 kn_parent = rdt_kn_parent(kn);
4172 if (rdtgrp->type != RDTMON_GROUP || !kn_parent ||
4173 !is_mon_groups(kn_parent, rdt_kn_name(kn))) {
4174 rdt_last_cmd_puts("Source must be a MON group\n");
4175 ret = -EPERM;
4176 goto out;
4177 }
4178
4179 if (!is_mon_groups(new_parent, new_name)) {
4180 rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n");
4181 ret = -EPERM;
4182 goto out;
4183 }
4184
4185 /*
4186 * If the MON group is monitoring CPUs, the CPUs must be assigned to the
4187 * current parent CTRL_MON group and therefore cannot be assigned to
4188 * the new parent, making the move illegal.
4189 */
4190 if (!cpumask_empty(&rdtgrp->cpu_mask) &&
4191 rdtgrp->mon.parent != new_prdtgrp) {
4192 rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n");
4193 ret = -EPERM;
4194 goto out;
4195 }
4196
4197 /*
4198 * Allocate the cpumask for use in mongrp_reparent() to avoid the
4199 * possibility of failing to allocate it after kernfs_rename() has
4200 * succeeded.
4201 */
4202 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) {
4203 ret = -ENOMEM;
4204 goto out;
4205 }
4206
4207 /*
4208 * Perform all input validation and allocations needed to ensure
4209 * mongrp_reparent() will succeed before calling kernfs_rename(),
4210 * otherwise it would be necessary to revert this call if
4211 * mongrp_reparent() failed.
4212 */
4213 ret = kernfs_rename(kn, new_parent, new_name);
4214 if (!ret)
4215 mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask);
4216
4217 free_cpumask_var(tmpmask);
4218
4219 out:
4220 mutex_unlock(&rdtgroup_mutex);
4221 rdtgroup_kn_put(rdtgrp, kn);
4222 rdtgroup_kn_put(new_prdtgrp, new_parent);
4223 return ret;
4224 }
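
/*
 * Example (illustrative; group names are hypothetical): rename is only
 * supported for moving a MON group into the "mon_groups" directory of
 * another ctrl_mon group (or renaming it within its own), e.g.:
 *
 *   mv /sys/fs/resctrl/p0/mon_groups/m0 /sys/fs/resctrl/p1/mon_groups/
 *
 * The tasks in "m0" then run with the CLOSID of "p1", while monitoring
 * continues with the group's existing RMID.
 */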
4225
4226 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
4227 {
4228 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
4229 seq_puts(seq, ",cdp");
4230
4231 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
4232 seq_puts(seq, ",cdpl2");
4233
4234 if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA)))
4235 seq_puts(seq, ",mba_MBps");
4236
4237 if (resctrl_debug)
4238 seq_puts(seq, ",debug");
4239
4240 return 0;
4241 }
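
/*
 * Example (illustrative): the options reported above mirror the mount
 * options, e.g. a mount such as:
 *
 *   mount -t resctrl resctrl -o cdp,mba_MBps /sys/fs/resctrl
 *
 * is reflected in the corresponding /proc/mounts entry.
 */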
4242
4243 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
4244 .mkdir = rdtgroup_mkdir,
4245 .rmdir = rdtgroup_rmdir,
4246 .rename = rdtgroup_rename,
4247 .show_options = rdtgroup_show_options,
4248 };
4249
4250 static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
4251 {
4252 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
4253 KERNFS_ROOT_CREATE_DEACTIVATED |
4254 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
4255 &rdtgroup_default);
4256 if (IS_ERR(rdt_root))
4257 return PTR_ERR(rdt_root);
4258
4259 ctx->kfc.root = rdt_root;
4260 rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
4261
4262 return 0;
4263 }
4264
4265 static void rdtgroup_destroy_root(void)
4266 {
4267 lockdep_assert_held(&rdtgroup_mutex);
4268
4269 kernfs_destroy_root(rdt_root);
4270 rdtgroup_default.kn = NULL;
4271 }
4272
4273 static void rdtgroup_setup_default(void)
4274 {
4275 mutex_lock(&rdtgroup_mutex);
4276
4277 rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID;
4278 rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID;
4279 rdtgroup_default.type = RDTCTRL_GROUP;
4280 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
4281
4282 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
4283
4284 mutex_unlock(&rdtgroup_mutex);
4285 }
4286
4287 static void domain_destroy_l3_mon_state(struct rdt_l3_mon_domain *d)
4288 {
4289 int idx;
4290
4291 kfree(d->cntr_cfg);
4292 bitmap_free(d->rmid_busy_llc);
4293 for_each_mbm_idx(idx) {
4294 kfree(d->mbm_states[idx]);
4295 d->mbm_states[idx] = NULL;
4296 }
4297 }
4298
4299 void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
4300 {
4301 mutex_lock(&rdtgroup_mutex);
4302
4303 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
4304 mba_sc_domain_destroy(r, d);
4305
4306 mutex_unlock(&rdtgroup_mutex);
4307 }
4308
4309 void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr)
4310 {
4311 struct rdt_l3_mon_domain *d;
4312
4313 mutex_lock(&rdtgroup_mutex);
4314
4315 /*
4316 * If resctrl is mounted, remove all the
4317 * per domain monitor data directories.
4318 */
4319 if (resctrl_mounted && resctrl_arch_mon_capable())
4320 rmdir_mondata_subdir_allrdtgrp(r, hdr);
4321
4322 if (r->rid != RDT_RESOURCE_L3)
4323 goto out_unlock;
4324
4325 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
4326 goto out_unlock;
4327
4328 d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
4329 if (resctrl_is_mbm_enabled())
4330 cancel_delayed_work(&d->mbm_over);
4331 if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) && has_busy_rmid(d)) {
4332 /*
4333 * When a package is going down, forcefully
4334 * decrement rmid->ebusy. There is no way to know
4335 * that the L3 was flushed and hence may lead to
4336 * incorrect counts in rare scenarios, but leaving
4337 * the RMID as busy creates RMID leaks if the
4338 * package never comes back.
4339 */
4340 __check_limbo(d, true);
4341 cancel_delayed_work(&d->cqm_limbo);
4342 }
4343
4344 domain_destroy_l3_mon_state(d);
4345 out_unlock:
4346 mutex_unlock(&rdtgroup_mutex);
4347 }
4348
4349 /**
4350 * domain_setup_l3_mon_state() - Initialise domain monitoring structures.
4351 * @r: The resource for the newly online domain.
4352 * @d: The newly online domain.
4353 *
4354 * Allocate monitor resources that belong to this domain.
4355 * Called when the first CPU of a domain comes online, regardless of whether
4356 * the filesystem is mounted.
4357 * During boot this may be called before global allocations have been made by
4358 * resctrl_l3_mon_resource_init().
4359 *
4360 * This runs from the CPU online path, which may execute as soon as the CPU
4361 * online callbacks are set up during resctrl initialization. The number of supported RMIDs
4362 * may be reduced if additional mon_capable resources are enumerated
4363 * at mount time. This means the rdt_l3_mon_domain::mbm_states[] and
4364 * rdt_l3_mon_domain::rmid_busy_llc allocations may be larger than needed.
4365 *
4366 * Return: 0 for success, or -ENOMEM.
4367 */
4368 static int domain_setup_l3_mon_state(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
4369 {
4370 u32 idx_limit = resctrl_arch_system_num_rmid_idx();
4371 size_t tsize = sizeof(*d->mbm_states[0]);
4372 enum resctrl_event_id eventid;
4373 int idx;
4374
4375 if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
4376 d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
4377 if (!d->rmid_busy_llc)
4378 return -ENOMEM;
4379 }
4380
4381 for_each_mbm_event_id(eventid) {
4382 if (!resctrl_is_mon_event_enabled(eventid))
4383 continue;
4384 idx = MBM_STATE_IDX(eventid);
4385 d->mbm_states[idx] = kcalloc(idx_limit, tsize, GFP_KERNEL);
4386 if (!d->mbm_states[idx])
4387 goto cleanup;
4388 }
4389
4390 if (resctrl_is_mbm_enabled() && r->mon.mbm_cntr_assignable) {
4391 tsize = sizeof(*d->cntr_cfg);
4392 d->cntr_cfg = kcalloc(r->mon.num_mbm_cntrs, tsize, GFP_KERNEL);
4393 if (!d->cntr_cfg)
4394 goto cleanup;
4395 }
4396
4397 return 0;
4398 cleanup:
4399 bitmap_free(d->rmid_busy_llc);
4400 for_each_mbm_idx(idx) {
4401 kfree(d->mbm_states[idx]);
4402 d->mbm_states[idx] = NULL;
4403 }
4404
4405 return -ENOMEM;
4406 }
4407
4408 int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
4409 {
4410 int err = 0;
4411
4412 mutex_lock(&rdtgroup_mutex);
4413
4414 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
4415 /* RDT_RESOURCE_MBA is never mon_capable */
4416 err = mba_sc_domain_allocate(r, d);
4417 }
4418
4419 mutex_unlock(&rdtgroup_mutex);
4420
4421 return err;
4422 }
4423
4424 int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr)
4425 {
4426 struct rdt_l3_mon_domain *d;
4427 int err = -EINVAL;
4428
4429 mutex_lock(&rdtgroup_mutex);
4430
4431 if (r->rid != RDT_RESOURCE_L3)
4432 goto mkdir;
4433
4434 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
4435 goto out_unlock;
4436
4437 d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
4438 err = domain_setup_l3_mon_state(r, d);
4439 if (err)
4440 goto out_unlock;
4441
4442 if (resctrl_is_mbm_enabled()) {
4443 INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
4444 mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
4445 RESCTRL_PICK_ANY_CPU);
4446 }
4447
4448 if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
4449 INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
4450
4451 mkdir:
4452 err = 0;
4453 /*
4454 * If the filesystem is not mounted then only the default resource group
4455 * exists. Creation of its directories is deferred until mount time
4456 * by rdt_get_tree() calling mkdir_mondata_all().
4457 * If resctrl is mounted, add per domain monitor data directories.
4458 */
4459 if (resctrl_mounted && resctrl_arch_mon_capable())
4460 mkdir_mondata_subdir_allrdtgrp(r, hdr);
4461
4462 out_unlock:
4463 mutex_unlock(&rdtgroup_mutex);
4464
4465 return err;
4466 }
4467
4468 void resctrl_online_cpu(unsigned int cpu)
4469 {
4470 mutex_lock(&rdtgroup_mutex);
4471 /* The CPU is set in default rdtgroup after online. */
4472 cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
4473 mutex_unlock(&rdtgroup_mutex);
4474 }
4475
4476 static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
4477 {
4478 struct rdtgroup *cr;
4479
4480 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
4481 if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
4482 break;
4483 }
4484 }
4485
4486 static struct rdt_l3_mon_domain *get_mon_domain_from_cpu(int cpu,
4487 struct rdt_resource *r)
4488 {
4489 struct rdt_l3_mon_domain *d;
4490
4491 lockdep_assert_cpus_held();
4492
4493 list_for_each_entry(d, &r->mon_domains, hdr.list) {
4494 /* Find the domain that contains this CPU */
4495 if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
4496 return d;
4497 }
4498
4499 return NULL;
4500 }
4501
4502 void resctrl_offline_cpu(unsigned int cpu)
4503 {
4504 struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
4505 struct rdt_l3_mon_domain *d;
4506 struct rdtgroup *rdtgrp;
4507
4508 mutex_lock(&rdtgroup_mutex);
4509 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
4510 if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
4511 clear_childcpus(rdtgrp, cpu);
4512 break;
4513 }
4514 }
4515
4516 if (!l3->mon_capable)
4517 goto out_unlock;
4518
4519 d = get_mon_domain_from_cpu(cpu, l3);
4520 if (d) {
4521 if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
4522 cancel_delayed_work(&d->mbm_over);
4523 mbm_setup_overflow_handler(d, 0, cpu);
4524 }
4525 if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) &&
4526 cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
4527 cancel_delayed_work(&d->cqm_limbo);
4528 cqm_setup_limbo_handler(d, 0, cpu);
4529 }
4530 }
4531
4532 out_unlock:
4533 mutex_unlock(&rdtgroup_mutex);
4534 }
4535
4536 /*
4537 * resctrl_init - resctrl filesystem initialization
4538 *
4539 * Set up the resctrl filesystem: set up the root, create the mount point,
4540 * register the resctrl filesystem, and initialize the files under the root directory.
4541 *
4542 * Return: 0 on success or -errno
4543 */
4544 int resctrl_init(void)
4545 {
4546 int ret = 0;
4547
4548 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
4549 sizeof(last_cmd_status_buf));
4550
4551 rdtgroup_setup_default();
4552
4553 thread_throttle_mode_init();
4554
4555 io_alloc_init();
4556
4557 ret = resctrl_l3_mon_resource_init();
4558 if (ret)
4559 return ret;
4560
4561 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
4562 if (ret) {
4563 resctrl_l3_mon_resource_exit();
4564 return ret;
4565 }
4566
4567 ret = register_filesystem(&rdt_fs_type);
4568 if (ret)
4569 goto cleanup_mountpoint;
4570
4571 /*
4572 * Adding the resctrl debugfs directory here may not be ideal since
4573 * it would let the resctrl debugfs directory appear on the debugfs
4574 * filesystem before the resctrl filesystem is mounted.
4575 * It may also be ok since that would enable debugging of RDT before
4576 * resctrl is mounted.
4577 * The reason why the debugfs directory is created here and not in
4578 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
4579 * during the debugfs directory creation also &sb->s_type->i_mutex_key
4580 * (the lockdep class of inode->i_rwsem). Other filesystem
4581 * interactions (eg. SyS_getdents) have the lock ordering:
4582 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
4583 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
4584 * is taken, thus creating the dependency:
4585 * &mm->mmap_lock --> rdtgroup_mutex
4586 * which, combined with the other two lock dependencies above, could
4587 * lead to deadlock.
4588 * By creating the debugfs directory here we avoid that dependency.
4589 * (File operations cannot occur until the filesystem is mounted, but
4590 * there is no way to tell lockdep that.)
4591 */
4592 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
4593
4594 return 0;
4595
4596 cleanup_mountpoint:
4597 sysfs_remove_mount_point(fs_kobj, "resctrl");
4598 resctrl_l3_mon_resource_exit();
4599
4600 return ret;
4601 }
4602
4603 static bool resctrl_online_domains_exist(void)
4604 {
4605 struct rdt_resource *r;
4606
4607 /*
4608 * Only walk the capable resources; this allows resctrl_arch_get_resource()
4609 * to return dummy 'not capable' resources for everything else.
4610 */
4611 for_each_alloc_capable_rdt_resource(r) {
4612 if (!list_empty(&r->ctrl_domains))
4613 return true;
4614 }
4615
4616 for_each_mon_capable_rdt_resource(r) {
4617 if (!list_empty(&r->mon_domains))
4618 return true;
4619 }
4620
4621 return false;
4622 }
4623
4624 /**
4625 * resctrl_exit() - Remove the resctrl filesystem and free resources.
4626 *
4627 * Called by the architecture code in response to a fatal error.
4628 * Removes resctrl files and structures from kernfs to prevent further
4629 * configuration.
4630 *
4631 * When called by the architecture code, all CPUs and resctrl domains must be
4632 * offline. This ensures the limbo and overflow handlers are not scheduled to
4633 * run, meaning the data structures they access can be freed by
4634 * resctrl_l3_mon_resource_exit().
4635 *
4636 * After resctrl_exit() returns, the architecture code should return an
4637 * error from all resctrl_arch_ functions that can do this.
4638 * resctrl_arch_get_resource() must continue to return struct rdt_resources
4639 * with the correct rid field to ensure the filesystem can be unmounted.
4640 */
4641 void resctrl_exit(void)
4642 {
4643 cpus_read_lock();
4644 WARN_ON_ONCE(resctrl_online_domains_exist());
4645
4646 mutex_lock(&rdtgroup_mutex);
4647 resctrl_fs_teardown();
4648 mutex_unlock(&rdtgroup_mutex);
4649
4650 cpus_read_unlock();
4651
4652 debugfs_remove_recursive(debugfs_resctrl);
4653 debugfs_resctrl = NULL;
4654 unregister_filesystem(&rdt_fs_type);
4655
4656 /*
4657 * Do not remove the sysfs mount point added by resctrl_init() so that
4658 * it can be used to umount resctrl.
4659 */
4660
4661 resctrl_l3_mon_resource_exit();
4662 free_rmid_lru_list();
4663 }
4664