Lines Matching +full:s +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
95 void rdt_last_cmd_puts(const char *s) in rdt_last_cmd_puts() argument
98 seq_buf_puts(&last_cmd_status, s); in rdt_last_cmd_puts()
119 list_for_each_entry(dom, &r->ctrl_domains, hdr.list) in rdt_staged_configs_clear()
120 memset(dom->staged_config, 0, sizeof(dom->staged_config)); in rdt_staged_configs_clear()
136 * + We can simply set current's closid to assign a task to a resource
140 * - We give up some options in configuring resource groups across multi-socket
142 * - Our choices on how to configure each resource become progressively more
156 struct resctrl_schema *s; in closid_init() local
164 list_for_each_entry(s, &resctrl_schema_all, list) in closid_init()
165 rdt_min_closid = min(rdt_min_closid, s->num_closid); in closid_init()
169 return -ENOMEM; in closid_init()
201 return -ENOSPC; in closid_alloc()
216 * closid_allocated - test if provided closid is in use
230 * rdtgroup_mode_by_closid - Return mode of resource group with closid
233 * Each resource group is associated with a @closid. Here the mode
236 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
243 if (rdtgrp->closid == closid) in rdtgroup_mode_by_closid()
244 return rdtgrp->mode; in rdtgroup_mode_by_closid()
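The two matched lines above are only the comparison inside the lookup loop of rdtgroup_mode_by_closid(); the rest of the function did not match. As a rough sketch (assumed shape, not a verbatim copy of the file), it presumably walks the global rdt_all_groups list (the same list used by rdtgroup_mkdir_ctrl_mon() further down) and falls back to an out-of-range value when no group owns the closid:

	enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
	{
		struct rdtgroup *rdtgrp;

		/* Scan every resource group for one owning this closid. */
		list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
			if (rdtgrp->closid == closid)
				return rdtgrp->mode;
		}

		/* No owner found: report an out-of-range mode. */
		return RDT_NUM_MODES;
	}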
253 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
254 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
258 * rdtgroup_mode_str - Return the string representation of mode
259 * @mode: the resource group mode as &enum rdtgroup_mode
261 * Return: string representation of valid mode, "unknown" otherwise
263 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) in rdtgroup_mode_str() argument
265 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) in rdtgroup_mode_str()
268 return rdt_mode_str[mode]; in rdtgroup_mode_str()
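The "pseudo-locksetup" and "pseudo-locked" entries matched just above rdtgroup_mode_str() belong to a lookup table indexed by enum rdtgrp_mode. Judging from the "shareable" and "exclusive" strings compared in rdtgroup_mode_write() later in this listing, the full table presumably reads roughly (sketch, not verbatim):

	static const char * const rdt_mode_str[] = {
		[RDT_MODE_SHAREABLE]        = "shareable",
		[RDT_MODE_EXCLUSIVE]        = "exclusive",
		[RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
		[RDT_MODE_PSEUDO_LOCKED]    = "pseudo-locked",
	};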
290 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, in rdtgroup_add_file()
292 0, rft->kf_ops, rft, NULL, NULL); in rdtgroup_add_file()
307 struct kernfs_open_file *of = m->private; in rdtgroup_seqfile_show()
308 struct rftype *rft = of->kn->priv; in rdtgroup_seqfile_show()
310 if (rft->seq_show) in rdtgroup_seqfile_show()
311 return rft->seq_show(of, m, arg); in rdtgroup_seqfile_show()
318 struct rftype *rft = of->kn->priv; in rdtgroup_file_write()
320 if (rft->write) in rdtgroup_file_write()
321 return rft->write(of, buf, nbytes, off); in rdtgroup_file_write()
323 return -EINVAL; in rdtgroup_file_write()
339 struct rftype *rft = of->kn->priv; in is_cpu_list()
341 return rft->flags & RFTYPE_FLAGS_CPUS_LIST; in is_cpu_list()
345 struct seq_file *s, void *v) in rdtgroup_cpus_show() argument
351 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_cpus_show()
354 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { in rdtgroup_cpus_show()
355 if (!rdtgrp->plr->d) { in rdtgroup_cpus_show()
358 ret = -ENODEV; in rdtgroup_cpus_show()
360 mask = &rdtgrp->plr->d->hdr.cpu_mask; in rdtgroup_cpus_show()
361 seq_printf(s, is_cpu_list(of) ? in rdtgroup_cpus_show()
366 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", in rdtgroup_cpus_show()
367 cpumask_pr_args(&rdtgrp->cpu_mask)); in rdtgroup_cpus_show()
370 ret = -ENOENT; in rdtgroup_cpus_show()
372 rdtgroup_kn_unlock(of->kn); in rdtgroup_cpus_show()
389 defaults.closid = r->closid; in update_closid_rmid()
390 defaults.rmid = r->mon.rmid; in update_closid_rmid()
400 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; in cpus_mon_write()
404 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); in cpus_mon_write()
407 return -EINVAL; in cpus_mon_write()
411 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); in cpus_mon_write()
414 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); in cpus_mon_write()
420 * and update per-cpu rmid in cpus_mon_write()
422 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); in cpus_mon_write()
424 head = &prgrp->mon.crdtgrp_list; in cpus_mon_write()
428 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, in cpus_mon_write()
434 /* Done pushing/pulling - update this group with new mask */ in cpus_mon_write()
435 cpumask_copy(&rdtgrp->cpu_mask, newmask); in cpus_mon_write()
444 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); in cpumask_rdtgrp_clear()
446 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) in cpumask_rdtgrp_clear()
447 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); in cpumask_rdtgrp_clear()
457 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); in cpus_ctrl_write()
462 return -EINVAL; in cpus_ctrl_write()
473 * the prev group's child groups that owned them in cpus_ctrl_write()
474 * and update per-cpu closid/rmid. in cpus_ctrl_write()
476 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); in cpus_ctrl_write()
481 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); in cpus_ctrl_write()
488 /* Done pushing/pulling - update this group with new mask */ in cpus_ctrl_write()
489 cpumask_copy(&rdtgrp->cpu_mask, newmask); in cpus_ctrl_write()
495 head = &rdtgrp->mon.crdtgrp_list; in cpus_ctrl_write()
497 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); in cpus_ctrl_write()
499 cpumask_clear(&crgrp->cpu_mask); in cpus_ctrl_write()
513 return -EINVAL; in rdtgroup_cpus_write()
516 return -ENOMEM; in rdtgroup_cpus_write()
519 return -ENOMEM; in rdtgroup_cpus_write()
524 return -ENOMEM; in rdtgroup_cpus_write()
527 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_cpus_write()
529 ret = -ENOENT; in rdtgroup_cpus_write()
535 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || in rdtgroup_cpus_write()
536 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_cpus_write()
537 ret = -EINVAL; in rdtgroup_cpus_write()
538 rdt_last_cmd_puts("Pseudo-locking in progress\n"); in rdtgroup_cpus_write()
555 ret = -EINVAL; in rdtgroup_cpus_write()
560 if (rdtgrp->type == RDTCTRL_GROUP) in rdtgroup_cpus_write()
562 else if (rdtgrp->type == RDTMON_GROUP) in rdtgroup_cpus_write()
565 ret = -EINVAL; in rdtgroup_cpus_write()
568 rdtgroup_kn_unlock(of->kn); in rdtgroup_cpus_write()
577 * rdtgroup_remove - the helper to remove resource group safely
590 kernfs_put(rdtgrp->kn); in rdtgroup_remove()
614 u32 closid, rmid = rdtgrp->mon.rmid; in task_in_rdtgroup()
616 if (rdtgrp->type == RDTCTRL_GROUP) in task_in_rdtgroup()
617 closid = rdtgrp->closid; in task_in_rdtgroup()
618 else if (rdtgrp->type == RDTMON_GROUP) in task_in_rdtgroup()
619 closid = rdtgrp->mon.parent->closid; in task_in_rdtgroup()
635 * Set the task's closid/rmid before the PQR_ASSOC MSR can be in __rdtgroup_move_task()
642 if (rdtgrp->type == RDTMON_GROUP && in __rdtgroup_move_task()
643 !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { in __rdtgroup_move_task()
645 return -EINVAL; in __rdtgroup_move_task()
648 if (rdtgrp->type == RDTMON_GROUP) in __rdtgroup_move_task()
649 resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, in __rdtgroup_move_task()
650 rdtgrp->mon.rmid); in __rdtgroup_move_task()
652 resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, in __rdtgroup_move_task()
653 rdtgrp->mon.rmid); in __rdtgroup_move_task()
656 * Ensure the task's closid and rmid are written before determining if in __rdtgroup_move_task()
658 * This pairs with the full barrier between the rq->curr update and in __rdtgroup_move_task()
664 * By now, the task's closid and rmid are set. If the task is current in __rdtgroup_move_task()
676 return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && in is_closid_match()
677 resctrl_arch_match_closid(t, r->closid)); in is_closid_match()
682 return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && in is_rmid_match()
683 resctrl_arch_match_rmid(t, r->mon.parent->closid, in is_rmid_match()
684 r->mon.rmid)); in is_rmid_match()
688 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
723 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && in rdtgroup_task_write_permission()
724 !uid_eq(cred->euid, tcred->uid) && in rdtgroup_task_write_permission()
725 !uid_eq(cred->euid, tcred->suid)) { in rdtgroup_task_write_permission()
726 rdt_last_cmd_printf("No permission to move task %d\n", task->pid); in rdtgroup_task_write_permission()
727 ret = -EPERM; in rdtgroup_task_write_permission()
746 return -ESRCH; in rdtgroup_move_task()
771 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_tasks_write()
773 rdtgroup_kn_unlock(of->kn); in rdtgroup_tasks_write()
774 return -ENOENT; in rdtgroup_tasks_write()
778 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || in rdtgroup_tasks_write()
779 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_tasks_write()
780 ret = -EINVAL; in rdtgroup_tasks_write()
781 rdt_last_cmd_puts("Pseudo-locking in progress\n"); in rdtgroup_tasks_write()
789 rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); in rdtgroup_tasks_write()
790 ret = -EINVAL; in rdtgroup_tasks_write()
796 ret = -EINVAL; in rdtgroup_tasks_write()
808 rdtgroup_kn_unlock(of->kn); in rdtgroup_tasks_write()
813 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) in show_rdt_tasks() argument
823 seq_printf(s, "%d\n", pid); in show_rdt_tasks()
830 struct seq_file *s, void *v) in rdtgroup_tasks_show() argument
835 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_tasks_show()
837 show_rdt_tasks(rdtgrp, s); in rdtgroup_tasks_show()
839 ret = -ENOENT; in rdtgroup_tasks_show()
840 rdtgroup_kn_unlock(of->kn); in rdtgroup_tasks_show()
846 struct seq_file *s, void *v) in rdtgroup_closid_show() argument
851 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_closid_show()
853 seq_printf(s, "%u\n", rdtgrp->closid); in rdtgroup_closid_show()
855 ret = -ENOENT; in rdtgroup_closid_show()
856 rdtgroup_kn_unlock(of->kn); in rdtgroup_closid_show()
862 struct seq_file *s, void *v) in rdtgroup_rmid_show() argument
867 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_rmid_show()
869 seq_printf(s, "%u\n", rdtgrp->mon.rmid); in rdtgroup_rmid_show()
871 ret = -ENOENT; in rdtgroup_rmid_show()
872 rdtgroup_kn_unlock(of->kn); in rdtgroup_rmid_show()
909 int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, in proc_resctrl_show() argument
919 seq_puts(s, "res:\nmon:\n"); in proc_resctrl_show()
930 if (rdtg->mode != RDT_MODE_SHAREABLE && in proc_resctrl_show()
931 rdtg->mode != RDT_MODE_EXCLUSIVE) in proc_resctrl_show()
934 if (!resctrl_arch_match_closid(tsk, rdtg->closid)) in proc_resctrl_show()
937 seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", in proc_resctrl_show()
938 rdt_kn_name(rdtg->kn)); in proc_resctrl_show()
939 seq_puts(s, "mon:"); in proc_resctrl_show()
940 list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, in proc_resctrl_show()
942 if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, in proc_resctrl_show()
943 crg->mon.rmid)) in proc_resctrl_show()
945 seq_printf(s, "%s", rdt_kn_name(crg->kn)); in proc_resctrl_show()
948 seq_putc(s, '\n'); in proc_resctrl_show()
955 ret = -ENOENT; in proc_resctrl_show()
971 seq_printf(seq, "%.*s", len, last_cmd_status_buf); in rdt_last_cmd_status_show()
985 return rcu_dereference(kn->__parent)->priv; in rdt_kn_parent_priv()
991 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_num_closids_show() local
993 seq_printf(seq, "%u\n", s->num_closid); in rdt_num_closids_show()
1000 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_default_ctrl_show() local
1001 struct rdt_resource *r = s->res; in rdt_default_ctrl_show()
1010 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_min_cbm_bits_show() local
1011 struct rdt_resource *r = s->res; in rdt_min_cbm_bits_show()
1013 seq_printf(seq, "%u\n", r->cache.min_cbm_bits); in rdt_min_cbm_bits_show()
1020 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_shareable_bits_show() local
1021 struct rdt_resource *r = s->res; in rdt_shareable_bits_show()
1023 seq_printf(seq, "%x\n", r->cache.shareable_bits); in rdt_shareable_bits_show()
1028 * rdt_bit_usage_show - Display current usage of resources
1034 * 0 - currently unused
1035 * X - currently available for sharing and used by software and hardware
1036 * H - currently used by hardware only but available for software use
1037 * S - currently used and shareable by software only
1038 * E - currently used exclusively by one resource group
1039 * P - currently pseudo-locked by one resource group
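Putting this legend together with the per-domain loop below it (the "%d=" prefix, one character emitted per CBM bit, domains separated by ';'), a hypothetical line of an info/L3/bit_usage file for two cache domains with a 16-bit CBM might look like (illustrative only, not taken from a real system):

	0=SSSSSSSSSSSSSSSS;1=SSSSSSSSEEEEXXXX

i.e. in domain 1 four bits are exclusively owned by one group and four are shared between hardware and software, while everything else is software-shareable.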
1044 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_bit_usage_show() local
1051 struct rdt_resource *r = s->res; in rdt_bit_usage_show()
1054 enum rdtgrp_mode mode; in rdt_bit_usage_show() local
1060 hw_shareable = r->cache.shareable_bits; in rdt_bit_usage_show()
1061 list_for_each_entry(dom, &r->ctrl_domains, hdr.list) { in rdt_bit_usage_show()
1066 seq_printf(seq, "%d=", dom->hdr.id); in rdt_bit_usage_show()
1071 s->conf_type); in rdt_bit_usage_show()
1072 mode = rdtgroup_mode_by_closid(i); in rdt_bit_usage_show()
1073 switch (mode) { in rdt_bit_usage_show()
1084 * associated with this CLOSID in this mode in rdt_bit_usage_show()
1092 "invalid mode for closid %d\n", i); in rdt_bit_usage_show()
1096 for (i = r->cache.cbm_len - 1; i >= 0; i--) { in rdt_bit_usage_show()
1097 pseudo_locked = dom->plr ? dom->plr->cbm : 0; in rdt_bit_usage_show()
1107 seq_putc(seq, 'S'); in rdt_bit_usage_show()
1126 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_min_bw_show() local
1127 struct rdt_resource *r = s->res; in rdt_min_bw_show()
1129 seq_printf(seq, "%u\n", r->membw.min_bw); in rdt_min_bw_show()
1136 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in rdt_num_rmids_show()
1138 seq_printf(seq, "%d\n", r->mon.num_rmid); in rdt_num_rmids_show()
1146 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in rdt_mon_features_show()
1150 if (mevt->rid != r->rid || !mevt->enabled) in rdt_mon_features_show()
1152 seq_printf(seq, "%s\n", mevt->name); in rdt_mon_features_show()
1153 if (mevt->configurable && in rdt_mon_features_show()
1155 seq_printf(seq, "%s_config\n", mevt->name); in rdt_mon_features_show()
1164 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_bw_gran_show() local
1165 struct rdt_resource *r = s->res; in rdt_bw_gran_show()
1167 seq_printf(seq, "%u\n", r->membw.bw_gran); in rdt_bw_gran_show()
1174 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_delay_linear_show() local
1175 struct rdt_resource *r = s->res; in rdt_delay_linear_show()
1177 seq_printf(seq, "%u\n", r->membw.delay_linear); in rdt_delay_linear_show()
1192 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_thread_throttle_mode_show() local
1193 struct rdt_resource *r = s->res; in rdt_thread_throttle_mode_show()
1195 switch (r->membw.throttle_mode) { in rdt_thread_throttle_mode_show()
1197 seq_puts(seq, "per-thread\n"); in rdt_thread_throttle_mode_show()
1223 return -EINVAL; in max_threshold_occ_write()
1231 * rdtgroup_mode_show - Display mode of this resource group
1234 struct seq_file *s, void *v) in rdtgroup_mode_show() argument
1238 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_mode_show()
1240 rdtgroup_kn_unlock(of->kn); in rdtgroup_mode_show()
1241 return -ENOENT; in rdtgroup_mode_show()
1244 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); in rdtgroup_mode_show()
1246 rdtgroup_kn_unlock(of->kn); in rdtgroup_mode_show()
1266 struct resctrl_schema *s = rdt_kn_parent_priv(of->kn); in rdt_has_sparse_bitmasks_show() local
1267 struct rdt_resource *r = s->res; in rdt_has_sparse_bitmasks_show()
1269 seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks); in rdt_has_sparse_bitmasks_show()
1275 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
1286 * resource groups in exclusive mode will be considered. If @exclusive
1299 enum rdtgrp_mode mode; in __rdtgroup_cbm_overlaps() local
1305 ctrl_b = r->cache.shareable_bits; in __rdtgroup_cbm_overlaps()
1306 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) in __rdtgroup_cbm_overlaps()
1313 mode = rdtgroup_mode_by_closid(i); in __rdtgroup_cbm_overlaps()
1315 mode != RDT_MODE_PSEUDO_LOCKSETUP) { in __rdtgroup_cbm_overlaps()
1316 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { in __rdtgroup_cbm_overlaps()
1318 if (mode == RDT_MODE_EXCLUSIVE) in __rdtgroup_cbm_overlaps()
1331 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
1332 * @s: Schema for the resource to which domain instance @d belongs.
1341 * which the CBM is intended though - when dealing with CDP resources that
1350 bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d, in rdtgroup_cbm_overlaps() argument
1353 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); in rdtgroup_cbm_overlaps()
1354 struct rdt_resource *r = s->res; in rdtgroup_cbm_overlaps()
1356 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, in rdtgroup_cbm_overlaps()
1360 if (!resctrl_arch_get_cdp_enabled(r->rid)) in rdtgroup_cbm_overlaps()
1366 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1380 int closid = rdtgrp->closid; in rdtgroup_mode_test_exclusive()
1382 struct resctrl_schema *s; in rdtgroup_mode_test_exclusive() local
1387 /* Walking r->domains, ensure it can't race with cpuhp */ in rdtgroup_mode_test_exclusive()
1390 list_for_each_entry(s, &resctrl_schema_all, list) { in rdtgroup_mode_test_exclusive()
1391 r = s->res; in rdtgroup_mode_test_exclusive()
1392 if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) in rdtgroup_mode_test_exclusive()
1395 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in rdtgroup_mode_test_exclusive()
1397 s->conf_type); in rdtgroup_mode_test_exclusive()
1398 if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { in rdtgroup_mode_test_exclusive()
1414 * rdtgroup_mode_write - Modify the resource group's mode
1420 enum rdtgrp_mode mode; in rdtgroup_mode_write() local
1424 if (nbytes == 0 || buf[nbytes - 1] != '\n') in rdtgroup_mode_write()
1425 return -EINVAL; in rdtgroup_mode_write()
1426 buf[nbytes - 1] = '\0'; in rdtgroup_mode_write()
1428 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_mode_write()
1430 rdtgroup_kn_unlock(of->kn); in rdtgroup_mode_write()
1431 return -ENOENT; in rdtgroup_mode_write()
1436 mode = rdtgrp->mode; in rdtgroup_mode_write()
1438 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || in rdtgroup_mode_write()
1439 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || in rdtgroup_mode_write()
1440 (!strcmp(buf, "pseudo-locksetup") && in rdtgroup_mode_write()
1441 mode == RDT_MODE_PSEUDO_LOCKSETUP) || in rdtgroup_mode_write()
1442 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) in rdtgroup_mode_write()
1445 if (mode == RDT_MODE_PSEUDO_LOCKED) { in rdtgroup_mode_write()
1446 rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); in rdtgroup_mode_write()
1447 ret = -EINVAL; in rdtgroup_mode_write()
1452 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_mode_write()
1457 rdtgrp->mode = RDT_MODE_SHAREABLE; in rdtgroup_mode_write()
1460 ret = -EINVAL; in rdtgroup_mode_write()
1463 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_mode_write()
1468 rdtgrp->mode = RDT_MODE_EXCLUSIVE; in rdtgroup_mode_write()
1470 !strcmp(buf, "pseudo-locksetup")) { in rdtgroup_mode_write()
1474 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; in rdtgroup_mode_write()
1476 rdt_last_cmd_puts("Unknown or unsupported mode\n"); in rdtgroup_mode_write()
1477 ret = -EINVAL; in rdtgroup_mode_write()
1481 rdtgroup_kn_unlock(of->kn); in rdtgroup_mode_write()
1486 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1507 if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE)) in rdtgroup_cbm_to_size()
1510 num_b = bitmap_weight(&cbm, r->cache.cbm_len); in rdtgroup_cbm_to_size()
1511 ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope); in rdtgroup_cbm_to_size()
1513 size = ci->size / r->cache.cbm_len * num_b; in rdtgroup_cbm_to_size()
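A worked example of the formula above, with assumed numbers purely for illustration: for a 32 MiB cache (ci->size = 33554432), a 16-bit capacity bitmask (cbm_len = 16) and a CBM with four bits set (num_b = 4), the reported size is 33554432 / 16 * 4 = 8388608 bytes, i.e. 8 MiB.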
1527 if (r->rid != RDT_RESOURCE_MBA) in is_mba_sc()
1530 return r->membw.mba_sc; in is_mba_sc()
1534 * rdtgroup_size_show - Display size in bytes of allocated regions
1540 struct seq_file *s, void *v) in rdtgroup_size_show() argument
1553 rdtgrp = rdtgroup_kn_lock_live(of->kn); in rdtgroup_size_show()
1555 rdtgroup_kn_unlock(of->kn); in rdtgroup_size_show()
1556 return -ENOENT; in rdtgroup_size_show()
1559 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { in rdtgroup_size_show()
1560 if (!rdtgrp->plr->d) { in rdtgroup_size_show()
1563 ret = -ENODEV; in rdtgroup_size_show()
1565 seq_printf(s, "%*s:", max_name_width, in rdtgroup_size_show()
1566 rdtgrp->plr->s->name); in rdtgroup_size_show()
1567 size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, in rdtgroup_size_show()
1568 rdtgrp->plr->d, in rdtgroup_size_show()
1569 rdtgrp->plr->cbm); in rdtgroup_size_show()
1570 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size); in rdtgroup_size_show()
1575 closid = rdtgrp->closid; in rdtgroup_size_show()
1578 r = schema->res; in rdtgroup_size_show()
1579 type = schema->conf_type; in rdtgroup_size_show()
1581 seq_printf(s, "%*s:", max_name_width, schema->name); in rdtgroup_size_show()
1582 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in rdtgroup_size_show()
1584 seq_putc(s, ';'); in rdtgroup_size_show()
1585 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { in rdtgroup_size_show()
1589 ctrl = d->mbps_val[closid]; in rdtgroup_size_show()
1594 if (r->rid == RDT_RESOURCE_MBA || in rdtgroup_size_show()
1595 r->rid == RDT_RESOURCE_SMBA) in rdtgroup_size_show()
1600 seq_printf(s, "%d=%u", d->hdr.id, size); in rdtgroup_size_show()
1603 seq_putc(s, '\n'); in rdtgroup_size_show()
1607 rdtgroup_kn_unlock(of->kn); in rdtgroup_size_show()
1614 smp_call_function_any(&mon_info->d->hdr.cpu_mask, in mondata_config_read()
1618 static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) in mbm_config_show() argument
1627 list_for_each_entry(dom, &r->mon_domains, hdr.list) { in mbm_config_show()
1629 seq_puts(s, ";"); in mbm_config_show()
1637 seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config); in mbm_config_show()
1640 seq_puts(s, "\n"); in mbm_config_show()
1651 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in mbm_total_bytes_config_show()
1661 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in mbm_local_bytes_config_show()
1692 smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write, in mbm_config_write_domain()
1713 /* Walking r->domains, ensure it can't race with cpuhp */ in mon_config_write()
1725 rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); in mon_config_write()
1726 return -EINVAL; in mon_config_write()
1730 rdt_last_cmd_puts("Non-numeric event configuration value\n"); in mon_config_write()
1731 return -EINVAL; in mon_config_write()
1735 if ((val & r->mon.mbm_cfg_mask) != val) { in mon_config_write()
1737 r->mon.mbm_cfg_mask); in mon_config_write()
1738 return -EINVAL; in mon_config_write()
1741 list_for_each_entry(d, &r->mon_domains, hdr.list) { in mon_config_write()
1742 if (d->hdr.id == dom_id) { in mon_config_write()
1748 return -EINVAL; in mon_config_write()
1755 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in mbm_total_bytes_config_write()
1759 if (nbytes == 0 || buf[nbytes - 1] != '\n') in mbm_total_bytes_config_write()
1760 return -EINVAL; in mbm_total_bytes_config_write()
1767 buf[nbytes - 1] = '\0'; in mbm_total_bytes_config_write()
1781 struct rdt_resource *r = rdt_kn_parent_priv(of->kn); in mbm_local_bytes_config_write()
1785 if (nbytes == 0 || buf[nbytes - 1] != '\n') in mbm_local_bytes_config_write()
1786 return -EINVAL; in mbm_local_bytes_config_write()
1793 buf[nbytes - 1] = '\0'; in mbm_local_bytes_config_write()
1804 * resctrl_bmec_files_show() - Controls the visibility of BMEC-related resctrl
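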
1817 sprintf(name, "%s_MON", r->name); in resctrl_bmec_files_show()
1845 .mode = 0444,
1852 .mode = 0644,
1859 .mode = 0444,
1866 .mode = 0444,
1873 .mode = 0444,
1879 .mode = 0444,
1886 .mode = 0444,
1893 .mode = 0444,
1899 .mode = 0444,
1906 .mode = 0444,
1913 .mode = 0444,
1920 .mode = 0444,
1927 .mode = 0444,
1934 .mode = 0444,
1946 .mode = 0444,
1952 .mode = 0644,
1960 .mode = 0644,
1967 .mode = 0644,
1974 .mode = 0644,
1981 .mode = 0644,
1988 .mode = 0644,
1996 .mode = 0644,
2004 .mode = 0644,
2013 .mode = 0644,
2021 .mode = 0444,
2028 .mode = 0644,
2036 .mode = 0644,
2042 .name = "mode",
2043 .mode = 0644,
2051 .mode = 0444,
2058 .mode = 0444,
2065 .mode = 0444,
2086 if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { in rdtgroup_add_files()
2095 pr_warn("Failed to add %s, err=%d\n", rft->name, ret); in rdtgroup_add_files()
2096 while (--rft >= rfts) { in rdtgroup_add_files()
2097 if ((fflags & rft->fflags) == rft->fflags) in rdtgroup_add_files()
2098 kernfs_remove_by_name(kn, rft->name); in rdtgroup_add_files()
2112 if (!strcmp(rft->name, name)) in rdtgroup_get_rftype_by_name()
2125 if (r_mba->alloc_capable && in thread_throttle_mode_init()
2126 r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) in thread_throttle_mode_init()
2127 throttle_mode = r_mba->membw.throttle_mode; in thread_throttle_mode_init()
2130 if (r_smba->alloc_capable && in thread_throttle_mode_init()
2131 r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) in thread_throttle_mode_init()
2132 throttle_mode = r_smba->membw.throttle_mode; in thread_throttle_mode_init()
2147 rft->fflags = fflags; in resctrl_file_fflags_init()
2151 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
2159 * resctrl file has been locked down - that it is not relevant to the
2161 * on to protect from user access because after the file's permissions
2173 kn = kernfs_find_and_get_ns(r->kn, name, NULL); in rdtgroup_kn_mode_restrict()
2175 return -ENOENT; in rdtgroup_kn_mode_restrict()
2195 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
2217 if (!strcmp(rft->name, name)) in rdtgroup_kn_mode_restore()
2218 iattr.ia_mode = rft->mode & mask; in rdtgroup_kn_mode_restore()
2221 kn = kernfs_find_and_get_ns(r->kn, name, NULL); in rdtgroup_kn_mode_restore()
2223 return -ENOENT; in rdtgroup_kn_mode_restore()
2229 iattr.ia_mode |= parent->mode; in rdtgroup_kn_mode_restore()
2253 kn_subdir = kernfs_create_dir(l3_mon_kn, "event_configs", l3_mon_kn->mode, NULL); in resctrl_mkdir_event_configs()
2262 if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid)) in resctrl_mkdir_event_configs()
2265 kn_subdir2 = kernfs_create_dir(kn_subdir, mevt->name, kn_subdir->mode, mevt); in resctrl_mkdir_event_configs()
2292 kn_info->mode, priv); in rdtgroup_mkdir_info_resdir()
2306 if (r->mon.mbm_cntr_assignable) { in rdtgroup_mkdir_info_resdir()
2311 * Hide BMEC related files if mbm_event mode in rdtgroup_mkdir_info_resdir()
2326 switch (r->rid) { in fflags_from_resource()
2340 struct resctrl_schema *s; in rdtgroup_create_info_dir() local
2347 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); in rdtgroup_create_info_dir()
2356 list_for_each_entry(s, &resctrl_schema_all, list) { in rdtgroup_create_info_dir()
2357 r = s->res; in rdtgroup_create_info_dir()
2359 ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); in rdtgroup_create_info_dir()
2366 sprintf(name, "%s_MON", r->name); in rdtgroup_create_info_dir()
2393 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); in mongroup_create_dir()
2415 return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; in is_mba_linear()
2421 int cpu = cpumask_any(&d->hdr.cpu_mask); in mba_sc_domain_allocate()
2424 d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), in mba_sc_domain_allocate()
2426 if (!d->mbps_val) in mba_sc_domain_allocate()
2427 return -ENOMEM; in mba_sc_domain_allocate()
2430 d->mbps_val[i] = MBA_MAX_MBPS; in mba_sc_domain_allocate()
2438 kfree(d->mbps_val); in mba_sc_domain_destroy()
2439 d->mbps_val = NULL; in mba_sc_domain_destroy()
2454 r->alloc_capable && is_mba_linear() && in supports_mba_mbps()
2455 r->ctrl_scope == rmbm->mon_scope); in supports_mba_mbps()
2471 return -EINVAL; in set_mba_sc()
2473 r->membw.mba_sc = mba_sc; in set_mba_sc()
2477 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in set_mba_sc()
2479 d->mbps_val[i] = MBA_MAX_MBPS; in set_mba_sc()
2500 * All the resource directories use "kn->priv" in kernfs_to_rdtgroup()
2506 rcu_access_pointer(kn->__parent) == kn_info) in kernfs_to_rdtgroup()
2509 return kn->priv; in kernfs_to_rdtgroup()
2517 atomic_inc(&rdtgrp->waitcount); in rdtgroup_kn_get()
2523 if (atomic_dec_and_test(&rdtgrp->waitcount) && in rdtgroup_kn_put()
2524 (rdtgrp->flags & RDT_DELETED)) { in rdtgroup_kn_put()
2525 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || in rdtgroup_kn_put()
2526 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) in rdtgroup_kn_put()
2548 if (rdtgrp->flags & RDT_DELETED) in rdtgroup_kn_lock_live()
2584 if (ctx->enable_cdpl2) { in rdt_enable_ctx()
2590 if (ctx->enable_cdpl3) { in rdt_enable_ctx()
2596 if (ctx->enable_mba_mbps) { in rdt_enable_ctx()
2602 if (ctx->enable_debug) in rdt_enable_ctx()
2617 struct resctrl_schema *s; in schemata_list_add() local
2621 s = kzalloc(sizeof(*s), GFP_KERNEL); in schemata_list_add()
2622 if (!s) in schemata_list_add()
2623 return -ENOMEM; in schemata_list_add()
2625 s->res = r; in schemata_list_add()
2626 s->num_closid = resctrl_arch_get_num_closid(r); in schemata_list_add()
2627 if (resctrl_arch_get_cdp_enabled(r->rid)) in schemata_list_add()
2628 s->num_closid /= 2; in schemata_list_add()
2630 s->conf_type = type; in schemata_list_add()
2643 ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); in schemata_list_add()
2644 if (ret >= sizeof(s->name)) { in schemata_list_add()
2645 kfree(s); in schemata_list_add()
2646 return -EINVAL; in schemata_list_add()
2649 cl = strlen(s->name); in schemata_list_add()
2656 if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) in schemata_list_add()
2662 switch (r->schema_fmt) { in schemata_list_add()
2664 s->fmt_str = "%d=%x"; in schemata_list_add()
2667 s->fmt_str = "%d=%u"; in schemata_list_add()
2671 if (WARN_ON_ONCE(!s->fmt_str)) { in schemata_list_add()
2672 kfree(s); in schemata_list_add()
2673 return -EINVAL; in schemata_list_add()
2676 INIT_LIST_HEAD(&s->list); in schemata_list_add()
2677 list_add(&s->list, &resctrl_schema_all); in schemata_list_add()
2688 if (resctrl_arch_get_cdp_enabled(r->rid)) { in schemata_list_create()
2707 struct resctrl_schema *s, *tmp; in schemata_list_destroy() local
2709 list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { in schemata_list_destroy()
2710 list_del(&s->list); in schemata_list_destroy()
2711 kfree(s); in schemata_list_destroy()
2729 ret = -EBUSY; in rdt_get_tree()
2796 list_for_each_entry(dom, &r->mon_domains, hdr.list) in rdt_get_tree()
2858 ctx->enable_cdpl3 = true; in rdt_parse_param()
2861 ctx->enable_cdpl2 = true; in rdt_parse_param()
2867 ctx->enable_mba_mbps = true; in rdt_parse_param()
2870 ctx->enable_debug = true; in rdt_parse_param()
2874 return -EINVAL; in rdt_parse_param()
2897 return -ENOMEM; in rdt_init_fs_context()
2899 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; in rdt_init_fs_context()
2900 fc->fs_private = &ctx->kfc; in rdt_init_fs_context()
2901 fc->ops = &rdt_fs_context_ops; in rdt_init_fs_context()
2902 put_user_ns(fc->user_ns); in rdt_init_fs_context()
2903 fc->user_ns = get_user_ns(&init_user_ns); in rdt_init_fs_context()
2904 fc->global = true; in rdt_init_fs_context()
2925 resctrl_arch_set_closid_rmid(t, to->closid, in rdt_move_group_tasks()
2926 to->mon.rmid); in rdt_move_group_tasks()
2931 * between the rq->curr update and in rdt_move_group_tasks()
2955 head = &rdtgrp->mon.crdtgrp_list; in free_all_child_rdtgrp()
2958 free_rmid(sentry->closid, sentry->mon.rmid); in free_all_child_rdtgrp()
2959 list_del(&sentry->mon.crdtgrp_list); in free_all_child_rdtgrp()
2961 if (atomic_read(&sentry->waitcount) != 0) in free_all_child_rdtgrp()
2962 sentry->flags = RDT_DELETED; in free_all_child_rdtgrp()
2986 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || in rmdir_all_sub()
2987 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) in rmdir_all_sub()
2996 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); in rmdir_all_sub()
3000 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in rmdir_all_sub()
3002 kernfs_remove(rdtgrp->kn); in rmdir_all_sub()
3003 list_del(&rdtgrp->rdtgroup_list); in rmdir_all_sub()
3005 if (atomic_read(&rdtgrp->waitcount) != 0) in rmdir_all_sub()
3006 rdtgrp->flags = RDT_DELETED; in rmdir_all_sub()
3019 * mon_get_kn_priv() - Get the mon_data priv data for this event.
3023 * allocated structures and re-use an existing one with the same values for
3040 if (priv->rid == rid && priv->domid == domid && in mon_get_kn_priv()
3041 priv->sum == do_sum && priv->evtid == mevt->evtid) in mon_get_kn_priv()
3049 priv->rid = rid; in mon_get_kn_priv()
3050 priv->domid = domid; in mon_get_kn_priv()
3051 priv->sum = do_sum; in mon_get_kn_priv()
3052 priv->evtid = mevt->evtid; in mon_get_kn_priv()
3053 list_add_tail(&priv->list, &mon_data_kn_priv_list); in mon_get_kn_priv()
3059 * mon_put_kn_priv() - Free all allocated mon_data structures.
3070 list_del(&priv->list); in mon_put_kn_priv()
3087 rdtgroup_default.mode = RDT_MODE_SHAREABLE; in resctrl_fs_teardown()
3154 if (kn->dir.subdirs <= 1) in mon_rmdir_one_subdir()
3174 snc_mode = r->mon_scope == RESCTRL_L3_NODE; in rmdir_mondata_subdir_allrdtgrp()
3175 sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id); in rmdir_mondata_subdir_allrdtgrp()
3177 sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id); in rmdir_mondata_subdir_allrdtgrp()
3180 mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname); in rmdir_mondata_subdir_allrdtgrp()
3182 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) in rmdir_mondata_subdir_allrdtgrp()
3183 mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname); in rmdir_mondata_subdir_allrdtgrp()
3197 if (mevt->rid != r->rid || !mevt->enabled) in mon_add_all_files()
3199 domid = do_sum ? d->ci_id : d->hdr.id; in mon_add_all_files()
3200 priv = mon_get_kn_priv(r->rid, domid, mevt, do_sum); in mon_add_all_files()
3202 return -EINVAL; in mon_add_all_files()
3204 ret = mon_addfile(kn, mevt->name, priv); in mon_add_all_files()
3208 if (!do_sum && resctrl_is_mbm_event(mevt->evtid)) in mon_add_all_files()
3209 mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true); in mon_add_all_files()
3226 snc_mode = r->mon_scope == RESCTRL_L3_NODE; in mkdir_mondata_subdir()
3227 sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : d->hdr.id); in mkdir_mondata_subdir()
3236 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); in mkdir_mondata_subdir()
3249 sprintf(name, "mon_sub_%s_%02d", r->name, d->hdr.id); in mkdir_mondata_subdir()
3250 ckn = kernfs_create_dir(kn, name, parent_kn->mode, prgrp); in mkdir_mondata_subdir()
3252 ret = -EINVAL; in mkdir_mondata_subdir()
3285 parent_kn = prgrp->mon.mon_data_kn; in mkdir_mondata_subdir_allrdtgrp()
3288 head = &prgrp->mon.crdtgrp_list; in mkdir_mondata_subdir_allrdtgrp()
3290 parent_kn = crgrp->mon.mon_data_kn; in mkdir_mondata_subdir_allrdtgrp()
3303 /* Walking r->domains, ensure it can't race with cpuhp */ in mkdir_mondata_subdir_alldom()
3306 list_for_each_entry(dom, &r->mon_domains, hdr.list) { in mkdir_mondata_subdir_alldom()
3368 * cbm_ensure_valid - Enforce validity on provided CBM
3384 unsigned int cbm_len = r->cache.cbm_len; in cbm_ensure_valid()
3395 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); in cbm_ensure_valid()
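Only two lines of cbm_ensure_valid() appear in this match set; a plausible sketch of the whole helper (assumed, not verbatim from the file) clamps a proposed bitmask to its first contiguous run of set bits, so resources that require contiguous CBMs always receive a valid value:

	static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
	{
		unsigned int cbm_len = r->cache.cbm_len;
		unsigned long first_bit, zero_bit;
		unsigned long val = _val;

		if (!val)
			return 0;

		/* Locate the first set bit and the first zero after it ... */
		first_bit = find_first_bit(&val, cbm_len);
		zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

		/* ... then drop every bit beyond that zero. */
		bitmap_clear(&val, zero_bit, cbm_len - zero_bit);

		return (u32)val;
	}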
3403 * all shareable and unused bits. All-zero CBM is invalid.
3405 static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s, in __init_one_rdt_domain() argument
3408 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); in __init_one_rdt_domain()
3409 enum resctrl_conf_type t = s->conf_type; in __init_one_rdt_domain()
3411 struct rdt_resource *r = s->res; in __init_one_rdt_domain()
3414 enum rdtgrp_mode mode; in __init_one_rdt_domain() local
3418 cfg = &d->staged_config[t]; in __init_one_rdt_domain()
3419 cfg->have_new_ctrl = false; in __init_one_rdt_domain()
3420 cfg->new_ctrl = r->cache.shareable_bits; in __init_one_rdt_domain()
3421 used_b = r->cache.shareable_bits; in __init_one_rdt_domain()
3424 mode = rdtgroup_mode_by_closid(i); in __init_one_rdt_domain()
3425 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) in __init_one_rdt_domain()
3428 * until the schemata is written, and the mode in __init_one_rdt_domain()
3433 * If CDP is active include peer domain's in __init_one_rdt_domain()
3437 if (resctrl_arch_get_cdp_enabled(r->rid)) in __init_one_rdt_domain()
3443 s->conf_type); in __init_one_rdt_domain()
3445 if (mode == RDT_MODE_SHAREABLE) in __init_one_rdt_domain()
3446 cfg->new_ctrl |= ctrl_val | peer_ctl; in __init_one_rdt_domain()
3449 if (d->plr && d->plr->cbm > 0) in __init_one_rdt_domain()
3450 used_b |= d->plr->cbm; in __init_one_rdt_domain()
3451 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); in __init_one_rdt_domain()
3452 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; in __init_one_rdt_domain()
3453 cfg->new_ctrl |= unused_b; in __init_one_rdt_domain()
3458 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); in __init_one_rdt_domain()
3461 * bitmap_weight() does not access out-of-bound memory. in __init_one_rdt_domain()
3463 tmp_cbm = cfg->new_ctrl; in __init_one_rdt_domain()
3464 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { in __init_one_rdt_domain()
3465 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id); in __init_one_rdt_domain()
3466 return -ENOSPC; in __init_one_rdt_domain()
3468 cfg->have_new_ctrl = true; in __init_one_rdt_domain()
3483 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) in rdtgroup_init_cat() argument
3488 list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) { in rdtgroup_init_cat()
3489 ret = __init_one_rdt_domain(d, s, closid); in rdtgroup_init_cat()
3503 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in rdtgroup_init_mba()
3505 d->mbps_val[closid] = MBA_MAX_MBPS; in rdtgroup_init_mba()
3509 cfg = &d->staged_config[CDP_NONE]; in rdtgroup_init_mba()
3510 cfg->new_ctrl = resctrl_get_default_ctrl(r); in rdtgroup_init_mba()
3511 cfg->have_new_ctrl = true; in rdtgroup_init_mba()
3515 /* Initialize the RDT group's allocations. */
3518 struct resctrl_schema *s; in rdtgroup_init_alloc() local
3524 list_for_each_entry(s, &resctrl_schema_all, list) { in rdtgroup_init_alloc()
3525 r = s->res; in rdtgroup_init_alloc()
3526 if (r->rid == RDT_RESOURCE_MBA || in rdtgroup_init_alloc()
3527 r->rid == RDT_RESOURCE_SMBA) { in rdtgroup_init_alloc()
3528 rdtgroup_init_mba(r, rdtgrp->closid); in rdtgroup_init_alloc()
3532 ret = rdtgroup_init_cat(s, rdtgrp->closid); in rdtgroup_init_alloc()
3537 ret = resctrl_arch_update_domains(r, rdtgrp->closid); in rdtgroup_init_alloc()
3544 rdtgrp->mode = RDT_MODE_SHAREABLE; in rdtgroup_init_alloc()
3558 ret = alloc_rmid(rdtgrp->closid); in mkdir_rdt_prepare_rmid_alloc()
3563 rdtgrp->mon.rmid = ret; in mkdir_rdt_prepare_rmid_alloc()
3567 ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); in mkdir_rdt_prepare_rmid_alloc()
3571 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in mkdir_rdt_prepare_rmid_alloc()
3582 free_rmid(rgrp->closid, rgrp->mon.rmid); in mkdir_rdt_prepare_rmid_free()
3603 const char *name, umode_t mode, in mkdir_rdt_prepare() argument
3613 ret = -ENODEV; in mkdir_rdt_prepare()
3624 ret = -EPERM; in mkdir_rdt_prepare()
3629 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || in mkdir_rdt_prepare()
3630 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { in mkdir_rdt_prepare()
3631 ret = -EINVAL; in mkdir_rdt_prepare()
3632 rdt_last_cmd_puts("Pseudo-locking in progress\n"); in mkdir_rdt_prepare()
3639 ret = -ENOSPC; in mkdir_rdt_prepare()
3644 rdtgrp->mon.parent = prdtgrp; in mkdir_rdt_prepare()
3645 rdtgrp->type = rtype; in mkdir_rdt_prepare()
3646 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); in mkdir_rdt_prepare()
3649 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); in mkdir_rdt_prepare()
3655 rdtgrp->kn = kn; in mkdir_rdt_prepare()
3691 kernfs_put(rdtgrp->kn); in mkdir_rdt_prepare()
3692 kernfs_remove(rdtgrp->kn); in mkdir_rdt_prepare()
3702 kernfs_remove(rgrp->kn); in mkdir_rdt_prepare_clean()
3712 const char *name, umode_t mode) in rdtgroup_mkdir_mon() argument
3717 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); in rdtgroup_mkdir_mon()
3721 prgrp = rdtgrp->mon.parent; in rdtgroup_mkdir_mon()
3722 rdtgrp->closid = prgrp->closid; in rdtgroup_mkdir_mon()
3730 kernfs_activate(rdtgrp->kn); in rdtgroup_mkdir_mon()
3736 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); in rdtgroup_mkdir_mon()
3748 const char *name, umode_t mode) in rdtgroup_mkdir_ctrl_mon() argument
3755 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); in rdtgroup_mkdir_ctrl_mon()
3759 kn = rdtgrp->kn; in rdtgroup_mkdir_ctrl_mon()
3768 rdtgrp->closid = closid; in rdtgroup_mkdir_ctrl_mon()
3774 kernfs_activate(rdtgrp->kn); in rdtgroup_mkdir_ctrl_mon()
3780 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); in rdtgroup_mkdir_ctrl_mon()
3793 rdtgrp->mba_mbps_event = mba_mbps_default_event; in rdtgroup_mkdir_ctrl_mon()
3799 list_del(&rdtgrp->rdtgroup_list); in rdtgroup_mkdir_ctrl_mon()
3812 umode_t mode) in rdtgroup_mkdir() argument
3816 return -EINVAL; in rdtgroup_mkdir()
3824 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); in rdtgroup_mkdir()
3828 return rdtgroup_mkdir_mon(parent_kn, name, mode); in rdtgroup_mkdir()
3830 return -EPERM; in rdtgroup_mkdir()
3835 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; in rdtgroup_rmdir_mon()
3846 closid = prdtgrp->closid; in rdtgroup_rmdir_mon()
3847 rmid = prdtgrp->mon.rmid; in rdtgroup_rmdir_mon()
3848 for_each_cpu(cpu, &rdtgrp->cpu_mask) in rdtgroup_rmdir_mon()
3855 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); in rdtgroup_rmdir_mon()
3858 rdtgrp->flags = RDT_DELETED; in rdtgroup_rmdir_mon()
3862 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in rdtgroup_rmdir_mon()
3865 * Remove the rdtgrp from the parent ctrl_mon group's list in rdtgroup_rmdir_mon()
3867 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); in rdtgroup_rmdir_mon()
3868 list_del(&rdtgrp->mon.crdtgrp_list); in rdtgroup_rmdir_mon()
3870 kernfs_remove(rdtgrp->kn); in rdtgroup_rmdir_mon()
3877 rdtgrp->flags = RDT_DELETED; in rdtgroup_ctrl_remove()
3878 list_del(&rdtgrp->rdtgroup_list); in rdtgroup_ctrl_remove()
3880 kernfs_remove(rdtgrp->kn); in rdtgroup_ctrl_remove()
3894 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); in rdtgroup_rmdir_ctrl()
3899 for_each_cpu(cpu, &rdtgrp->cpu_mask) in rdtgroup_rmdir_ctrl()
3906 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); in rdtgroup_rmdir_ctrl()
3911 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); in rdtgroup_rmdir_ctrl()
3912 closid_free(rdtgrp->closid); in rdtgroup_rmdir_ctrl()
3930 return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex)); in rdt_kn_parent()
3941 return -ENOMEM; in rdtgroup_rmdir()
3945 ret = -EPERM; in rdtgroup_rmdir()
3957 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && in rdtgroup_rmdir()
3959 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || in rdtgroup_rmdir()
3960 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { in rdtgroup_rmdir()
3965 } else if (rdtgrp->type == RDTMON_GROUP && in rdtgroup_rmdir()
3969 ret = -EPERM; in rdtgroup_rmdir()
3979 * mongrp_reparent() - replace parent CTRL_MON group of a MON group
3992 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; in mongrp_reparent()
3994 WARN_ON(rdtgrp->type != RDTMON_GROUP); in mongrp_reparent()
3995 WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); in mongrp_reparent()
4001 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); in mongrp_reparent()
4002 list_move_tail(&rdtgrp->mon.crdtgrp_list, in mongrp_reparent()
4003 &new_prdtgrp->mon.crdtgrp_list); in mongrp_reparent()
4005 rdtgrp->mon.parent = new_prdtgrp; in mongrp_reparent()
4006 rdtgrp->closid = new_prdtgrp->closid; in mongrp_reparent()
4026 return -ENOENT; in rdtgroup_rename()
4043 ret = -EPERM; in rdtgroup_rename()
4047 if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { in rdtgroup_rename()
4048 ret = -ENOENT; in rdtgroup_rename()
4053 if (rdtgrp->type != RDTMON_GROUP || !kn_parent || in rdtgroup_rename()
4056 ret = -EPERM; in rdtgroup_rename()
4062 ret = -EPERM; in rdtgroup_rename()
4071 if (!cpumask_empty(&rdtgrp->cpu_mask) && in rdtgroup_rename()
4072 rdtgrp->mon.parent != new_prdtgrp) { in rdtgroup_rename()
4074 ret = -EPERM; in rdtgroup_rename()
4084 ret = -ENOMEM; in rdtgroup_rename()
4140 ctx->kfc.root = rdt_root; in rdtgroup_setup_root()
4172 kfree(d->cntr_cfg); in domain_destroy_mon_state()
4173 bitmap_free(d->rmid_busy_llc); in domain_destroy_mon_state()
4175 kfree(d->mbm_states[idx]); in domain_destroy_mon_state()
4176 d->mbm_states[idx] = NULL; in domain_destroy_mon_state()
4184 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) in resctrl_offline_ctrl_domain()
4202 cancel_delayed_work(&d->mbm_over); in resctrl_offline_mon_domain()
4206 * decrement rmid->ebusy. There is no way to know in resctrl_offline_mon_domain()
4213 cancel_delayed_work(&d->cqm_limbo); in resctrl_offline_mon_domain()
4222 * domain_setup_mon_state() - Initialise domain monitoring structures.
4232 * Returns 0 for success, or -ENOMEM.
4237 size_t tsize = sizeof(*d->mbm_states[0]); in domain_setup_mon_state()
4242 d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); in domain_setup_mon_state()
4243 if (!d->rmid_busy_llc) in domain_setup_mon_state()
4244 return -ENOMEM; in domain_setup_mon_state()
4251 d->mbm_states[idx] = kcalloc(idx_limit, tsize, GFP_KERNEL); in domain_setup_mon_state()
4252 if (!d->mbm_states[idx]) in domain_setup_mon_state()
4256 if (resctrl_is_mbm_enabled() && r->mon.mbm_cntr_assignable) { in domain_setup_mon_state()
4257 tsize = sizeof(*d->cntr_cfg); in domain_setup_mon_state()
4258 d->cntr_cfg = kcalloc(r->mon.num_mbm_cntrs, tsize, GFP_KERNEL); in domain_setup_mon_state()
4259 if (!d->cntr_cfg) in domain_setup_mon_state()
4265 bitmap_free(d->rmid_busy_llc); in domain_setup_mon_state()
4267 kfree(d->mbm_states[idx]); in domain_setup_mon_state()
4268 d->mbm_states[idx] = NULL; in domain_setup_mon_state()
4271 return -ENOMEM; in domain_setup_mon_state()
4280 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { in resctrl_online_ctrl_domain()
4301 INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); in resctrl_online_mon_domain()
4307 INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); in resctrl_online_mon_domain()
4336 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { in clear_childcpus()
4337 if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) in clear_childcpus()
4349 list_for_each_entry(d, &r->mon_domains, hdr.list) { in get_mon_domain_from_cpu()
4351 if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask)) in get_mon_domain_from_cpu()
4366 if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { in resctrl_offline_cpu()
4372 if (!l3->mon_capable) in resctrl_offline_cpu()
4377 if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { in resctrl_offline_cpu()
4378 cancel_delayed_work(&d->mbm_over); in resctrl_offline_cpu()
4382 cpu == d->cqm_work_cpu && has_busy_rmid(d)) { in resctrl_offline_cpu()
4383 cancel_delayed_work(&d->cqm_limbo); in resctrl_offline_cpu()
4393 * resctrl_init - resctrl filesystem initialization
4398 * Return: 0 on success or -errno
4433 * during the debugfs directory creation also &sb->s_type->i_mutex_key in resctrl_init()
4434 * (the lockdep class of inode->i_rwsem). Other filesystem in resctrl_init()
4436 * &sb->s_type->i_mutex_key --> &mm->mmap_lock in resctrl_init()
4437 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex in resctrl_init()
4439 * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause in resctrl_init()
4466 if (!list_empty(&r->ctrl_domains)) in resctrl_online_domains_exist()
4471 if (!list_empty(&r->mon_domains)) in resctrl_online_domains_exist()
4479 * resctrl_exit() - Remove the resctrl filesystem and free resources.