// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/msr.h>
#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* The filesystem can only be mounted once. */
bool resctrl_mounted;

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

/*
 * Used to store the max resource name width to display the schemata names in
 * a tabular format.
 */
int max_name_width;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
static void rdtgroup_destroy_root(void);

struct dentry *debugfs_resctrl;

/*
 * Memory bandwidth monitoring event to use for the default CTRL_MON group
 * and each new CTRL_MON group created by the user. Only relevant when
 * the filesystem is mounted with the "mba_MBps" option so it does not
 * matter that it remains uninitialized on systems that do not support
 * the "mba_MBps" option.
 */
enum resctrl_event_id mba_mbps_default_event;

static bool resctrl_debug;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

void rdt_staged_configs_clear(void)
{
	struct rdt_ctrl_domain *dom;
	struct rdt_resource *r;

	lockdep_assert_held(&rdtgroup_mutex);

	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}
}

static bool resctrl_is_mbm_enabled(void)
{
	return (resctrl_arch_is_mbm_total_enabled() ||
		resctrl_arch_is_mbm_local_enabled());
}

static bool resctrl_is_mbm_event(int e)
{
	return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
		e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set current's closid to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static unsigned long closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
	__clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map);
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	int cleanest_closid;
	u32 closid;

	lockdep_assert_held(&rdtgroup_mutex);

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
	    resctrl_arch_is_llc_occupancy_enabled()) {
		cleanest_closid = resctrl_find_cleanest_closid();
		if (cleanest_closid < 0)
			return cleanest_closid;
		closid = cleanest_closid;
	} else {
		closid = ffs(closid_free_map);
		if (closid == 0)
			return -ENOSPC;
		closid--;
	}
	__clear_bit(closid, &closid_free_map);

	return closid;
}

void closid_free(int closid)
{
	lockdep_assert_held(&rdtgroup_mutex);

	__set_bit(closid, &closid_free_map);
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
bool closid_allocated(unsigned int closid)
{
	lockdep_assert_held(&rdtgroup_mutex);

	return !test_bit(closid, &closid_free_map);
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
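 * If no resource group is using @closid, RDT_NUM_MODES is returned.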
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE] = "shareable",
	[RDT_MODE_EXCLUSIVE] = "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len = PAGE_SIZE,
	.write = rdtgroup_file_write,
	.seq_show = rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len = PAGE_SIZE,
	.seq_show = rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->hdr.cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
void resctrl_arch_sync_cpu_closid_rmid(void *info)
{
	struct resctrl_cpu_defaults *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in(current);
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 * @r may be NULL.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	struct resctrl_cpu_defaults defaults, *p = NULL;

	if (r) {
		defaults.closid = r->closid;
		defaults.rmid = r->mon.rmid;
		p = &defaults;
	}

	on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in(task);
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
{
	u32 closid, rmid = rdtgrp->mon.rmid;

	if (rdtgrp->type == RDTCTRL_GROUP)
		closid = rdtgrp->closid;
	else if (rdtgrp->type == RDTMON_GROUP)
		closid = rdtgrp->mon.parent->closid;
	else
		return false;

	return resctrl_arch_match_closid(tsk, closid) &&
	       resctrl_arch_match_rmid(tsk, closid, rmid);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if (task_in_rdtgroup(tsk, rdtgrp))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated by them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, can move the tasks only from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTMON_GROUP &&
	    !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
		rdt_last_cmd_puts("Can't move task to different control group\n");
		return -EINVAL;
	}

	if (rdtgrp->type == RDTMON_GROUP)
		resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
					     rdtgrp->mon.rmid);
	else
		resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
					     rdtgrp->mon.rmid);

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current, which will decide if it will be interrupted.
	 * This pairs with the full barrier between the rq->curr update and
	 * resctrl_sched_in() during context switch.
	 */
	smp_mb();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
		resctrl_arch_match_closid(t, r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
		resctrl_arch_match_rmid(t, r->mon.parent->closid,
					r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	char *pid_str;
	int ret = 0;
	pid_t pid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	while (buf && buf[0] != '\0' && buf[0] != '\n') {
		pid_str = strim(strsep(&buf, ","));

		if (kstrtoint(pid_str, 0, &pid)) {
			rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
			ret = -EINVAL;
			break;
		}

		if (pid < 0) {
			rdt_last_cmd_printf("Invalid pid %d\n", pid);
			ret = -EINVAL;
			break;
		}

		ret = rdtgroup_move_task(pid, rdtgrp, of);
		if (ret) {
			rdt_last_cmd_printf("Error while processing task %d\n", pid);
			break;
		}
	}

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;
	pid_t pid;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			pid = task_pid_vnr(t);
			if (pid)
				seq_printf(s, "%d\n", pid);
		}
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdtgroup_closid_show(struct kernfs_open_file *of,
				struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->closid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdtgroup_rmid_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->mon.rmid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1) res:
 *    mon:
 *
 *    resctrl is not available.
 *
 * 2) res:/
 *    mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3) res:/
 *    mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4) res:group0
 *    mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!resctrl_mounted) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (!resctrl_arch_match_closid(tsk, rdtg->closid))
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdt_kn_name(rdtg->kn));
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
						     crg->mon.rmid))
				continue;
			seq_printf(s, "%s", rdt_kn_name(crg->kn));
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static void *rdt_kn_parent_priv(struct kernfs_node *kn)
{
	/*
	 * The parent pointer is only valid within RCU section since it can be
	 * replaced.
	 */
	guard(rcu)();
	return rcu_dereference(kn->__parent)->priv;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/*
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_ctrl_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->hdr.id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list) {
		seq_printf(seq, "%s\n", mevt->name);
		if (mevt->configurable)
			seq_printf(seq, "%s_config\n", mevt->name);
	}

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	switch (r->membw.throttle_mode) {
	case THREAD_THROTTLE_PER_THREAD:
		seq_puts(seq, "per-thread\n");
		return 0;
	case THREAD_THROTTLE_MAX:
		seq_puts(seq, "max\n");
		return 0;
	case THREAD_THROTTLE_UNDEFINED:
		seq_puts(seq, "undefined\n");
		return 0;
	}

	WARN_ON_ONCE(1);

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;

	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
					struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);

	return 0;
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. The overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_ctrl_domain *d;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	u32 ctrl;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/*
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
		   !strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_ctrl_domain *d, unsigned long cbm)
{
	unsigned int size = 0;
	struct cacheinfo *ci;
	int num_b;

	if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
		return size;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
	if (ci)
		size = ci->size / r->cache.cbm_len * num_b;

	return size;
}

/*
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	enum resctrl_conf_type type;
	struct rdt_ctrl_domain *d;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	unsigned int size;
	int ret = 0;
	u32 closid;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
		}
		goto out;
	}

	closid = rdtgrp->closid;

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				if (is_mba_sc(r))
					ctrl = d->mbps_val[closid];
				else
					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA ||
				    r->rid == RDT_RESOURCE_SMBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->hdr.id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#define INVALID_CONFIG_INDEX UINT_MAX

/**
 * mon_event_config_index_get - get the hardware index for the
 *				configurable event
 * @evtid: event id.
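 *	   Only QOS_L3_MBM_TOTAL_EVENT_ID and QOS_L3_MBM_LOCAL_EVENT_ID are
 *	   configurable events.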
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *	   1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *	   INVALID_CONFIG_INDEX for invalid evtid
 */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return 0;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return 1;
	default:
		/* Should never reach here */
		return INVALID_CONFIG_INDEX;
	}
}

void resctrl_arch_mon_event_config_read(void *_config_info)
{
	struct resctrl_mon_config_info *config_info = _config_info;
	unsigned int index;
	u64 msrval;

	index = mon_event_config_index_get(config_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", config_info->evtid);
		return;
	}
	rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval);

	/* Report only the valid event configuration bits */
	config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}

static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
{
	smp_call_function_any(&mon_info->d->hdr.cpu_mask,
			      resctrl_arch_mon_event_config_read, mon_info, 1);
}

static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{
	struct resctrl_mon_config_info mon_info;
	struct rdt_mon_domain *dom;
	bool sep = false;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
		if (sep)
			seq_puts(s, ";");

		memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
		mon_info.r = r;
		mon_info.d = dom;
		mon_info.evtid = evtid;
		mondata_config_read(&mon_info);

		seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
		sep = true;
	}
	seq_puts(s, "\n");

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return 0;
}

static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);

	return 0;
}

static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);

	return 0;
}

void resctrl_arch_mon_event_config_write(void *_config_info)
{
	struct resctrl_mon_config_info *config_info = _config_info;
	unsigned int index;

	index = mon_event_config_index_get(config_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", config_info->evtid);
		return;
	}
	wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config);
}

static void mbm_config_write_domain(struct rdt_resource *r,
				    struct rdt_mon_domain *d, u32 evtid, u32 val)
{
	struct resctrl_mon_config_info mon_info = {0};

	/*
	 * Read the current config value first. If both are the same then
	 * no need to write it again.
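	 * The read runs on one of the domain's CPUs via mondata_config_read().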
	 */
	mon_info.r = r;
	mon_info.d = d;
	mon_info.evtid = evtid;
	mondata_config_read(&mon_info);
	if (mon_info.mon_config == val)
		return;

	mon_info.mon_config = val;

	/*
	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
	 * are scoped at the domain level. Writing any of these MSRs
	 * on one CPU is observed by all the CPUs in the domain.
	 */
	smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
			      &mon_info, 1);

	/*
	 * When an Event Configuration is changed, the bandwidth counters
	 * for all RMIDs and Events will be cleared by the hardware. The
	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
	 * every RMID on the next read to any event for every RMID.
	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
	 * cleared while it is tracked by the hardware. Clear the
	 * mbm_local and mbm_total counts for all the RMIDs.
	 */
	resctrl_arch_reset_rmid_all(r, d);
}

static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
	char *dom_str = NULL, *id_str;
	unsigned long dom_id, val;
	struct rdt_mon_domain *d;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

next:
	if (!tok || tok[0] == '\0')
		return 0;

	/* Start processing the strings for each domain */
	dom_str = strim(strsep(&tok, ";"));
	id_str = strsep(&dom_str, "=");

	if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
		return -EINVAL;
	}

	if (!dom_str || kstrtoul(dom_str, 16, &val)) {
		rdt_last_cmd_puts("Non-numeric event configuration value\n");
		return -EINVAL;
	}

	/* Value from user cannot be more than the supported set of events */
	if ((val & r->mbm_cfg_mask) != val) {
		rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
				    r->mbm_cfg_mask);
		return -EINVAL;
	}

	list_for_each_entry(d, &r->mon_domains, hdr.list) {
		if (d->hdr.id == dom_id) {
			mbm_config_write_domain(r, d, evtid, val);
			goto next;
		}
	}

	return -EINVAL;
}

static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return ret ?: nbytes;
}

static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return ret ?: nbytes;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name = "last_cmd_status",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_last_cmd_status_show,
		.fflags = RFTYPE_TOP_INFO,
	},
	{
		.name = "num_closids",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_num_closids_show,
		.fflags = RFTYPE_CTRL_INFO,
	},
	{
		.name = "mon_features",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_mon_features_show,
		.fflags = RFTYPE_MON_INFO,
	},
	{
		.name = "num_rmids",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_num_rmids_show,
		.fflags = RFTYPE_MON_INFO,
	},
	{
		.name = "cbm_mask",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_default_ctrl_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "min_cbm_bits",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_min_cbm_bits_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "shareable_bits",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_shareable_bits_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "bit_usage",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_bit_usage_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "min_bandwidth",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_min_bw_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name = "bandwidth_gran",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_bw_gran_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name = "delay_linear",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_delay_linear_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * It is platform specific which (if any) capabilities are provided
	 * by thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
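	 * See thread_throttle_mode_init(), which calls resctrl_file_fflags_init().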
	 */
	{
		.name = "thread_throttle_mode",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_thread_throttle_mode_show,
	},
	{
		.name = "max_threshold_occupancy",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = max_threshold_occ_write,
		.seq_show = max_threshold_occ_show,
		.fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "mbm_total_bytes_config",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = mbm_total_bytes_config_show,
		.write = mbm_total_bytes_config_write,
	},
	{
		.name = "mbm_local_bytes_config",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = mbm_local_bytes_config_show,
		.write = mbm_local_bytes_config_write,
	},
	{
		.name = "cpus",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_cpus_write,
		.seq_show = rdtgroup_cpus_show,
		.fflags = RFTYPE_BASE,
	},
	{
		.name = "cpus_list",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_cpus_write,
		.seq_show = rdtgroup_cpus_show,
		.flags = RFTYPE_FLAGS_CPUS_LIST,
		.fflags = RFTYPE_BASE,
	},
	{
		.name = "tasks",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_tasks_write,
		.seq_show = rdtgroup_tasks_show,
		.fflags = RFTYPE_BASE,
	},
	{
		.name = "mon_hw_id",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdtgroup_rmid_show,
		.fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG,
	},
	{
		.name = "schemata",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_schemata_write,
		.seq_show = rdtgroup_schemata_show,
		.fflags = RFTYPE_CTRL_BASE,
	},
	{
		.name = "mba_MBps_event",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_mba_mbps_event_write,
		.seq_show = rdtgroup_mba_mbps_event_show,
	},
	{
		.name = "mode",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.write = rdtgroup_mode_write,
		.seq_show = rdtgroup_mode_show,
		.fflags = RFTYPE_CTRL_BASE,
	},
	{
		.name = "size",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdtgroup_size_show,
		.fflags = RFTYPE_CTRL_BASE,
	},
	{
		.name = "sparse_masks",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_has_sparse_bitmasks_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "ctrl_hw_id",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdtgroup_closid_show,
		.fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
	},

};

static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	if (resctrl_debug)
		fflags |= RFTYPE_DEBUG;

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
2067 } 2068 return ret; 2069 } 2070 2071 static struct rftype *rdtgroup_get_rftype_by_name(const char *name) 2072 { 2073 struct rftype *rfts, *rft; 2074 int len; 2075 2076 rfts = res_common_files; 2077 len = ARRAY_SIZE(res_common_files); 2078 2079 for (rft = rfts; rft < rfts + len; rft++) { 2080 if (!strcmp(rft->name, name)) 2081 return rft; 2082 } 2083 2084 return NULL; 2085 } 2086 2087 static void thread_throttle_mode_init(void) 2088 { 2089 enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED; 2090 struct rdt_resource *r_mba, *r_smba; 2091 2092 r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); 2093 if (r_mba->alloc_capable && 2094 r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) 2095 throttle_mode = r_mba->membw.throttle_mode; 2096 2097 r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA); 2098 if (r_smba->alloc_capable && 2099 r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) 2100 throttle_mode = r_smba->membw.throttle_mode; 2101 2102 if (throttle_mode == THREAD_THROTTLE_UNDEFINED) 2103 return; 2104 2105 resctrl_file_fflags_init("thread_throttle_mode", 2106 RFTYPE_CTRL_INFO | RFTYPE_RES_MB); 2107 } 2108 2109 void resctrl_file_fflags_init(const char *config, unsigned long fflags) 2110 { 2111 struct rftype *rft; 2112 2113 rft = rdtgroup_get_rftype_by_name(config); 2114 if (rft) 2115 rft->fflags = fflags; 2116 } 2117 2118 /** 2119 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file 2120 * @r: The resource group with which the file is associated. 2121 * @name: Name of the file 2122 * 2123 * The permissions of named resctrl file, directory, or link are modified 2124 * to not allow read, write, or execute by any user. 2125 * 2126 * WARNING: This function is intended to communicate to the user that the 2127 * resctrl file has been locked down - that it is not relevant to the 2128 * particular state the system finds itself in. It should not be relied 2129 * on to protect from user access because after the file's permissions 2130 * are restricted the user can still change the permissions using chmod 2131 * from the command line. 2132 * 2133 * Return: 0 on success, <0 on failure. 2134 */ 2135 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) 2136 { 2137 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 2138 struct kernfs_node *kn; 2139 int ret = 0; 2140 2141 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 2142 if (!kn) 2143 return -ENOENT; 2144 2145 switch (kernfs_type(kn)) { 2146 case KERNFS_DIR: 2147 iattr.ia_mode = S_IFDIR; 2148 break; 2149 case KERNFS_FILE: 2150 iattr.ia_mode = S_IFREG; 2151 break; 2152 case KERNFS_LINK: 2153 iattr.ia_mode = S_IFLNK; 2154 break; 2155 } 2156 2157 ret = kernfs_setattr(kn, &iattr); 2158 kernfs_put(kn); 2159 return ret; 2160 } 2161 2162 /** 2163 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file 2164 * @r: The resource group with which the file is associated. 2165 * @name: Name of the file 2166 * @mask: Mask of permissions that should be restored 2167 * 2168 * Restore the permissions of the named file. If @name is a directory the 2169 * permissions of its parent will be used. 2170 * 2171 * Return: 0 on success, <0 on failure. 
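 *
 * The restored permissions are the defaults recorded in res_common_files[]
 * limited to the bits set in @mask.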
2172 */ 2173 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, 2174 umode_t mask) 2175 { 2176 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 2177 struct kernfs_node *kn, *parent; 2178 struct rftype *rfts, *rft; 2179 int ret, len; 2180 2181 rfts = res_common_files; 2182 len = ARRAY_SIZE(res_common_files); 2183 2184 for (rft = rfts; rft < rfts + len; rft++) { 2185 if (!strcmp(rft->name, name)) 2186 iattr.ia_mode = rft->mode & mask; 2187 } 2188 2189 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 2190 if (!kn) 2191 return -ENOENT; 2192 2193 switch (kernfs_type(kn)) { 2194 case KERNFS_DIR: 2195 parent = kernfs_get_parent(kn); 2196 if (parent) { 2197 iattr.ia_mode |= parent->mode; 2198 kernfs_put(parent); 2199 } 2200 iattr.ia_mode |= S_IFDIR; 2201 break; 2202 case KERNFS_FILE: 2203 iattr.ia_mode |= S_IFREG; 2204 break; 2205 case KERNFS_LINK: 2206 iattr.ia_mode |= S_IFLNK; 2207 break; 2208 } 2209 2210 ret = kernfs_setattr(kn, &iattr); 2211 kernfs_put(kn); 2212 return ret; 2213 } 2214 2215 static int rdtgroup_mkdir_info_resdir(void *priv, char *name, 2216 unsigned long fflags) 2217 { 2218 struct kernfs_node *kn_subdir; 2219 int ret; 2220 2221 kn_subdir = kernfs_create_dir(kn_info, name, 2222 kn_info->mode, priv); 2223 if (IS_ERR(kn_subdir)) 2224 return PTR_ERR(kn_subdir); 2225 2226 ret = rdtgroup_kn_set_ugid(kn_subdir); 2227 if (ret) 2228 return ret; 2229 2230 ret = rdtgroup_add_files(kn_subdir, fflags); 2231 if (!ret) 2232 kernfs_activate(kn_subdir); 2233 2234 return ret; 2235 } 2236 2237 static unsigned long fflags_from_resource(struct rdt_resource *r) 2238 { 2239 switch (r->rid) { 2240 case RDT_RESOURCE_L3: 2241 case RDT_RESOURCE_L2: 2242 return RFTYPE_RES_CACHE; 2243 case RDT_RESOURCE_MBA: 2244 case RDT_RESOURCE_SMBA: 2245 return RFTYPE_RES_MB; 2246 } 2247 2248 return WARN_ON_ONCE(1); 2249 } 2250 2251 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) 2252 { 2253 struct resctrl_schema *s; 2254 struct rdt_resource *r; 2255 unsigned long fflags; 2256 char name[32]; 2257 int ret; 2258 2259 /* create the directory */ 2260 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); 2261 if (IS_ERR(kn_info)) 2262 return PTR_ERR(kn_info); 2263 2264 ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); 2265 if (ret) 2266 goto out_destroy; 2267 2268 /* loop over enabled controls, these are all alloc_capable */ 2269 list_for_each_entry(s, &resctrl_schema_all, list) { 2270 r = s->res; 2271 fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO; 2272 ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); 2273 if (ret) 2274 goto out_destroy; 2275 } 2276 2277 for_each_mon_capable_rdt_resource(r) { 2278 fflags = fflags_from_resource(r) | RFTYPE_MON_INFO; 2279 sprintf(name, "%s_MON", r->name); 2280 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 2281 if (ret) 2282 goto out_destroy; 2283 } 2284 2285 ret = rdtgroup_kn_set_ugid(kn_info); 2286 if (ret) 2287 goto out_destroy; 2288 2289 kernfs_activate(kn_info); 2290 2291 return 0; 2292 2293 out_destroy: 2294 kernfs_remove(kn_info); 2295 return ret; 2296 } 2297 2298 static int 2299 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 2300 char *name, struct kernfs_node **dest_kn) 2301 { 2302 struct kernfs_node *kn; 2303 int ret; 2304 2305 /* create the directory */ 2306 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2307 if (IS_ERR(kn)) 2308 return PTR_ERR(kn); 2309 2310 if (dest_kn) 2311 *dest_kn = kn; 2312 2313 ret = rdtgroup_kn_set_ugid(kn); 2314 if (ret) 2315 goto out_destroy; 
2316 2317 kernfs_activate(kn); 2318 2319 return 0; 2320 2321 out_destroy: 2322 kernfs_remove(kn); 2323 return ret; 2324 } 2325 2326 static void l3_qos_cfg_update(void *arg) 2327 { 2328 bool *enable = arg; 2329 2330 wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 2331 } 2332 2333 static void l2_qos_cfg_update(void *arg) 2334 { 2335 bool *enable = arg; 2336 2337 wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 2338 } 2339 2340 static inline bool is_mba_linear(void) 2341 { 2342 return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; 2343 } 2344 2345 static int set_cache_qos_cfg(int level, bool enable) 2346 { 2347 void (*update)(void *arg); 2348 struct rdt_ctrl_domain *d; 2349 struct rdt_resource *r_l; 2350 cpumask_var_t cpu_mask; 2351 int cpu; 2352 2353 /* Walking r->domains, ensure it can't race with cpuhp */ 2354 lockdep_assert_cpus_held(); 2355 2356 if (level == RDT_RESOURCE_L3) 2357 update = l3_qos_cfg_update; 2358 else if (level == RDT_RESOURCE_L2) 2359 update = l2_qos_cfg_update; 2360 else 2361 return -EINVAL; 2362 2363 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 2364 return -ENOMEM; 2365 2366 r_l = &rdt_resources_all[level].r_resctrl; 2367 list_for_each_entry(d, &r_l->ctrl_domains, hdr.list) { 2368 if (r_l->cache.arch_has_per_cpu_cfg) 2369 /* Pick all the CPUs in the domain instance */ 2370 for_each_cpu(cpu, &d->hdr.cpu_mask) 2371 cpumask_set_cpu(cpu, cpu_mask); 2372 else 2373 /* Pick one CPU from each domain instance to update MSR */ 2374 cpumask_set_cpu(cpumask_any(&d->hdr.cpu_mask), cpu_mask); 2375 } 2376 2377 /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ 2378 on_each_cpu_mask(cpu_mask, update, &enable, 1); 2379 2380 free_cpumask_var(cpu_mask); 2381 2382 return 0; 2383 } 2384 2385 /* Restore the qos cfg state when a domain comes online */ 2386 void rdt_domain_reconfigure_cdp(struct rdt_resource *r) 2387 { 2388 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); 2389 2390 if (!r->cdp_capable) 2391 return; 2392 2393 if (r->rid == RDT_RESOURCE_L2) 2394 l2_qos_cfg_update(&hw_res->cdp_enabled); 2395 2396 if (r->rid == RDT_RESOURCE_L3) 2397 l3_qos_cfg_update(&hw_res->cdp_enabled); 2398 } 2399 2400 static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d) 2401 { 2402 u32 num_closid = resctrl_arch_get_num_closid(r); 2403 int cpu = cpumask_any(&d->hdr.cpu_mask); 2404 int i; 2405 2406 d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), 2407 GFP_KERNEL, cpu_to_node(cpu)); 2408 if (!d->mbps_val) 2409 return -ENOMEM; 2410 2411 for (i = 0; i < num_closid; i++) 2412 d->mbps_val[i] = MBA_MAX_MBPS; 2413 2414 return 0; 2415 } 2416 2417 static void mba_sc_domain_destroy(struct rdt_resource *r, 2418 struct rdt_ctrl_domain *d) 2419 { 2420 kfree(d->mbps_val); 2421 d->mbps_val = NULL; 2422 } 2423 2424 /* 2425 * MBA software controller is supported only if 2426 * MBM is supported and MBA is in linear scale, 2427 * and the MBM monitor scope is the same as MBA 2428 * control scope. 2429 */ 2430 static bool supports_mba_mbps(void) 2431 { 2432 struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3); 2433 struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); 2434 2435 return (resctrl_is_mbm_enabled() && 2436 r->alloc_capable && is_mba_linear() && 2437 r->ctrl_scope == rmbm->mon_scope); 2438 } 2439 2440 /* 2441 * Enable or disable the MBA software controller 2442 * which helps user specify bandwidth in MBps. 
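 * When enabled, the MBA schemata takes values in MBps: each CLOSID's
 * target is held in d->mbps_val[] (reset to MBA_MAX_MBPS here) and the
 * "mba_MBps_event" file is exposed so the MBM event feeding the
 * bandwidth feedback loop can be chosen.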
2443 */ 2444 static int set_mba_sc(bool mba_sc) 2445 { 2446 struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); 2447 u32 num_closid = resctrl_arch_get_num_closid(r); 2448 struct rdt_ctrl_domain *d; 2449 unsigned long fflags; 2450 int i; 2451 2452 if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) 2453 return -EINVAL; 2454 2455 r->membw.mba_sc = mba_sc; 2456 2457 rdtgroup_default.mba_mbps_event = mba_mbps_default_event; 2458 2459 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { 2460 for (i = 0; i < num_closid; i++) 2461 d->mbps_val[i] = MBA_MAX_MBPS; 2462 } 2463 2464 fflags = mba_sc ? RFTYPE_CTRL_BASE | RFTYPE_MON_BASE : 0; 2465 resctrl_file_fflags_init("mba_MBps_event", fflags); 2466 2467 return 0; 2468 } 2469 2470 static int cdp_enable(int level) 2471 { 2472 struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; 2473 int ret; 2474 2475 if (!r_l->alloc_capable) 2476 return -EINVAL; 2477 2478 ret = set_cache_qos_cfg(level, true); 2479 if (!ret) 2480 rdt_resources_all[level].cdp_enabled = true; 2481 2482 return ret; 2483 } 2484 2485 static void cdp_disable(int level) 2486 { 2487 struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; 2488 2489 if (r_hw->cdp_enabled) { 2490 set_cache_qos_cfg(level, false); 2491 r_hw->cdp_enabled = false; 2492 } 2493 } 2494 2495 int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) 2496 { 2497 struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; 2498 2499 if (!hw_res->r_resctrl.cdp_capable) 2500 return -EINVAL; 2501 2502 if (enable) 2503 return cdp_enable(l); 2504 2505 cdp_disable(l); 2506 2507 return 0; 2508 } 2509 2510 /* 2511 * We don't allow rdtgroup directories to be created anywhere 2512 * except the root directory. Thus when looking for the rdtgroup 2513 * structure for a kernfs node we are either looking at a directory, 2514 * in which case the rdtgroup structure is pointed at by the "priv" 2515 * field, otherwise we have a file, and need only look to the parent 2516 * to find the rdtgroup. 2517 */ 2518 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) 2519 { 2520 if (kernfs_type(kn) == KERNFS_DIR) { 2521 /* 2522 * All the resource directories use "kn->priv" 2523 * to point to the "struct rdtgroup" for the 2524 * resource. "info" and its subdirectories don't 2525 * have rdtgroup structures, so return NULL here. 2526 */ 2527 if (kn == kn_info || 2528 rcu_access_pointer(kn->__parent) == kn_info) 2529 return NULL; 2530 else 2531 return kn->priv; 2532 } else { 2533 return rdt_kn_parent_priv(kn); 2534 } 2535 } 2536 2537 static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) 2538 { 2539 atomic_inc(&rdtgrp->waitcount); 2540 kernfs_break_active_protection(kn); 2541 } 2542 2543 static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) 2544 { 2545 if (atomic_dec_and_test(&rdtgrp->waitcount) && 2546 (rdtgrp->flags & RDT_DELETED)) { 2547 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2548 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2549 rdtgroup_pseudo_lock_remove(rdtgrp); 2550 kernfs_unbreak_active_protection(kn); 2551 rdtgroup_remove(rdtgrp); 2552 } else { 2553 kernfs_unbreak_active_protection(kn); 2554 } 2555 } 2556 2557 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) 2558 { 2559 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2560 2561 if (!rdtgrp) 2562 return NULL; 2563 2564 rdtgroup_kn_get(rdtgrp, kn); 2565 2566 cpus_read_lock(); 2567 mutex_lock(&rdtgroup_mutex); 2568 2569 /* Was this group deleted while we waited? 
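 * rdtgroup_kn_get() raised ->waitcount so the structure itself cannot
 * be freed until rdtgroup_kn_unlock(), but the group may have been
 * marked RDT_DELETED while the locks were being acquired.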
*/ 2570 if (rdtgrp->flags & RDT_DELETED) 2571 return NULL; 2572 2573 return rdtgrp; 2574 } 2575 2576 void rdtgroup_kn_unlock(struct kernfs_node *kn) 2577 { 2578 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2579 2580 if (!rdtgrp) 2581 return; 2582 2583 mutex_unlock(&rdtgroup_mutex); 2584 cpus_read_unlock(); 2585 2586 rdtgroup_kn_put(rdtgrp, kn); 2587 } 2588 2589 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2590 struct rdtgroup *prgrp, 2591 struct kernfs_node **mon_data_kn); 2592 2593 static void rdt_disable_ctx(void) 2594 { 2595 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); 2596 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); 2597 set_mba_sc(false); 2598 2599 resctrl_debug = false; 2600 } 2601 2602 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 2603 { 2604 int ret = 0; 2605 2606 if (ctx->enable_cdpl2) { 2607 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); 2608 if (ret) 2609 goto out_done; 2610 } 2611 2612 if (ctx->enable_cdpl3) { 2613 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); 2614 if (ret) 2615 goto out_cdpl2; 2616 } 2617 2618 if (ctx->enable_mba_mbps) { 2619 ret = set_mba_sc(true); 2620 if (ret) 2621 goto out_cdpl3; 2622 } 2623 2624 if (ctx->enable_debug) 2625 resctrl_debug = true; 2626 2627 return 0; 2628 2629 out_cdpl3: 2630 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); 2631 out_cdpl2: 2632 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); 2633 out_done: 2634 return ret; 2635 } 2636 2637 static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) 2638 { 2639 struct resctrl_schema *s; 2640 const char *suffix = ""; 2641 int ret, cl; 2642 2643 s = kzalloc(sizeof(*s), GFP_KERNEL); 2644 if (!s) 2645 return -ENOMEM; 2646 2647 s->res = r; 2648 s->num_closid = resctrl_arch_get_num_closid(r); 2649 if (resctrl_arch_get_cdp_enabled(r->rid)) 2650 s->num_closid /= 2; 2651 2652 s->conf_type = type; 2653 switch (type) { 2654 case CDP_CODE: 2655 suffix = "CODE"; 2656 break; 2657 case CDP_DATA: 2658 suffix = "DATA"; 2659 break; 2660 case CDP_NONE: 2661 suffix = ""; 2662 break; 2663 } 2664 2665 ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); 2666 if (ret >= sizeof(s->name)) { 2667 kfree(s); 2668 return -EINVAL; 2669 } 2670 2671 cl = strlen(s->name); 2672 2673 /* 2674 * If CDP is supported by this resource, but not enabled, 2675 * include the suffix. This ensures the tabular format of the 2676 * schemata file does not change between mounts of the filesystem. 
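 * For example, a plain "L3" is padded as if it were "L3CODE"/"L3DATA",
 * i.e. four extra characters.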
2677 */ 2678 if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) 2679 cl += 4; 2680 2681 if (cl > max_name_width) 2682 max_name_width = cl; 2683 2684 switch (r->schema_fmt) { 2685 case RESCTRL_SCHEMA_BITMAP: 2686 s->fmt_str = "%d=%x"; 2687 break; 2688 case RESCTRL_SCHEMA_RANGE: 2689 s->fmt_str = "%d=%u"; 2690 break; 2691 } 2692 2693 if (WARN_ON_ONCE(!s->fmt_str)) { 2694 kfree(s); 2695 return -EINVAL; 2696 } 2697 2698 INIT_LIST_HEAD(&s->list); 2699 list_add(&s->list, &resctrl_schema_all); 2700 2701 return 0; 2702 } 2703 2704 static int schemata_list_create(void) 2705 { 2706 struct rdt_resource *r; 2707 int ret = 0; 2708 2709 for_each_alloc_capable_rdt_resource(r) { 2710 if (resctrl_arch_get_cdp_enabled(r->rid)) { 2711 ret = schemata_list_add(r, CDP_CODE); 2712 if (ret) 2713 break; 2714 2715 ret = schemata_list_add(r, CDP_DATA); 2716 } else { 2717 ret = schemata_list_add(r, CDP_NONE); 2718 } 2719 2720 if (ret) 2721 break; 2722 } 2723 2724 return ret; 2725 } 2726 2727 static void schemata_list_destroy(void) 2728 { 2729 struct resctrl_schema *s, *tmp; 2730 2731 list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { 2732 list_del(&s->list); 2733 kfree(s); 2734 } 2735 } 2736 2737 static int rdt_get_tree(struct fs_context *fc) 2738 { 2739 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2740 unsigned long flags = RFTYPE_CTRL_BASE; 2741 struct rdt_mon_domain *dom; 2742 struct rdt_resource *r; 2743 int ret; 2744 2745 cpus_read_lock(); 2746 mutex_lock(&rdtgroup_mutex); 2747 /* 2748 * resctrl file system can only be mounted once. 2749 */ 2750 if (resctrl_mounted) { 2751 ret = -EBUSY; 2752 goto out; 2753 } 2754 2755 ret = rdtgroup_setup_root(ctx); 2756 if (ret) 2757 goto out; 2758 2759 ret = rdt_enable_ctx(ctx); 2760 if (ret) 2761 goto out_root; 2762 2763 ret = schemata_list_create(); 2764 if (ret) { 2765 schemata_list_destroy(); 2766 goto out_ctx; 2767 } 2768 2769 closid_init(); 2770 2771 if (resctrl_arch_mon_capable()) 2772 flags |= RFTYPE_MON; 2773 2774 ret = rdtgroup_add_files(rdtgroup_default.kn, flags); 2775 if (ret) 2776 goto out_schemata_free; 2777 2778 kernfs_activate(rdtgroup_default.kn); 2779 2780 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 2781 if (ret < 0) 2782 goto out_schemata_free; 2783 2784 if (resctrl_arch_mon_capable()) { 2785 ret = mongroup_create_dir(rdtgroup_default.kn, 2786 &rdtgroup_default, "mon_groups", 2787 &kn_mongrp); 2788 if (ret < 0) 2789 goto out_info; 2790 2791 ret = mkdir_mondata_all(rdtgroup_default.kn, 2792 &rdtgroup_default, &kn_mondata); 2793 if (ret < 0) 2794 goto out_mongrp; 2795 rdtgroup_default.mon.mon_data_kn = kn_mondata; 2796 } 2797 2798 ret = rdt_pseudo_lock_init(); 2799 if (ret) 2800 goto out_mondata; 2801 2802 ret = kernfs_get_tree(fc); 2803 if (ret < 0) 2804 goto out_psl; 2805 2806 if (resctrl_arch_alloc_capable()) 2807 resctrl_arch_enable_alloc(); 2808 if (resctrl_arch_mon_capable()) 2809 resctrl_arch_enable_mon(); 2810 2811 if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) 2812 resctrl_mounted = true; 2813 2814 if (resctrl_is_mbm_enabled()) { 2815 r = resctrl_arch_get_resource(RDT_RESOURCE_L3); 2816 list_for_each_entry(dom, &r->mon_domains, hdr.list) 2817 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, 2818 RESCTRL_PICK_ANY_CPU); 2819 } 2820 2821 goto out; 2822 2823 out_psl: 2824 rdt_pseudo_lock_release(); 2825 out_mondata: 2826 if (resctrl_arch_mon_capable()) 2827 kernfs_remove(kn_mondata); 2828 out_mongrp: 2829 if (resctrl_arch_mon_capable()) 2830 kernfs_remove(kn_mongrp); 2831 out_info: 2832 
kernfs_remove(kn_info); 2833 out_schemata_free: 2834 schemata_list_destroy(); 2835 out_ctx: 2836 rdt_disable_ctx(); 2837 out_root: 2838 rdtgroup_destroy_root(); 2839 out: 2840 rdt_last_cmd_clear(); 2841 mutex_unlock(&rdtgroup_mutex); 2842 cpus_read_unlock(); 2843 return ret; 2844 } 2845 2846 enum rdt_param { 2847 Opt_cdp, 2848 Opt_cdpl2, 2849 Opt_mba_mbps, 2850 Opt_debug, 2851 nr__rdt_params 2852 }; 2853 2854 static const struct fs_parameter_spec rdt_fs_parameters[] = { 2855 fsparam_flag("cdp", Opt_cdp), 2856 fsparam_flag("cdpl2", Opt_cdpl2), 2857 fsparam_flag("mba_MBps", Opt_mba_mbps), 2858 fsparam_flag("debug", Opt_debug), 2859 {} 2860 }; 2861 2862 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) 2863 { 2864 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2865 struct fs_parse_result result; 2866 const char *msg; 2867 int opt; 2868 2869 opt = fs_parse(fc, rdt_fs_parameters, param, &result); 2870 if (opt < 0) 2871 return opt; 2872 2873 switch (opt) { 2874 case Opt_cdp: 2875 ctx->enable_cdpl3 = true; 2876 return 0; 2877 case Opt_cdpl2: 2878 ctx->enable_cdpl2 = true; 2879 return 0; 2880 case Opt_mba_mbps: 2881 msg = "mba_MBps requires MBM and linear scale MBA at L3 scope"; 2882 if (!supports_mba_mbps()) 2883 return invalfc(fc, msg); 2884 ctx->enable_mba_mbps = true; 2885 return 0; 2886 case Opt_debug: 2887 ctx->enable_debug = true; 2888 return 0; 2889 } 2890 2891 return -EINVAL; 2892 } 2893 2894 static void rdt_fs_context_free(struct fs_context *fc) 2895 { 2896 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2897 2898 kernfs_free_fs_context(fc); 2899 kfree(ctx); 2900 } 2901 2902 static const struct fs_context_operations rdt_fs_context_ops = { 2903 .free = rdt_fs_context_free, 2904 .parse_param = rdt_parse_param, 2905 .get_tree = rdt_get_tree, 2906 }; 2907 2908 static int rdt_init_fs_context(struct fs_context *fc) 2909 { 2910 struct rdt_fs_context *ctx; 2911 2912 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); 2913 if (!ctx) 2914 return -ENOMEM; 2915 2916 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; 2917 fc->fs_private = &ctx->kfc; 2918 fc->ops = &rdt_fs_context_ops; 2919 put_user_ns(fc->user_ns); 2920 fc->user_ns = get_user_ns(&init_user_ns); 2921 fc->global = true; 2922 return 0; 2923 } 2924 2925 void resctrl_arch_reset_all_ctrls(struct rdt_resource *r) 2926 { 2927 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); 2928 struct rdt_hw_ctrl_domain *hw_dom; 2929 struct msr_param msr_param; 2930 struct rdt_ctrl_domain *d; 2931 int i; 2932 2933 /* Walking r->domains, ensure it can't race with cpuhp */ 2934 lockdep_assert_cpus_held(); 2935 2936 msr_param.res = r; 2937 msr_param.low = 0; 2938 msr_param.high = hw_res->num_closid; 2939 2940 /* 2941 * Disable resource control for this resource by setting all 2942 * CBMs in all ctrl_domains to the maximum mask value. Pick one CPU 2943 * from each domain to update the MSRs below. 2944 */ 2945 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { 2946 hw_dom = resctrl_to_arch_ctrl_dom(d); 2947 2948 for (i = 0; i < hw_res->num_closid; i++) 2949 hw_dom->ctrl_val[i] = resctrl_get_default_ctrl(r); 2950 msr_param.dom = d; 2951 smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1); 2952 } 2953 2954 return; 2955 } 2956 2957 /* 2958 * Move tasks from one to the other group. If @from is NULL, then all tasks 2959 * in the systems are moved unconditionally (used for teardown). 
2960 * 2961 * If @mask is not NULL the cpus on which moved tasks are running are set 2962 * in that mask so the update smp function call is restricted to affected 2963 * cpus. 2964 */ 2965 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, 2966 struct cpumask *mask) 2967 { 2968 struct task_struct *p, *t; 2969 2970 read_lock(&tasklist_lock); 2971 for_each_process_thread(p, t) { 2972 if (!from || is_closid_match(t, from) || 2973 is_rmid_match(t, from)) { 2974 resctrl_arch_set_closid_rmid(t, to->closid, 2975 to->mon.rmid); 2976 2977 /* 2978 * Order the closid/rmid stores above before the loads 2979 * in task_curr(). This pairs with the full barrier 2980 * between the rq->curr update and resctrl_sched_in() 2981 * during context switch. 2982 */ 2983 smp_mb(); 2984 2985 /* 2986 * If the task is on a CPU, set the CPU in the mask. 2987 * The detection is inaccurate as tasks might move or 2988 * schedule before the smp function call takes place. 2989 * In such a case the function call is pointless, but 2990 * there is no other side effect. 2991 */ 2992 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) 2993 cpumask_set_cpu(task_cpu(t), mask); 2994 } 2995 } 2996 read_unlock(&tasklist_lock); 2997 } 2998 2999 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 3000 { 3001 struct rdtgroup *sentry, *stmp; 3002 struct list_head *head; 3003 3004 head = &rdtgrp->mon.crdtgrp_list; 3005 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 3006 free_rmid(sentry->closid, sentry->mon.rmid); 3007 list_del(&sentry->mon.crdtgrp_list); 3008 3009 if (atomic_read(&sentry->waitcount) != 0) 3010 sentry->flags = RDT_DELETED; 3011 else 3012 rdtgroup_remove(sentry); 3013 } 3014 } 3015 3016 /* 3017 * Forcibly remove all of subdirectories under root. 3018 */ 3019 static void rmdir_all_sub(void) 3020 { 3021 struct rdtgroup *rdtgrp, *tmp; 3022 3023 /* Move all tasks to the default resource group */ 3024 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 3025 3026 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 3027 /* Free any child rmids */ 3028 free_all_child_rdtgrp(rdtgrp); 3029 3030 /* Remove each rdtgroup other than root */ 3031 if (rdtgrp == &rdtgroup_default) 3032 continue; 3033 3034 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3035 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 3036 rdtgroup_pseudo_lock_remove(rdtgrp); 3037 3038 /* 3039 * Give any CPUs back to the default group. We cannot copy 3040 * cpu_online_mask because a CPU might have executed the 3041 * offline callback already, but is still marked online. 3042 */ 3043 cpumask_or(&rdtgroup_default.cpu_mask, 3044 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3045 3046 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3047 3048 kernfs_remove(rdtgrp->kn); 3049 list_del(&rdtgrp->rdtgroup_list); 3050 3051 if (atomic_read(&rdtgrp->waitcount) != 0) 3052 rdtgrp->flags = RDT_DELETED; 3053 else 3054 rdtgroup_remove(rdtgrp); 3055 } 3056 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 3057 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 3058 3059 kernfs_remove(kn_info); 3060 kernfs_remove(kn_mongrp); 3061 kernfs_remove(kn_mondata); 3062 } 3063 3064 static void rdt_kill_sb(struct super_block *sb) 3065 { 3066 struct rdt_resource *r; 3067 3068 cpus_read_lock(); 3069 mutex_lock(&rdtgroup_mutex); 3070 3071 rdt_disable_ctx(); 3072 3073 /* Put everything back to default values. 
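 * Every CLOSID of each alloc capable resource is reset to its default
 * control value and all tasks, CPUs and directories are returned to the
 * default resource group before the superblock is destroyed.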
*/ 3074 for_each_alloc_capable_rdt_resource(r) 3075 resctrl_arch_reset_all_ctrls(r); 3076 3077 rmdir_all_sub(); 3078 rdt_pseudo_lock_release(); 3079 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 3080 schemata_list_destroy(); 3081 rdtgroup_destroy_root(); 3082 if (resctrl_arch_alloc_capable()) 3083 resctrl_arch_disable_alloc(); 3084 if (resctrl_arch_mon_capable()) 3085 resctrl_arch_disable_mon(); 3086 resctrl_mounted = false; 3087 kernfs_kill_sb(sb); 3088 mutex_unlock(&rdtgroup_mutex); 3089 cpus_read_unlock(); 3090 } 3091 3092 static struct file_system_type rdt_fs_type = { 3093 .name = "resctrl", 3094 .init_fs_context = rdt_init_fs_context, 3095 .parameters = rdt_fs_parameters, 3096 .kill_sb = rdt_kill_sb, 3097 }; 3098 3099 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 3100 void *priv) 3101 { 3102 struct kernfs_node *kn; 3103 int ret = 0; 3104 3105 kn = __kernfs_create_file(parent_kn, name, 0444, 3106 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 3107 &kf_mondata_ops, priv, NULL, NULL); 3108 if (IS_ERR(kn)) 3109 return PTR_ERR(kn); 3110 3111 ret = rdtgroup_kn_set_ugid(kn); 3112 if (ret) { 3113 kernfs_remove(kn); 3114 return ret; 3115 } 3116 3117 return ret; 3118 } 3119 3120 static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname) 3121 { 3122 struct kernfs_node *kn; 3123 3124 kn = kernfs_find_and_get(pkn, name); 3125 if (!kn) 3126 return; 3127 kernfs_put(kn); 3128 3129 if (kn->dir.subdirs <= 1) 3130 kernfs_remove(kn); 3131 else 3132 kernfs_remove_by_name(kn, subname); 3133 } 3134 3135 /* 3136 * Remove all subdirectories of mon_data of ctrl_mon groups 3137 * and monitor groups for the given domain. 3138 * Remove files and directories containing "sum" of domain data 3139 * when last domain being summed is removed. 3140 */ 3141 static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 3142 struct rdt_mon_domain *d) 3143 { 3144 struct rdtgroup *prgrp, *crgrp; 3145 char subname[32]; 3146 bool snc_mode; 3147 char name[32]; 3148 3149 snc_mode = r->mon_scope == RESCTRL_L3_NODE; 3150 sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id); 3151 if (snc_mode) 3152 sprintf(subname, "mon_sub_%s_%02d", r->name, d->hdr.id); 3153 3154 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 3155 mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname); 3156 3157 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 3158 mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname); 3159 } 3160 } 3161 3162 static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d, 3163 struct rdt_resource *r, struct rdtgroup *prgrp, 3164 bool do_sum) 3165 { 3166 struct rmid_read rr = {0}; 3167 union mon_data_bits priv; 3168 struct mon_evt *mevt; 3169 int ret; 3170 3171 if (WARN_ON(list_empty(&r->evt_list))) 3172 return -EPERM; 3173 3174 priv.u.rid = r->rid; 3175 priv.u.domid = do_sum ? 
d->ci->id : d->hdr.id; 3176 priv.u.sum = do_sum; 3177 list_for_each_entry(mevt, &r->evt_list, list) { 3178 priv.u.evtid = mevt->evtid; 3179 ret = mon_addfile(kn, mevt->name, priv.priv); 3180 if (ret) 3181 return ret; 3182 3183 if (!do_sum && resctrl_is_mbm_event(mevt->evtid)) 3184 mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true); 3185 } 3186 3187 return 0; 3188 } 3189 3190 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, 3191 struct rdt_mon_domain *d, 3192 struct rdt_resource *r, struct rdtgroup *prgrp) 3193 { 3194 struct kernfs_node *kn, *ckn; 3195 char name[32]; 3196 bool snc_mode; 3197 int ret = 0; 3198 3199 lockdep_assert_held(&rdtgroup_mutex); 3200 3201 snc_mode = r->mon_scope == RESCTRL_L3_NODE; 3202 sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci->id : d->hdr.id); 3203 kn = kernfs_find_and_get(parent_kn, name); 3204 if (kn) { 3205 /* 3206 * rdtgroup_mutex will prevent this directory from being 3207 * removed. No need to keep this hold. 3208 */ 3209 kernfs_put(kn); 3210 } else { 3211 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 3212 if (IS_ERR(kn)) 3213 return PTR_ERR(kn); 3214 3215 ret = rdtgroup_kn_set_ugid(kn); 3216 if (ret) 3217 goto out_destroy; 3218 ret = mon_add_all_files(kn, d, r, prgrp, snc_mode); 3219 if (ret) 3220 goto out_destroy; 3221 } 3222 3223 if (snc_mode) { 3224 sprintf(name, "mon_sub_%s_%02d", r->name, d->hdr.id); 3225 ckn = kernfs_create_dir(kn, name, parent_kn->mode, prgrp); 3226 if (IS_ERR(ckn)) { 3227 ret = -EINVAL; 3228 goto out_destroy; 3229 } 3230 3231 ret = rdtgroup_kn_set_ugid(ckn); 3232 if (ret) 3233 goto out_destroy; 3234 3235 ret = mon_add_all_files(ckn, d, r, prgrp, false); 3236 if (ret) 3237 goto out_destroy; 3238 } 3239 3240 kernfs_activate(kn); 3241 return 0; 3242 3243 out_destroy: 3244 kernfs_remove(kn); 3245 return ret; 3246 } 3247 3248 /* 3249 * Add all subdirectories of mon_data for "ctrl_mon" groups 3250 * and "monitor" groups with given domain id. 3251 */ 3252 static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 3253 struct rdt_mon_domain *d) 3254 { 3255 struct kernfs_node *parent_kn; 3256 struct rdtgroup *prgrp, *crgrp; 3257 struct list_head *head; 3258 3259 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 3260 parent_kn = prgrp->mon.mon_data_kn; 3261 mkdir_mondata_subdir(parent_kn, d, r, prgrp); 3262 3263 head = &prgrp->mon.crdtgrp_list; 3264 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 3265 parent_kn = crgrp->mon.mon_data_kn; 3266 mkdir_mondata_subdir(parent_kn, d, r, crgrp); 3267 } 3268 } 3269 } 3270 3271 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, 3272 struct rdt_resource *r, 3273 struct rdtgroup *prgrp) 3274 { 3275 struct rdt_mon_domain *dom; 3276 int ret; 3277 3278 /* Walking r->domains, ensure it can't race with cpuhp */ 3279 lockdep_assert_cpus_held(); 3280 3281 list_for_each_entry(dom, &r->mon_domains, hdr.list) { 3282 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); 3283 if (ret) 3284 return ret; 3285 } 3286 3287 return 0; 3288 } 3289 3290 /* 3291 * This creates a directory mon_data which contains the monitored data. 3292 * 3293 * mon_data has one directory for each domain which are named 3294 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data 3295 * with L3 domain looks as below: 3296 * ./mon_data: 3297 * mon_L3_00 3298 * mon_L3_01 3299 * mon_L3_02 3300 * ... 
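 * When Sub-NUMA Clustering is enabled (monitor scope is RESCTRL_L3_NODE)
 * each mon_L3_XX directory additionally contains mon_sub_L3_YY
 * directories, one per node sharing that cache, while the files directly
 * under mon_L3_XX report the sum across those nodes.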
3301 * 3302 * Each domain directory has one file per event: 3303 * ./mon_L3_00/: 3304 * llc_occupancy 3305 * 3306 */ 3307 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 3308 struct rdtgroup *prgrp, 3309 struct kernfs_node **dest_kn) 3310 { 3311 struct rdt_resource *r; 3312 struct kernfs_node *kn; 3313 int ret; 3314 3315 /* 3316 * Create the mon_data directory first. 3317 */ 3318 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); 3319 if (ret) 3320 return ret; 3321 3322 if (dest_kn) 3323 *dest_kn = kn; 3324 3325 /* 3326 * Create the subdirectories for each domain. Note that all events 3327 * in a domain like L3 are grouped into a resource whose domain is L3 3328 */ 3329 for_each_mon_capable_rdt_resource(r) { 3330 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); 3331 if (ret) 3332 goto out_destroy; 3333 } 3334 3335 return 0; 3336 3337 out_destroy: 3338 kernfs_remove(kn); 3339 return ret; 3340 } 3341 3342 /** 3343 * cbm_ensure_valid - Enforce validity on provided CBM 3344 * @_val: Candidate CBM 3345 * @r: RDT resource to which the CBM belongs 3346 * 3347 * The provided CBM represents all cache portions available for use. This 3348 * may be represented by a bitmap that does not consist of contiguous ones 3349 * and thus be an invalid CBM. 3350 * Here the provided CBM is forced to be a valid CBM by only considering 3351 * the first set of contiguous bits as valid and clearing all other bits. 3352 * The intention here is to provide a valid default CBM with which a new 3353 * resource group is initialized. The user can follow this with a 3354 * modification to the CBM if the default does not satisfy the 3355 * requirements. 3356 */ 3357 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) 3358 { 3359 unsigned int cbm_len = r->cache.cbm_len; 3360 unsigned long first_bit, zero_bit; 3361 unsigned long val = _val; 3362 3363 if (!val) 3364 return 0; 3365 3366 first_bit = find_first_bit(&val, cbm_len); 3367 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 3368 3369 /* Clear any remaining bits to ensure contiguous region */ 3370 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); 3371 return (u32)val; 3372 } 3373 3374 /* 3375 * Initialize cache resources per RDT domain 3376 * 3377 * Set the RDT domain up to start off with all usable allocations. That is, 3378 * all shareable and unused bits. All-zero CBM is invalid. 3379 */ 3380 static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s, 3381 u32 closid) 3382 { 3383 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); 3384 enum resctrl_conf_type t = s->conf_type; 3385 struct resctrl_staged_config *cfg; 3386 struct rdt_resource *r = s->res; 3387 u32 used_b = 0, unused_b = 0; 3388 unsigned long tmp_cbm; 3389 enum rdtgrp_mode mode; 3390 u32 peer_ctl, ctrl_val; 3391 int i; 3392 3393 cfg = &d->staged_config[t]; 3394 cfg->have_new_ctrl = false; 3395 cfg->new_ctrl = r->cache.shareable_bits; 3396 used_b = r->cache.shareable_bits; 3397 for (i = 0; i < closids_supported(); i++) { 3398 if (closid_allocated(i) && i != closid) { 3399 mode = rdtgroup_mode_by_closid(i); 3400 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 3401 /* 3402 * ctrl values for locksetup aren't relevant 3403 * until the schemata is written, and the mode 3404 * becomes RDT_MODE_PSEUDO_LOCKED. 3405 */ 3406 continue; 3407 /* 3408 * If CDP is active include peer domain's 3409 * usage to ensure there is no overlap 3410 * with an exclusive group.
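 * The "peer" is the other configuration type (CODE vs DATA) belonging
 * to the same CLOSID when CDP is enabled.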
3411 */ 3412 if (resctrl_arch_get_cdp_enabled(r->rid)) 3413 peer_ctl = resctrl_arch_get_config(r, d, i, 3414 peer_type); 3415 else 3416 peer_ctl = 0; 3417 ctrl_val = resctrl_arch_get_config(r, d, i, 3418 s->conf_type); 3419 used_b |= ctrl_val | peer_ctl; 3420 if (mode == RDT_MODE_SHAREABLE) 3421 cfg->new_ctrl |= ctrl_val | peer_ctl; 3422 } 3423 } 3424 if (d->plr && d->plr->cbm > 0) 3425 used_b |= d->plr->cbm; 3426 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 3427 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 3428 cfg->new_ctrl |= unused_b; 3429 /* 3430 * Force the initial CBM to be valid, user can 3431 * modify the CBM based on system availability. 3432 */ 3433 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); 3434 /* 3435 * Assign the u32 CBM to an unsigned long to ensure that 3436 * bitmap_weight() does not access out-of-bound memory. 3437 */ 3438 tmp_cbm = cfg->new_ctrl; 3439 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 3440 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id); 3441 return -ENOSPC; 3442 } 3443 cfg->have_new_ctrl = true; 3444 3445 return 0; 3446 } 3447 3448 /* 3449 * Initialize cache resources with default values. 3450 * 3451 * A new RDT group is being created on an allocation capable (CAT) 3452 * supporting system. Set this group up to start off with all usable 3453 * allocations. 3454 * 3455 * If there are no more shareable bits available on any domain then 3456 * the entire allocation will fail. 3457 */ 3458 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) 3459 { 3460 struct rdt_ctrl_domain *d; 3461 int ret; 3462 3463 list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) { 3464 ret = __init_one_rdt_domain(d, s, closid); 3465 if (ret < 0) 3466 return ret; 3467 } 3468 3469 return 0; 3470 } 3471 3472 /* Initialize MBA resource with default values. */ 3473 static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) 3474 { 3475 struct resctrl_staged_config *cfg; 3476 struct rdt_ctrl_domain *d; 3477 3478 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { 3479 if (is_mba_sc(r)) { 3480 d->mbps_val[closid] = MBA_MAX_MBPS; 3481 continue; 3482 } 3483 3484 cfg = &d->staged_config[CDP_NONE]; 3485 cfg->new_ctrl = resctrl_get_default_ctrl(r); 3486 cfg->have_new_ctrl = true; 3487 } 3488 } 3489 3490 /* Initialize the RDT group's allocations. 
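 * Stage a default configuration for every schema (CAT gets the unused,
 * shareable bits; MBA gets the unrestricted default) and push it to
 * hardware with resctrl_arch_update_domains().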
*/ 3491 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) 3492 { 3493 struct resctrl_schema *s; 3494 struct rdt_resource *r; 3495 int ret = 0; 3496 3497 rdt_staged_configs_clear(); 3498 3499 list_for_each_entry(s, &resctrl_schema_all, list) { 3500 r = s->res; 3501 if (r->rid == RDT_RESOURCE_MBA || 3502 r->rid == RDT_RESOURCE_SMBA) { 3503 rdtgroup_init_mba(r, rdtgrp->closid); 3504 if (is_mba_sc(r)) 3505 continue; 3506 } else { 3507 ret = rdtgroup_init_cat(s, rdtgrp->closid); 3508 if (ret < 0) 3509 goto out; 3510 } 3511 3512 ret = resctrl_arch_update_domains(r, rdtgrp->closid); 3513 if (ret < 0) { 3514 rdt_last_cmd_puts("Failed to initialize allocations\n"); 3515 goto out; 3516 } 3517 3518 } 3519 3520 rdtgrp->mode = RDT_MODE_SHAREABLE; 3521 3522 out: 3523 rdt_staged_configs_clear(); 3524 return ret; 3525 } 3526 3527 static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) 3528 { 3529 int ret; 3530 3531 if (!resctrl_arch_mon_capable()) 3532 return 0; 3533 3534 ret = alloc_rmid(rdtgrp->closid); 3535 if (ret < 0) { 3536 rdt_last_cmd_puts("Out of RMIDs\n"); 3537 return ret; 3538 } 3539 rdtgrp->mon.rmid = ret; 3540 3541 ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 3542 if (ret) { 3543 rdt_last_cmd_puts("kernfs subdir error\n"); 3544 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3545 return ret; 3546 } 3547 3548 return 0; 3549 } 3550 3551 static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) 3552 { 3553 if (resctrl_arch_mon_capable()) 3554 free_rmid(rgrp->closid, rgrp->mon.rmid); 3555 } 3556 3557 /* 3558 * We allow creating mon groups only within a directory called "mon_groups" 3559 * which is present in every ctrl_mon group. Check if this is a valid 3560 * "mon_groups" directory. 3561 * 3562 * 1. The directory should be named "mon_groups". 3563 * 2. The mon group itself should "not" be named "mon_groups". 3564 * This makes sure "mon_groups" directory always has a ctrl_mon group 3565 * as parent. 3566 */ 3567 static bool is_mon_groups(struct kernfs_node *kn, const char *name) 3568 { 3569 return (!strcmp(rdt_kn_name(kn), "mon_groups") && 3570 strcmp(name, "mon_groups")); 3571 } 3572 3573 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, 3574 const char *name, umode_t mode, 3575 enum rdt_group_type rtype, struct rdtgroup **r) 3576 { 3577 struct rdtgroup *prdtgrp, *rdtgrp; 3578 unsigned long files = 0; 3579 struct kernfs_node *kn; 3580 int ret; 3581 3582 prdtgrp = rdtgroup_kn_lock_live(parent_kn); 3583 if (!prdtgrp) { 3584 ret = -ENODEV; 3585 goto out_unlock; 3586 } 3587 3588 /* 3589 * Check that the parent directory for a monitor group is a "mon_groups" 3590 * directory. 3591 */ 3592 if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) { 3593 ret = -EPERM; 3594 goto out_unlock; 3595 } 3596 3597 if (rtype == RDTMON_GROUP && 3598 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3599 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { 3600 ret = -EINVAL; 3601 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 3602 goto out_unlock; 3603 } 3604 3605 /* allocate the rdtgroup.
*/ 3606 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 3607 if (!rdtgrp) { 3608 ret = -ENOSPC; 3609 rdt_last_cmd_puts("Kernel out of memory\n"); 3610 goto out_unlock; 3611 } 3612 *r = rdtgrp; 3613 rdtgrp->mon.parent = prdtgrp; 3614 rdtgrp->type = rtype; 3615 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 3616 3617 /* kernfs creates the directory for rdtgrp */ 3618 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 3619 if (IS_ERR(kn)) { 3620 ret = PTR_ERR(kn); 3621 rdt_last_cmd_puts("kernfs create error\n"); 3622 goto out_free_rgrp; 3623 } 3624 rdtgrp->kn = kn; 3625 3626 /* 3627 * kernfs_remove() will drop the reference count on "kn" which 3628 * will free it. But we still need it to stick around for the 3629 * rdtgroup_kn_unlock(kn) call. Take one extra reference here, 3630 * which will be dropped by kernfs_put() in rdtgroup_remove(). 3631 */ 3632 kernfs_get(kn); 3633 3634 ret = rdtgroup_kn_set_ugid(kn); 3635 if (ret) { 3636 rdt_last_cmd_puts("kernfs perm error\n"); 3637 goto out_destroy; 3638 } 3639 3640 if (rtype == RDTCTRL_GROUP) { 3641 files = RFTYPE_BASE | RFTYPE_CTRL; 3642 if (resctrl_arch_mon_capable()) 3643 files |= RFTYPE_MON; 3644 } else { 3645 files = RFTYPE_BASE | RFTYPE_MON; 3646 } 3647 3648 ret = rdtgroup_add_files(kn, files); 3649 if (ret) { 3650 rdt_last_cmd_puts("kernfs fill error\n"); 3651 goto out_destroy; 3652 } 3653 3654 /* 3655 * The caller unlocks the parent_kn upon success. 3656 */ 3657 return 0; 3658 3659 out_destroy: 3660 kernfs_put(rdtgrp->kn); 3661 kernfs_remove(rdtgrp->kn); 3662 out_free_rgrp: 3663 kfree(rdtgrp); 3664 out_unlock: 3665 rdtgroup_kn_unlock(parent_kn); 3666 return ret; 3667 } 3668 3669 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 3670 { 3671 kernfs_remove(rgrp->kn); 3672 rdtgroup_remove(rgrp); 3673 } 3674 3675 /* 3676 * Create a monitor group under "mon_groups" directory of a control 3677 * and monitor group(ctrl_mon). This is a resource group 3678 * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 3679 */ 3680 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 3681 const char *name, umode_t mode) 3682 { 3683 struct rdtgroup *rdtgrp, *prgrp; 3684 int ret; 3685 3686 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); 3687 if (ret) 3688 return ret; 3689 3690 prgrp = rdtgrp->mon.parent; 3691 rdtgrp->closid = prgrp->closid; 3692 3693 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); 3694 if (ret) { 3695 mkdir_rdt_prepare_clean(rdtgrp); 3696 goto out_unlock; 3697 } 3698 3699 kernfs_activate(rdtgrp->kn); 3700 3701 /* 3702 * Add the rdtgrp to the list of rdtgrps the parent 3703 * ctrl_mon group has to track. 3704 */ 3705 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 3706 3707 out_unlock: 3708 rdtgroup_kn_unlock(parent_kn); 3709 return ret; 3710 } 3711 3712 /* 3713 * These are rdtgroups created under the root directory. Can be used 3714 * to allocate and monitor resources. 
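 * Creating one allocates a CLOSID (and an RMID when monitoring is
 * supported), stages the default schemata and, on monitoring capable
 * systems, creates the group's "mon_data" and "mon_groups" directories.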
3715 */ 3716 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 3717 const char *name, umode_t mode) 3718 { 3719 struct rdtgroup *rdtgrp; 3720 struct kernfs_node *kn; 3721 u32 closid; 3722 int ret; 3723 3724 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); 3725 if (ret) 3726 return ret; 3727 3728 kn = rdtgrp->kn; 3729 ret = closid_alloc(); 3730 if (ret < 0) { 3731 rdt_last_cmd_puts("Out of CLOSIDs\n"); 3732 goto out_common_fail; 3733 } 3734 closid = ret; 3735 ret = 0; 3736 3737 rdtgrp->closid = closid; 3738 3739 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); 3740 if (ret) 3741 goto out_closid_free; 3742 3743 kernfs_activate(rdtgrp->kn); 3744 3745 ret = rdtgroup_init_alloc(rdtgrp); 3746 if (ret < 0) 3747 goto out_rmid_free; 3748 3749 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 3750 3751 if (resctrl_arch_mon_capable()) { 3752 /* 3753 * Create an empty mon_groups directory to hold the subset 3754 * of tasks and cpus to monitor. 3755 */ 3756 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); 3757 if (ret) { 3758 rdt_last_cmd_puts("kernfs subdir error\n"); 3759 goto out_del_list; 3760 } 3761 if (is_mba_sc(NULL)) 3762 rdtgrp->mba_mbps_event = mba_mbps_default_event; 3763 } 3764 3765 goto out_unlock; 3766 3767 out_del_list: 3768 list_del(&rdtgrp->rdtgroup_list); 3769 out_rmid_free: 3770 mkdir_rdt_prepare_rmid_free(rdtgrp); 3771 out_closid_free: 3772 closid_free(closid); 3773 out_common_fail: 3774 mkdir_rdt_prepare_clean(rdtgrp); 3775 out_unlock: 3776 rdtgroup_kn_unlock(parent_kn); 3777 return ret; 3778 } 3779 3780 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 3781 umode_t mode) 3782 { 3783 /* Do not accept '\n' to avoid unparsable situation. */ 3784 if (strchr(name, '\n')) 3785 return -EINVAL; 3786 3787 /* 3788 * If the parent directory is the root directory and RDT 3789 * allocation is supported, add a control and monitoring 3790 * subdirectory 3791 */ 3792 if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) 3793 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); 3794 3795 /* Else, attempt to add a monitoring subdirectory. */ 3796 if (resctrl_arch_mon_capable()) 3797 return rdtgroup_mkdir_mon(parent_kn, name, mode); 3798 3799 return -EPERM; 3800 } 3801 3802 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) 3803 { 3804 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 3805 u32 closid, rmid; 3806 int cpu; 3807 3808 /* Give any tasks back to the parent group */ 3809 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 3810 3811 /* 3812 * Update per cpu closid/rmid of the moved CPUs first. 3813 * Note: the closid will not change, but the arch code still needs it. 3814 */ 3815 closid = prdtgrp->closid; 3816 rmid = prdtgrp->mon.rmid; 3817 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3818 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); 3819 3820 /* 3821 * Update the MSR on moved CPUs and CPUs which have moved 3822 * task running on them. 
3823 */ 3824 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3825 update_closid_rmid(tmpmask, NULL); 3826 3827 rdtgrp->flags = RDT_DELETED; 3828 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3829 3830 /* 3831 * Remove the rdtgrp from the parent ctrl_mon group's list 3832 */ 3833 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3834 list_del(&rdtgrp->mon.crdtgrp_list); 3835 3836 kernfs_remove(rdtgrp->kn); 3837 3838 return 0; 3839 } 3840 3841 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) 3842 { 3843 rdtgrp->flags = RDT_DELETED; 3844 list_del(&rdtgrp->rdtgroup_list); 3845 3846 kernfs_remove(rdtgrp->kn); 3847 return 0; 3848 } 3849 3850 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) 3851 { 3852 u32 closid, rmid; 3853 int cpu; 3854 3855 /* Give any tasks back to the default group */ 3856 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 3857 3858 /* Give any CPUs back to the default group */ 3859 cpumask_or(&rdtgroup_default.cpu_mask, 3860 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3861 3862 /* Update per cpu closid and rmid of the moved CPUs first */ 3863 closid = rdtgroup_default.closid; 3864 rmid = rdtgroup_default.mon.rmid; 3865 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3866 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); 3867 3868 /* 3869 * Update the MSR on moved CPUs and CPUs which have moved 3870 * task running on them. 3871 */ 3872 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3873 update_closid_rmid(tmpmask, NULL); 3874 3875 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3876 closid_free(rdtgrp->closid); 3877 3878 rdtgroup_ctrl_remove(rdtgrp); 3879 3880 /* 3881 * Free all the child monitor group rmids. 3882 */ 3883 free_all_child_rdtgrp(rdtgrp); 3884 3885 return 0; 3886 } 3887 3888 static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn) 3889 { 3890 /* 3891 * Valid within the RCU section it was obtained or while rdtgroup_mutex 3892 * is held. 3893 */ 3894 return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex)); 3895 } 3896 3897 static int rdtgroup_rmdir(struct kernfs_node *kn) 3898 { 3899 struct kernfs_node *parent_kn; 3900 struct rdtgroup *rdtgrp; 3901 cpumask_var_t tmpmask; 3902 int ret = 0; 3903 3904 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 3905 return -ENOMEM; 3906 3907 rdtgrp = rdtgroup_kn_lock_live(kn); 3908 if (!rdtgrp) { 3909 ret = -EPERM; 3910 goto out; 3911 } 3912 parent_kn = rdt_kn_parent(kn); 3913 3914 /* 3915 * If the rdtgroup is a ctrl_mon group and parent directory 3916 * is the root directory, remove the ctrl_mon group. 3917 * 3918 * If the rdtgroup is a mon group and parent directory 3919 * is a valid "mon_groups" directory, remove the mon group. 
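 * Anything else (the default group, "info", or a "mon_groups" directory
 * itself) cannot be removed and -EPERM is returned.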
3920 */ 3921 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 3922 rdtgrp != &rdtgroup_default) { 3923 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3924 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 3925 ret = rdtgroup_ctrl_remove(rdtgrp); 3926 } else { 3927 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); 3928 } 3929 } else if (rdtgrp->type == RDTMON_GROUP && 3930 is_mon_groups(parent_kn, rdt_kn_name(kn))) { 3931 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); 3932 } else { 3933 ret = -EPERM; 3934 } 3935 3936 out: 3937 rdtgroup_kn_unlock(kn); 3938 free_cpumask_var(tmpmask); 3939 return ret; 3940 } 3941 3942 /** 3943 * mongrp_reparent() - replace parent CTRL_MON group of a MON group 3944 * @rdtgrp: the MON group whose parent should be replaced 3945 * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp 3946 * @cpus: cpumask provided by the caller for use during this call 3947 * 3948 * Replaces the parent CTRL_MON group for a MON group, resulting in all member 3949 * tasks' CLOSID immediately changing to that of the new parent group. 3950 * Monitoring data for the group is unaffected by this operation. 3951 */ 3952 static void mongrp_reparent(struct rdtgroup *rdtgrp, 3953 struct rdtgroup *new_prdtgrp, 3954 cpumask_var_t cpus) 3955 { 3956 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 3957 3958 WARN_ON(rdtgrp->type != RDTMON_GROUP); 3959 WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); 3960 3961 /* Nothing to do when simply renaming a MON group. */ 3962 if (prdtgrp == new_prdtgrp) 3963 return; 3964 3965 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3966 list_move_tail(&rdtgrp->mon.crdtgrp_list, 3967 &new_prdtgrp->mon.crdtgrp_list); 3968 3969 rdtgrp->mon.parent = new_prdtgrp; 3970 rdtgrp->closid = new_prdtgrp->closid; 3971 3972 /* Propagate updated closid to all tasks in this group. */ 3973 rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); 3974 3975 update_closid_rmid(cpus, NULL); 3976 } 3977 3978 static int rdtgroup_rename(struct kernfs_node *kn, 3979 struct kernfs_node *new_parent, const char *new_name) 3980 { 3981 struct kernfs_node *kn_parent; 3982 struct rdtgroup *new_prdtgrp; 3983 struct rdtgroup *rdtgrp; 3984 cpumask_var_t tmpmask; 3985 int ret; 3986 3987 rdtgrp = kernfs_to_rdtgroup(kn); 3988 new_prdtgrp = kernfs_to_rdtgroup(new_parent); 3989 if (!rdtgrp || !new_prdtgrp) 3990 return -ENOENT; 3991 3992 /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ 3993 rdtgroup_kn_get(rdtgrp, kn); 3994 rdtgroup_kn_get(new_prdtgrp, new_parent); 3995 3996 mutex_lock(&rdtgroup_mutex); 3997 3998 rdt_last_cmd_clear(); 3999 4000 /* 4001 * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if 4002 * either kernfs_node is a file. 
4003 */ 4004 if (kernfs_type(kn) != KERNFS_DIR || 4005 kernfs_type(new_parent) != KERNFS_DIR) { 4006 rdt_last_cmd_puts("Source and destination must be directories"); 4007 ret = -EPERM; 4008 goto out; 4009 } 4010 4011 if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { 4012 ret = -ENOENT; 4013 goto out; 4014 } 4015 4016 kn_parent = rdt_kn_parent(kn); 4017 if (rdtgrp->type != RDTMON_GROUP || !kn_parent || 4018 !is_mon_groups(kn_parent, rdt_kn_name(kn))) { 4019 rdt_last_cmd_puts("Source must be a MON group\n"); 4020 ret = -EPERM; 4021 goto out; 4022 } 4023 4024 if (!is_mon_groups(new_parent, new_name)) { 4025 rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); 4026 ret = -EPERM; 4027 goto out; 4028 } 4029 4030 /* 4031 * If the MON group is monitoring CPUs, the CPUs must be assigned to the 4032 * current parent CTRL_MON group and therefore cannot be assigned to 4033 * the new parent, making the move illegal. 4034 */ 4035 if (!cpumask_empty(&rdtgrp->cpu_mask) && 4036 rdtgrp->mon.parent != new_prdtgrp) { 4037 rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); 4038 ret = -EPERM; 4039 goto out; 4040 } 4041 4042 /* 4043 * Allocate the cpumask for use in mongrp_reparent() to avoid the 4044 * possibility of failing to allocate it after kernfs_rename() has 4045 * succeeded. 4046 */ 4047 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { 4048 ret = -ENOMEM; 4049 goto out; 4050 } 4051 4052 /* 4053 * Perform all input validation and allocations needed to ensure 4054 * mongrp_reparent() will succeed before calling kernfs_rename(), 4055 * otherwise it would be necessary to revert this call if 4056 * mongrp_reparent() failed. 4057 */ 4058 ret = kernfs_rename(kn, new_parent, new_name); 4059 if (!ret) 4060 mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); 4061 4062 free_cpumask_var(tmpmask); 4063 4064 out: 4065 mutex_unlock(&rdtgroup_mutex); 4066 rdtgroup_kn_put(rdtgrp, kn); 4067 rdtgroup_kn_put(new_prdtgrp, new_parent); 4068 return ret; 4069 } 4070 4071 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) 4072 { 4073 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) 4074 seq_puts(seq, ",cdp"); 4075 4076 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) 4077 seq_puts(seq, ",cdpl2"); 4078 4079 if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) 4080 seq_puts(seq, ",mba_MBps"); 4081 4082 if (resctrl_debug) 4083 seq_puts(seq, ",debug"); 4084 4085 return 0; 4086 } 4087 4088 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { 4089 .mkdir = rdtgroup_mkdir, 4090 .rmdir = rdtgroup_rmdir, 4091 .rename = rdtgroup_rename, 4092 .show_options = rdtgroup_show_options, 4093 }; 4094 4095 static int rdtgroup_setup_root(struct rdt_fs_context *ctx) 4096 { 4097 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, 4098 KERNFS_ROOT_CREATE_DEACTIVATED | 4099 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 4100 &rdtgroup_default); 4101 if (IS_ERR(rdt_root)) 4102 return PTR_ERR(rdt_root); 4103 4104 ctx->kfc.root = rdt_root; 4105 rdtgroup_default.kn = kernfs_root_to_node(rdt_root); 4106 4107 return 0; 4108 } 4109 4110 static void rdtgroup_destroy_root(void) 4111 { 4112 kernfs_destroy_root(rdt_root); 4113 rdtgroup_default.kn = NULL; 4114 } 4115 4116 static void __init rdtgroup_setup_default(void) 4117 { 4118 mutex_lock(&rdtgroup_mutex); 4119 4120 rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; 4121 rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; 4122 rdtgroup_default.type = RDTCTRL_GROUP; 4123 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); 
4124 4125 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); 4126 4127 mutex_unlock(&rdtgroup_mutex); 4128 } 4129 4130 static void domain_destroy_mon_state(struct rdt_mon_domain *d) 4131 { 4132 bitmap_free(d->rmid_busy_llc); 4133 kfree(d->mbm_total); 4134 kfree(d->mbm_local); 4135 } 4136 4137 void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d) 4138 { 4139 mutex_lock(&rdtgroup_mutex); 4140 4141 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) 4142 mba_sc_domain_destroy(r, d); 4143 4144 mutex_unlock(&rdtgroup_mutex); 4145 } 4146 4147 void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d) 4148 { 4149 mutex_lock(&rdtgroup_mutex); 4150 4151 /* 4152 * If resctrl is mounted, remove all the 4153 * per domain monitor data directories. 4154 */ 4155 if (resctrl_mounted && resctrl_arch_mon_capable()) 4156 rmdir_mondata_subdir_allrdtgrp(r, d); 4157 4158 if (resctrl_is_mbm_enabled()) 4159 cancel_delayed_work(&d->mbm_over); 4160 if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { 4161 /* 4162 * When a package is going down, forcefully 4163 * decrement rmid->ebusy. There is no way to know 4164 * that the L3 was flushed and hence may lead to 4165 * incorrect counts in rare scenarios, but leaving 4166 * the RMID as busy creates RMID leaks if the 4167 * package never comes back. 4168 */ 4169 __check_limbo(d, true); 4170 cancel_delayed_work(&d->cqm_limbo); 4171 } 4172 4173 domain_destroy_mon_state(d); 4174 4175 mutex_unlock(&rdtgroup_mutex); 4176 } 4177 4178 /** 4179 * domain_setup_mon_state() - Initialise domain monitoring structures. 4180 * @r: The resource for the newly online domain. 4181 * @d: The newly online domain. 4182 * 4183 * Allocate monitor resources that belong to this domain. 4184 * Called when the first CPU of a domain comes online, regardless of whether 4185 * the filesystem is mounted. 4186 * During boot this may be called before global allocations have been made by 4187 * resctrl_mon_resource_init(). 4188 * 4189 * Returns 0 for success, or -ENOMEM. 
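 * Any allocation made earlier in this function is freed again before an
 * error is returned.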

/**
 * domain_setup_mon_state() - Initialise domain monitoring structures.
 * @r: The resource for the newly online domain.
 * @d: The newly online domain.
 *
 * Allocate monitor resources that belong to this domain.
 * Called when the first CPU of a domain comes online, regardless of whether
 * the filesystem is mounted.
 * During boot this may be called before global allocations have been made by
 * resctrl_mon_resource_init().
 *
 * Returns 0 for success, or -ENOMEM.
 */
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	size_t tsize;

	if (resctrl_arch_is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
	}
	if (resctrl_arch_is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			bitmap_free(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (resctrl_arch_is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			bitmap_free(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	return 0;
}

int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
	int err = 0;

	mutex_lock(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
		/* RDT_RESOURCE_MBA is never mon_capable */
		err = mba_sc_domain_allocate(r, d);
	}

	mutex_unlock(&rdtgroup_mutex);

	return err;
}

int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
{
	int err;

	mutex_lock(&rdtgroup_mutex);

	err = domain_setup_mon_state(r, d);
	if (err)
		goto out_unlock;

	if (resctrl_is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
					   RESCTRL_PICK_ANY_CPU);
	}

	if (resctrl_arch_is_llc_occupancy_enabled())
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);

	/*
	 * If the filesystem is not mounted then only the default resource group
	 * exists. Creation of its directories is deferred until mount time
	 * by rdt_get_tree() calling mkdir_mondata_all().
	 * If resctrl is mounted, add per domain monitor data directories.
	 */
	if (resctrl_mounted && resctrl_arch_mon_capable())
		mkdir_mondata_subdir_allrdtgrp(r, d);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);

	return err;
}
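
/*
 * For reference, the per-domain directories created above (and removed in
 * resctrl_offline_mon_domain()) appear to userspace under each group's
 * mon_data directory. On an L3-monitoring-capable system the layout is
 * typically along these lines (domain ids and event files depend on the
 * hardware):
 *
 *	mon_data/mon_L3_00/llc_occupancy
 *	mon_data/mon_L3_00/mbm_total_bytes
 *	mon_data/mon_L3_00/mbm_local_bytes
 *	mon_data/mon_L3_01/...
 */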

void resctrl_online_cpu(unsigned int cpu)
{
	mutex_lock(&rdtgroup_mutex);
	/* The CPU is set in the default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	mutex_unlock(&rdtgroup_mutex);
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu,
						      struct rdt_resource *r)
{
	struct rdt_mon_domain *d;

	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->mon_domains, hdr.list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
			return d;
	}

	return NULL;
}

void resctrl_offline_cpu(unsigned int cpu)
{
	struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	struct rdt_mon_domain *d;
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}

	if (!l3->mon_capable)
		goto out_unlock;

	d = get_mon_domain_from_cpu(cpu, l3);
	if (d) {
		if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0, cpu);
		}
		if (resctrl_arch_is_llc_occupancy_enabled() &&
		    cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0, cpu);
		}
	}

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}
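
/*
 * Note on the offline path above: when the CPU that was running a domain's
 * deferred MBM overflow or CQM limbo work goes offline, that work is
 * cancelled and, if the domain still has other online CPUs, rescheduled
 * immediately (zero delay) on one of them, with the outgoing CPU excluded
 * from the choice.
 */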

/*
 * resctrl_init - resctrl filesystem initialization
 *
 * Set up the resctrl filesystem: set up the root, create the mount point,
 * register the resctrl filesystem type, and initialize the files under the
 * root directory.
 *
 * Return: 0 on success or -errno
 */
int __init resctrl_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	rdtgroup_setup_default();

	thread_throttle_mode_init();

	ret = resctrl_mon_resource_init();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret) {
		resctrl_mon_resource_exit();
		return ret;
	}

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (e.g. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
	 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
	 * is taken, thus creating the dependency:
	 * &mm->mmap_lock --> rdtgroup_mutex, which can cause deadlock when
	 * combined with the other two lock dependencies.
	 * By creating the debugfs directory here we avoid that dependency
	 * (even though file operations cannot occur until the filesystem is
	 * mounted, there is no way to tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	resctrl_mon_resource_exit();

	return ret;
}

void __exit resctrl_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");

	resctrl_mon_resource_exit();
}
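
/*
 * Usage note (illustrative, not code): once the filesystem type is
 * registered, resctrl is mounted from userspace, optionally with the
 * options reported back by rdtgroup_show_options(), for example:
 *
 *	mount -t resctrl resctrl /sys/fs/resctrl
 *	mount -t resctrl resctrl -o cdp,mba_MBps /sys/fs/resctrl
 */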