// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/once.h>
#include <linux/resctrl.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include "internal.h"

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

static struct kernfs_root *rdt_root;

struct rdtgroup rdtgroup_default;

LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/*
 * List of struct mon_data containing private data of event files for use by
 * rdtgroup_mondata_show(). Protected by rdtgroup_mutex.
 */
static LIST_HEAD(mon_data_kn_priv_list);

/* The filesystem can only be mounted once. */
bool resctrl_mounted;

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

/*
 * Used to store the max resource name width to display the schemata names in
 * a tabular format.
 */
int max_name_width;

static struct seq_buf last_cmd_status;

static char last_cmd_status_buf[512];

static int rdtgroup_setup_root(struct rdt_fs_context *ctx);

static void rdtgroup_destroy_root(void);

struct dentry *debugfs_resctrl;

/*
 * Memory bandwidth monitoring event to use for the default CTRL_MON group
 * and each new CTRL_MON group created by the user. Only relevant when
 * the filesystem is mounted with the "mba_MBps" option so it does not
 * matter that it remains uninitialized on systems that do not support
 * the "mba_MBps" option.
 */
enum resctrl_event_id mba_mbps_default_event;

static bool resctrl_debug;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

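/*
 * Append a formatted message to the last_cmd_status buffer. Callers must
 * hold rdtgroup_mutex; write handlers reset the buffer via
 * rdt_last_cmd_clear() so that "info/last_cmd_status" reflects only the
 * most recent operation.
 */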
void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

void rdt_staged_configs_clear(void)
{
	struct rdt_ctrl_domain *dom;
	struct rdt_resource *r;

	lockdep_assert_held(&rdtgroup_mutex);

	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(dom, &r->ctrl_domains, hdr.list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}
}

static bool resctrl_is_mbm_enabled(void)
{
	return (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID) ||
		resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID));
}

/*
 * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap
 * of free CLOSIDs.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set current's closid to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static unsigned long *closid_free_map;

static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static int closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = ~0;

	/* Monitor only platforms still call closid_init() */
	if (list_empty(&resctrl_schema_all))
		return 0;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL);
	if (!closid_free_map)
		return -ENOMEM;
	bitmap_fill(closid_free_map, rdt_min_closid);

	/* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
	__clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map);
	closid_free_map_len = rdt_min_closid;

	return 0;
}

static void closid_exit(void)
{
	bitmap_free(closid_free_map);
	closid_free_map = NULL;
}

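/*
 * Allocate a CLOSID for a new resource group. When limbo monitoring of
 * freed RMIDs depends on the CLOSID and llc_occupancy is enabled, prefer
 * the "cleanest" CLOSID (fewest dirty RMIDs) so that stale cache occupancy
 * from previous users interferes least with the new group.
 */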
static int closid_alloc(void)
{
	int cleanest_closid;
	u32 closid;

	lockdep_assert_held(&rdtgroup_mutex);

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
	    resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
		cleanest_closid = resctrl_find_cleanest_closid();
		if (cleanest_closid < 0)
			return cleanest_closid;
		closid = cleanest_closid;
	} else {
		closid = find_first_bit(closid_free_map, closid_free_map_len);
		if (closid == closid_free_map_len)
			return -ENOSPC;
	}
	__clear_bit(closid, closid_free_map);

	return closid;
}

void closid_free(int closid)
{
	lockdep_assert_held(&rdtgroup_mutex);

	__set_bit(closid, closid_free_map);
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
bool closid_allocated(unsigned int closid)
{
	lockdep_assert_held(&rdtgroup_mutex);

	return !test_bit(closid, closid_free_map);
}

bool closid_alloc_fixed(u32 closid)
{
	return __test_and_clear_bit(closid, closid_free_map);
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE] = "shareable",
	[RDT_MODE_EXCLUSIVE] = "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len = PAGE_SIZE,
	.write = rdtgroup_file_write,
	.seq_show = rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len = PAGE_SIZE,
	.seq_show = rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

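/*
 * Show the CPUs owned by a resource group: for a pseudo-locked group these
 * come from the cache domain of the pseudo-locked region, otherwise from
 * the group's own cpu_mask.
 */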
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->hdr.cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 * @r may be NULL.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	struct resctrl_cpu_defaults defaults, *p = NULL;

	if (r) {
		defaults.closid = r->closid;
		defaults.rmid = r->mon.rmid;
		p = &defaults;
	}

	on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* Update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

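/*
 * Update the CPU mask of a control group: CPUs dropped from the group are
 * given back to the default group, CPUs added are pulled from whichever
 * group currently owns them, and child monitor groups are trimmed to the
 * new parent mask.
 */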
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added CPUs, remove them from the previous group and from
	 * the previous group's child monitor groups that owned them, then
	 * update the per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

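/*
 * A task's CLOSID/RMID only take effect once they are loaded into the
 * PQR_ASSOC MSR of the CPU the task runs on, so a currently running task
 * must be interrupted on its CPU. _update_task_closid_rmid() runs from IPI
 * context via smp_call_function_single().
 */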
static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_arch_sched_in(task);
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
{
	u32 closid, rmid = rdtgrp->mon.rmid;

	if (rdtgrp->type == RDTCTRL_GROUP)
		closid = rdtgrp->closid;
	else if (rdtgrp->type == RDTMON_GROUP)
		closid = rdtgrp->mon.parent->closid;
	else
		return false;

	return resctrl_arch_match_closid(tsk, closid) &&
	       resctrl_arch_match_rmid(tsk, closid, rmid);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if (task_in_rdtgroup(tsk, rdtgrp))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated with them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, tasks can only be moved from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTMON_GROUP &&
	    !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
		rdt_last_cmd_puts("Can't move task to different control group\n");
		return -EINVAL;
	}

	if (rdtgrp->type == RDTMON_GROUP)
		resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
					     rdtgrp->mon.rmid);
	else
		resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
					     rdtgrp->mon.rmid);

	/*
	 * Ensure the task's closid and rmid are written before determining
	 * whether the task is current, which decides whether it will be
	 * interrupted. This pairs with the full barrier between the rq->curr
	 * update and resctrl_arch_sched_in() during context switch.
	 */
	smp_mb();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

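/*
 * A task matches a control group if its CLOSID matches, and a monitor
 * group if both the parent group's CLOSID and the group's RMID match.
 */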
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
		resctrl_arch_match_closid(t, r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
		resctrl_arch_match_rmid(t, r->mon.parent->closid,
					r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	char *pid_str;
	int ret = 0;
	pid_t pid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	while (buf && buf[0] != '\0' && buf[0] != '\n') {
		pid_str = strim(strsep(&buf, ","));

		if (kstrtoint(pid_str, 0, &pid)) {
			rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
			ret = -EINVAL;
			break;
		}

		if (pid < 0) {
			rdt_last_cmd_printf("Invalid pid %d\n", pid);
			ret = -EINVAL;
			break;
		}

		ret = rdtgroup_move_task(pid, rdtgrp, of);
		if (ret) {
			rdt_last_cmd_printf("Error while processing task %d\n", pid);
			break;
		}
	}

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

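/* Print the PIDs (in the caller's PID namespace) of all tasks in @r. */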
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;
	pid_t pid;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			pid = task_pid_vnr(t);
			if (pid)
				seq_printf(s, "%d\n", pid);
		}
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdtgroup_closid_show(struct kernfs_open_file *of,
				struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->closid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdtgroup_rmid_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->mon.rmid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL
/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated with that control group.
 *
 * 1) res:
 *    mon:
 *
 *    resctrl is not available.
 *
 * 2) res:/
 *    mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    with any monitor group.
 *
 * 3) res:/
 *    mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4) res:group0
 *    mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    with any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!resctrl_mounted) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (!resctrl_arch_match_closid(tsk, rdtg->closid))
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdt_kn_name(rdtg->kn));
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
						     crg->mon.rmid))
				continue;
			seq_printf(s, "%s", rdt_kn_name(crg->kn));
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

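/* "info/last_cmd_status": show the status buffer, or "ok" if it is empty. */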
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

void *rdt_kn_parent_priv(struct kernfs_node *kn)
{
	/*
	 * The parent pointer is only valid within RCU section since it can be
	 * replaced.
	 */
	guard(rcu)();
	return rcu_dereference(kn->__parent)->priv;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

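/*
 * Illustration with assumed values: for an 8-bit CBM with shareable_bits
 * 0x03, one shareable group using CBM 0x0f and one exclusive group using
 * CBM 0xf0, a domain line in "bit_usage" would read "0=EEEESSXX"
 * (bits are printed from most to least significant).
 */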
/*
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can be allocated differently per
 * resource group. Display the domain's allocation bitmask annotated as
 * below to indicate the current usage of each bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_ctrl_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		if (sep)
			seq_putc(seq, ';');
		hw_shareable = r->cache.shareable_bits;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->hdr.id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i) ||
			    (resctrl_arch_get_io_alloc_enabled(r) &&
			     i == resctrl_io_alloc_closid(r)))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}

		/*
		 * When the "io_alloc" feature is enabled, a portion of the cache
		 * is configured for shared use between hardware and software.
		 * Also, when CDP is enabled the CBMs of CDP_CODE and CDP_DATA
		 * resources are kept in sync. So, the CBMs for "io_alloc" can
		 * be accessed through either resource.
		 */
		if (resctrl_arch_get_io_alloc_enabled(r)) {
			ctrl_val = resctrl_arch_get_config(r, dom,
							   resctrl_io_alloc_closid(r),
							   s->conf_type);
			hw_shareable |= ctrl_val;
		}

		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	seq_printf(seq, "%d\n", r->mon.num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	struct mon_evt *mevt;

	for_each_mon_event(mevt) {
		if (mevt->rid != r->rid || !mevt->enabled)
			continue;
		seq_printf(seq, "%s\n", mevt->name);
		if (mevt->configurable &&
		    !resctrl_arch_mbm_cntr_assign_enabled(r))
			seq_printf(seq, "%s_config\n", mevt->name);
	}

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	switch (r->membw.throttle_mode) {
	case THREAD_THROTTLE_PER_THREAD:
		seq_puts(seq, "per-thread\n");
		return 0;
	case THREAD_THROTTLE_MAX:
		seq_puts(seq, "max\n");
		return 0;
	case THREAD_THROTTLE_UNDEFINED:
		seq_puts(seq, "undefined\n");
		return 0;
	}

	WARN_ON_ONCE(1);

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;

	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

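/*
 * With CDP, the CODE and DATA schemata of a cache resource are carved out
 * of the same physical cache. resctrl_peer_type() returns the "other half"
 * so that configuration checks can be applied to both.
 */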
enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
					struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);

	return 0;
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. The overlap test is not limited to the specific resource
 * for which the CBM is intended though - when dealing with CDP resources
 * that share the underlying hardware the overlap check should be performed
 * on the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_ctrl_domain *d;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	u32 ctrl;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/*
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

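	/* A pseudo-locked region cannot be changed to any other mode. */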
	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
		   !strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided, associated with the RDT domain instance @d, will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_ctrl_domain *d, unsigned long cbm)
{
	unsigned int size = 0;
	struct cacheinfo *ci;
	int num_b;

	if (WARN_ON_ONCE(r->ctrl_scope != RESCTRL_L2_CACHE && r->ctrl_scope != RESCTRL_L3_CACHE))
		return size;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo_level(cpumask_any(&d->hdr.cpu_mask), r->ctrl_scope);
	if (ci)
		size = ci->size / r->cache.cbm_len * num_b;

	return size;
}

bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);

	/*
	 * The software controller support is only applicable to MBA resource.
	 * Make sure to check for resource type.
	 */
	if (r->rid != RDT_RESOURCE_MBA)
		return false;

	return r->membw.mba_sc;
}

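/*
 * Worked example (assumed numbers): on a 32MB L3 cache with a 16-bit CBM,
 * each CBM bit represents 32MB / 16 = 2MB, so a schema line whose mask has
 * four bits set is reported as 4 * 2MB = 8388608 bytes.
 */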
/*
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	enum resctrl_conf_type type;
	struct rdt_ctrl_domain *d;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	unsigned int size;
	int ret = 0;
	u32 closid;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->hdr.id, size);
		}
		goto out;
	}

	closid = rdtgrp->closid;

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				if (is_mba_sc(r))
					ctrl = d->mbps_val[closid];
				else
					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA ||
				    r->rid == RDT_RESOURCE_SMBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->hdr.id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
{
	smp_call_function_any(&mon_info->d->hdr.cpu_mask,
			      resctrl_arch_mon_event_config_read, mon_info, 1);
}

static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{
	struct resctrl_mon_config_info mon_info;
	struct rdt_l3_mon_domain *dom;
	bool sep = false;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	list_for_each_entry(dom, &r->mon_domains, hdr.list) {
		if (sep)
			seq_puts(s, ";");

		memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
		mon_info.r = r;
		mon_info.d = dom;
		mon_info.evtid = evtid;
		mondata_config_read(&mon_info);

		seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
		sep = true;
	}
	seq_puts(s, "\n");

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return 0;
}

static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);

	return 0;
}

static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);

	mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);

	return 0;
}

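/*
 * Event configurations are scoped at the domain level, not per RMID:
 * writing the configuration on any one CPU of the domain applies to the
 * whole domain, and changing it makes the hardware clear its bandwidth
 * counters, hence the software MBM state is reset as well.
 */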
static void mbm_config_write_domain(struct rdt_resource *r,
				    struct rdt_l3_mon_domain *d, u32 evtid, u32 val)
{
	struct resctrl_mon_config_info mon_info = {0};

	/*
	 * Read the current config value first. If both are the same then
	 * no need to write it again.
	 */
	mon_info.r = r;
	mon_info.d = d;
	mon_info.evtid = evtid;
	mondata_config_read(&mon_info);
	if (mon_info.mon_config == val)
		return;

	mon_info.mon_config = val;

	/*
	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
	 * are scoped at the domain level. Writing any of these MSRs
	 * on one CPU is observed by all the CPUs in the domain.
	 */
	smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
			      &mon_info, 1);

	/*
	 * When an Event Configuration is changed, the bandwidth counters
	 * for all RMIDs and Events will be cleared by the hardware. The
	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
	 * every RMID on the next read to any event for every RMID.
	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
	 * cleared while it is tracked by the hardware. Clear the
	 * mbm_local and mbm_total counts for all the RMIDs.
	 */
	resctrl_arch_reset_rmid_all(r, d);
}

static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
	char *dom_str = NULL, *id_str;
	struct rdt_l3_mon_domain *d;
	unsigned long dom_id, val;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

next:
	if (!tok || tok[0] == '\0')
		return 0;

	/* Start processing the strings for each domain */
	dom_str = strim(strsep(&tok, ";"));
	id_str = strsep(&dom_str, "=");

	if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
		return -EINVAL;
	}

	if (!dom_str || kstrtoul(dom_str, 16, &val)) {
		rdt_last_cmd_puts("Non-numeric event configuration value\n");
		return -EINVAL;
	}

	/* Value from user cannot be more than the supported set of events */
	if ((val & r->mon.mbm_cfg_mask) != val) {
		rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
				    r->mon.mbm_cfg_mask);
		return -EINVAL;
	}

	list_for_each_entry(d, &r->mon_domains, hdr.list) {
		if (d->hdr.id == dom_id) {
			mbm_config_write_domain(r, d, evtid, val);
			goto next;
		}
	}

	return -EINVAL;
}

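/*
 * Both config files accept a semicolon-separated list of
 * "<domain id>=<value>" pairs with hexadecimal values, e.g. "0=0x30;1=0x30",
 * terminated by a newline.
 */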
static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return ret ?: nbytes;
}

static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return ret ?: nbytes;
}

/*
 * resctrl_bmec_files_show() - Controls the visibility of BMEC-related resctrl
 * files. When @show is true, the files are displayed; when false, the files
 * are hidden.
 *
 * Don't treat kernfs_find_and_get() failure as an error, since this function
 * may be called regardless of whether BMEC is supported or the event is
 * enabled.
 */
void resctrl_bmec_files_show(struct rdt_resource *r, struct kernfs_node *l3_mon_kn,
			     bool show)
{
	struct kernfs_node *kn_config, *mon_kn = NULL;
	char name[32];

	if (!l3_mon_kn) {
		sprintf(name, "%s_MON", r->name);
		mon_kn = kernfs_find_and_get(kn_info, name);
		if (!mon_kn)
			return;
		l3_mon_kn = mon_kn;
	}

	kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_total_bytes_config");
	if (kn_config) {
		kernfs_show(kn_config, show);
		kernfs_put(kn_config);
	}

	kn_config = kernfs_find_and_get(l3_mon_kn, "mbm_local_bytes_config");
	if (kn_config) {
		kernfs_show(kn_config, show);
		kernfs_put(kn_config);
	}

	/* Release the reference only if it was acquired */
	if (mon_kn)
		kernfs_put(mon_kn);
}

const char *rdtgroup_name_by_closid(u32 closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdt_kn_name(rdtgrp->kn);
	}

	return NULL;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name = "last_cmd_status",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_last_cmd_status_show,
		.fflags = RFTYPE_TOP_INFO,
	},
	{
		.name = "mbm_assign_on_mkdir",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = resctrl_mbm_assign_on_mkdir_show,
		.write = resctrl_mbm_assign_on_mkdir_write,
	},
	{
		.name = "num_closids",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_num_closids_show,
		.fflags = RFTYPE_CTRL_INFO,
	},
	{
		.name = "mon_features",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_mon_features_show,
		.fflags = RFTYPE_MON_INFO,
	},
	{
		.name = "available_mbm_cntrs",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = resctrl_available_mbm_cntrs_show,
	},
	{
		.name = "num_rmids",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_num_rmids_show,
		.fflags = RFTYPE_MON_INFO,
	},
	{
		.name = "cbm_mask",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_default_ctrl_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "num_mbm_cntrs",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = resctrl_num_mbm_cntrs_show,
	},
	{
		.name = "min_cbm_bits",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_min_cbm_bits_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name = "shareable_bits",
		.mode = 0444,
		.kf_ops = &rdtgroup_kf_single_ops,
		.seq_show = rdt_shareable_bits_show,
		.fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
RFTYPE_RES_CACHE, 1945 }, 1946 { 1947 .name = "bit_usage", 1948 .mode = 0444, 1949 .kf_ops = &rdtgroup_kf_single_ops, 1950 .seq_show = rdt_bit_usage_show, 1951 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, 1952 }, 1953 { 1954 .name = "min_bandwidth", 1955 .mode = 0444, 1956 .kf_ops = &rdtgroup_kf_single_ops, 1957 .seq_show = rdt_min_bw_show, 1958 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, 1959 }, 1960 { 1961 .name = "bandwidth_gran", 1962 .mode = 0444, 1963 .kf_ops = &rdtgroup_kf_single_ops, 1964 .seq_show = rdt_bw_gran_show, 1965 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, 1966 }, 1967 { 1968 .name = "delay_linear", 1969 .mode = 0444, 1970 .kf_ops = &rdtgroup_kf_single_ops, 1971 .seq_show = rdt_delay_linear_show, 1972 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, 1973 }, 1974 /* 1975 * Platform specific which (if any) capabilities are provided by 1976 * thread_throttle_mode. Defer "fflags" initialization to platform 1977 * discovery. 1978 */ 1979 { 1980 .name = "thread_throttle_mode", 1981 .mode = 0444, 1982 .kf_ops = &rdtgroup_kf_single_ops, 1983 .seq_show = rdt_thread_throttle_mode_show, 1984 }, 1985 { 1986 .name = "io_alloc", 1987 .mode = 0644, 1988 .kf_ops = &rdtgroup_kf_single_ops, 1989 .seq_show = resctrl_io_alloc_show, 1990 .write = resctrl_io_alloc_write, 1991 }, 1992 { 1993 .name = "io_alloc_cbm", 1994 .mode = 0644, 1995 .kf_ops = &rdtgroup_kf_single_ops, 1996 .seq_show = resctrl_io_alloc_cbm_show, 1997 .write = resctrl_io_alloc_cbm_write, 1998 }, 1999 { 2000 .name = "max_threshold_occupancy", 2001 .mode = 0644, 2002 .kf_ops = &rdtgroup_kf_single_ops, 2003 .write = max_threshold_occ_write, 2004 .seq_show = max_threshold_occ_show, 2005 .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, 2006 }, 2007 { 2008 .name = "mbm_total_bytes_config", 2009 .mode = 0644, 2010 .kf_ops = &rdtgroup_kf_single_ops, 2011 .seq_show = mbm_total_bytes_config_show, 2012 .write = mbm_total_bytes_config_write, 2013 }, 2014 { 2015 .name = "mbm_local_bytes_config", 2016 .mode = 0644, 2017 .kf_ops = &rdtgroup_kf_single_ops, 2018 .seq_show = mbm_local_bytes_config_show, 2019 .write = mbm_local_bytes_config_write, 2020 }, 2021 { 2022 .name = "event_filter", 2023 .mode = 0644, 2024 .kf_ops = &rdtgroup_kf_single_ops, 2025 .seq_show = event_filter_show, 2026 .write = event_filter_write, 2027 }, 2028 { 2029 .name = "mbm_L3_assignments", 2030 .mode = 0644, 2031 .kf_ops = &rdtgroup_kf_single_ops, 2032 .seq_show = mbm_L3_assignments_show, 2033 .write = mbm_L3_assignments_write, 2034 }, 2035 { 2036 .name = "mbm_assign_mode", 2037 .mode = 0644, 2038 .kf_ops = &rdtgroup_kf_single_ops, 2039 .seq_show = resctrl_mbm_assign_mode_show, 2040 .write = resctrl_mbm_assign_mode_write, 2041 .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, 2042 }, 2043 { 2044 .name = "cpus", 2045 .mode = 0644, 2046 .kf_ops = &rdtgroup_kf_single_ops, 2047 .write = rdtgroup_cpus_write, 2048 .seq_show = rdtgroup_cpus_show, 2049 .fflags = RFTYPE_BASE, 2050 }, 2051 { 2052 .name = "cpus_list", 2053 .mode = 0644, 2054 .kf_ops = &rdtgroup_kf_single_ops, 2055 .write = rdtgroup_cpus_write, 2056 .seq_show = rdtgroup_cpus_show, 2057 .flags = RFTYPE_FLAGS_CPUS_LIST, 2058 .fflags = RFTYPE_BASE, 2059 }, 2060 { 2061 .name = "tasks", 2062 .mode = 0644, 2063 .kf_ops = &rdtgroup_kf_single_ops, 2064 .write = rdtgroup_tasks_write, 2065 .seq_show = rdtgroup_tasks_show, 2066 .fflags = RFTYPE_BASE, 2067 }, 2068 { 2069 .name = "mon_hw_id", 2070 .mode = 0444, 2071 .kf_ops = &rdtgroup_kf_single_ops, 2072 .seq_show = rdtgroup_rmid_show, 2073 .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, 
2074 }, 2075 { 2076 .name = "schemata", 2077 .mode = 0644, 2078 .kf_ops = &rdtgroup_kf_single_ops, 2079 .write = rdtgroup_schemata_write, 2080 .seq_show = rdtgroup_schemata_show, 2081 .fflags = RFTYPE_CTRL_BASE, 2082 }, 2083 { 2084 .name = "mba_MBps_event", 2085 .mode = 0644, 2086 .kf_ops = &rdtgroup_kf_single_ops, 2087 .write = rdtgroup_mba_mbps_event_write, 2088 .seq_show = rdtgroup_mba_mbps_event_show, 2089 }, 2090 { 2091 .name = "mode", 2092 .mode = 0644, 2093 .kf_ops = &rdtgroup_kf_single_ops, 2094 .write = rdtgroup_mode_write, 2095 .seq_show = rdtgroup_mode_show, 2096 .fflags = RFTYPE_CTRL_BASE, 2097 }, 2098 { 2099 .name = "size", 2100 .mode = 0444, 2101 .kf_ops = &rdtgroup_kf_single_ops, 2102 .seq_show = rdtgroup_size_show, 2103 .fflags = RFTYPE_CTRL_BASE, 2104 }, 2105 { 2106 .name = "sparse_masks", 2107 .mode = 0444, 2108 .kf_ops = &rdtgroup_kf_single_ops, 2109 .seq_show = rdt_has_sparse_bitmasks_show, 2110 .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, 2111 }, 2112 { 2113 .name = "ctrl_hw_id", 2114 .mode = 0444, 2115 .kf_ops = &rdtgroup_kf_single_ops, 2116 .seq_show = rdtgroup_closid_show, 2117 .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, 2118 }, 2119 }; 2120 2121 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) 2122 { 2123 struct rftype *rfts, *rft; 2124 int ret, len; 2125 2126 rfts = res_common_files; 2127 len = ARRAY_SIZE(res_common_files); 2128 2129 lockdep_assert_held(&rdtgroup_mutex); 2130 2131 if (resctrl_debug) 2132 fflags |= RFTYPE_DEBUG; 2133 2134 for (rft = rfts; rft < rfts + len; rft++) { 2135 if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { 2136 ret = rdtgroup_add_file(kn, rft); 2137 if (ret) 2138 goto error; 2139 } 2140 } 2141 2142 return 0; 2143 error: 2144 pr_warn("Failed to add %s, err=%d\n", rft->name, ret); 2145 while (--rft >= rfts) { 2146 if ((fflags & rft->fflags) == rft->fflags) 2147 kernfs_remove_by_name(kn, rft->name); 2148 } 2149 return ret; 2150 } 2151 2152 static struct rftype *rdtgroup_get_rftype_by_name(const char *name) 2153 { 2154 struct rftype *rfts, *rft; 2155 int len; 2156 2157 rfts = res_common_files; 2158 len = ARRAY_SIZE(res_common_files); 2159 2160 for (rft = rfts; rft < rfts + len; rft++) { 2161 if (!strcmp(rft->name, name)) 2162 return rft; 2163 } 2164 2165 return NULL; 2166 } 2167 2168 static void thread_throttle_mode_init(void) 2169 { 2170 enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED; 2171 struct rdt_resource *r_mba, *r_smba; 2172 2173 r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); 2174 if (r_mba->alloc_capable && 2175 r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) 2176 throttle_mode = r_mba->membw.throttle_mode; 2177 2178 r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA); 2179 if (r_smba->alloc_capable && 2180 r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED) 2181 throttle_mode = r_smba->membw.throttle_mode; 2182 2183 if (throttle_mode == THREAD_THROTTLE_UNDEFINED) 2184 return; 2185 2186 resctrl_file_fflags_init("thread_throttle_mode", 2187 RFTYPE_CTRL_INFO | RFTYPE_RES_MB); 2188 } 2189 2190 /* 2191 * The resctrl file "io_alloc" is added using L3 resource. However, it results 2192 * in this file being visible for *all* cache resources (eg. L2 cache), 2193 * whether it supports "io_alloc" or not. 
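 *
 * The "io_alloc" and "io_alloc_cbm" entries in res_common_files[] are
 * therefore declared with zero "fflags", which keeps them hidden from
 * rdtgroup_add_files(); they only become visible once io_alloc_init()
 * below finds an io_alloc capable L3 cache and assigns their fflags.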
 */
static void io_alloc_init(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);

	if (r->cache.io_alloc_capable) {
		resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
						     RFTYPE_RES_CACHE);
		resctrl_file_fflags_init("io_alloc_cbm",
					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
	}
}

void resctrl_file_fflags_init(const char *config, unsigned long fflags)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name(config);
	if (rft)
		rft->fflags = fflags;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of the named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because, after the file's permissions
 * are restricted, the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
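 *
 * A usage sketch (illustrative only, not taken from a specific caller):
 * a caller that temporarily locked down the "tasks" file of group @rdtgrp
 * would pair the two helpers as
 *
 *	ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
 *	...
 *	ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
 *
 * where the 0777 mask restores every permission bit that the "tasks"
 * entry in res_common_files[] (mode 0644) allows.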
2270 */ 2271 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, 2272 umode_t mask) 2273 { 2274 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 2275 struct kernfs_node *kn, *parent; 2276 struct rftype *rfts, *rft; 2277 int ret, len; 2278 2279 rfts = res_common_files; 2280 len = ARRAY_SIZE(res_common_files); 2281 2282 for (rft = rfts; rft < rfts + len; rft++) { 2283 if (!strcmp(rft->name, name)) 2284 iattr.ia_mode = rft->mode & mask; 2285 } 2286 2287 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 2288 if (!kn) 2289 return -ENOENT; 2290 2291 switch (kernfs_type(kn)) { 2292 case KERNFS_DIR: 2293 parent = kernfs_get_parent(kn); 2294 if (parent) { 2295 iattr.ia_mode |= parent->mode; 2296 kernfs_put(parent); 2297 } 2298 iattr.ia_mode |= S_IFDIR; 2299 break; 2300 case KERNFS_FILE: 2301 iattr.ia_mode |= S_IFREG; 2302 break; 2303 case KERNFS_LINK: 2304 iattr.ia_mode |= S_IFLNK; 2305 break; 2306 } 2307 2308 ret = kernfs_setattr(kn, &iattr); 2309 kernfs_put(kn); 2310 return ret; 2311 } 2312 2313 static int resctrl_mkdir_event_configs(struct rdt_resource *r, struct kernfs_node *l3_mon_kn) 2314 { 2315 struct kernfs_node *kn_subdir, *kn_subdir2; 2316 struct mon_evt *mevt; 2317 int ret; 2318 2319 kn_subdir = kernfs_create_dir(l3_mon_kn, "event_configs", l3_mon_kn->mode, NULL); 2320 if (IS_ERR(kn_subdir)) 2321 return PTR_ERR(kn_subdir); 2322 2323 ret = rdtgroup_kn_set_ugid(kn_subdir); 2324 if (ret) 2325 return ret; 2326 2327 for_each_mon_event(mevt) { 2328 if (mevt->rid != r->rid || !mevt->enabled || !resctrl_is_mbm_event(mevt->evtid)) 2329 continue; 2330 2331 kn_subdir2 = kernfs_create_dir(kn_subdir, mevt->name, kn_subdir->mode, mevt); 2332 if (IS_ERR(kn_subdir2)) { 2333 ret = PTR_ERR(kn_subdir2); 2334 goto out; 2335 } 2336 2337 ret = rdtgroup_kn_set_ugid(kn_subdir2); 2338 if (ret) 2339 goto out; 2340 2341 ret = rdtgroup_add_files(kn_subdir2, RFTYPE_ASSIGN_CONFIG); 2342 if (ret) 2343 break; 2344 } 2345 2346 out: 2347 return ret; 2348 } 2349 2350 static int rdtgroup_mkdir_info_resdir(void *priv, char *name, 2351 unsigned long fflags) 2352 { 2353 struct kernfs_node *kn_subdir; 2354 struct rdt_resource *r; 2355 int ret; 2356 2357 kn_subdir = kernfs_create_dir(kn_info, name, 2358 kn_info->mode, priv); 2359 if (IS_ERR(kn_subdir)) 2360 return PTR_ERR(kn_subdir); 2361 2362 ret = rdtgroup_kn_set_ugid(kn_subdir); 2363 if (ret) 2364 return ret; 2365 2366 ret = rdtgroup_add_files(kn_subdir, fflags); 2367 if (ret) 2368 return ret; 2369 2370 if ((fflags & RFTYPE_MON_INFO) == RFTYPE_MON_INFO) { 2371 r = priv; 2372 if (r->mon.mbm_cntr_assignable) { 2373 ret = resctrl_mkdir_event_configs(r, kn_subdir); 2374 if (ret) 2375 return ret; 2376 /* 2377 * Hide BMEC related files if mbm_event mode 2378 * is enabled. 
2379 */ 2380 if (resctrl_arch_mbm_cntr_assign_enabled(r)) 2381 resctrl_bmec_files_show(r, kn_subdir, false); 2382 } 2383 } 2384 2385 kernfs_activate(kn_subdir); 2386 2387 return ret; 2388 } 2389 2390 static unsigned long fflags_from_resource(struct rdt_resource *r) 2391 { 2392 switch (r->rid) { 2393 case RDT_RESOURCE_L3: 2394 case RDT_RESOURCE_L2: 2395 return RFTYPE_RES_CACHE; 2396 case RDT_RESOURCE_MBA: 2397 case RDT_RESOURCE_SMBA: 2398 return RFTYPE_RES_MB; 2399 case RDT_RESOURCE_PERF_PKG: 2400 return RFTYPE_RES_PERF_PKG; 2401 } 2402 2403 return WARN_ON_ONCE(1); 2404 } 2405 2406 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) 2407 { 2408 struct resctrl_schema *s; 2409 struct rdt_resource *r; 2410 unsigned long fflags; 2411 char name[32]; 2412 int ret; 2413 2414 /* create the directory */ 2415 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); 2416 if (IS_ERR(kn_info)) 2417 return PTR_ERR(kn_info); 2418 2419 ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); 2420 if (ret) 2421 goto out_destroy; 2422 2423 /* loop over enabled controls, these are all alloc_capable */ 2424 list_for_each_entry(s, &resctrl_schema_all, list) { 2425 r = s->res; 2426 fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO; 2427 ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); 2428 if (ret) 2429 goto out_destroy; 2430 } 2431 2432 for_each_mon_capable_rdt_resource(r) { 2433 fflags = fflags_from_resource(r) | RFTYPE_MON_INFO; 2434 sprintf(name, "%s_MON", r->name); 2435 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 2436 if (ret) 2437 goto out_destroy; 2438 } 2439 2440 ret = rdtgroup_kn_set_ugid(kn_info); 2441 if (ret) 2442 goto out_destroy; 2443 2444 kernfs_activate(kn_info); 2445 2446 return 0; 2447 2448 out_destroy: 2449 kernfs_remove(kn_info); 2450 return ret; 2451 } 2452 2453 static int 2454 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 2455 char *name, struct kernfs_node **dest_kn) 2456 { 2457 struct kernfs_node *kn; 2458 int ret; 2459 2460 /* create the directory */ 2461 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2462 if (IS_ERR(kn)) 2463 return PTR_ERR(kn); 2464 2465 if (dest_kn) 2466 *dest_kn = kn; 2467 2468 ret = rdtgroup_kn_set_ugid(kn); 2469 if (ret) 2470 goto out_destroy; 2471 2472 kernfs_activate(kn); 2473 2474 return 0; 2475 2476 out_destroy: 2477 kernfs_remove(kn); 2478 return ret; 2479 } 2480 2481 static inline bool is_mba_linear(void) 2482 { 2483 return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; 2484 } 2485 2486 static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d) 2487 { 2488 u32 num_closid = resctrl_arch_get_num_closid(r); 2489 int cpu = cpumask_any(&d->hdr.cpu_mask); 2490 int i; 2491 2492 d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), 2493 GFP_KERNEL, cpu_to_node(cpu)); 2494 if (!d->mbps_val) 2495 return -ENOMEM; 2496 2497 for (i = 0; i < num_closid; i++) 2498 d->mbps_val[i] = MBA_MAX_MBPS; 2499 2500 return 0; 2501 } 2502 2503 static void mba_sc_domain_destroy(struct rdt_resource *r, 2504 struct rdt_ctrl_domain *d) 2505 { 2506 kfree(d->mbps_val); 2507 d->mbps_val = NULL; 2508 } 2509 2510 /* 2511 * MBA software controller is supported only if 2512 * MBM is supported and MBA is in linear scale, 2513 * and the MBM monitor scope is the same as MBA 2514 * control scope. 
 */
static bool supports_mba_mbps(void)
{
	struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);

	return (resctrl_is_mbm_enabled() &&
		r->alloc_capable && is_mba_linear() &&
		r->ctrl_scope == rmbm->mon_scope);
}

/*
 * Enable or disable the MBA software controller
 * which helps the user specify bandwidth in MBps.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rdt_ctrl_domain *d;
	unsigned long fflags;
	int i;

	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;

	rdtgroup_default.mba_mbps_event = mba_mbps_default_event;

	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		for (i = 0; i < num_closid; i++)
			d->mbps_val[i] = MBA_MAX_MBPS;
	}

	fflags = mba_sc ? RFTYPE_CTRL_BASE | RFTYPE_MON_BASE : 0;
	resctrl_file_fflags_init("mba_MBps_event", fflags);

	return 0;
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, or at a file, in which case we need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info ||
		    rcu_access_pointer(kn->__parent) == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return rdt_kn_parent_priv(kn);
	}
}

static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{
	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);
}

static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	rdtgroup_kn_get(rdtgrp, kn);

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited?
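	 * Note that even when NULL is returned here the locks taken above
	 * remain held: the caller is still expected to call
	 * rdtgroup_kn_unlock(), which drops rdtgroup_mutex, the cpus read
	 * lock and the reference taken by rdtgroup_kn_get().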
*/ 2616 if (rdtgrp->flags & RDT_DELETED) 2617 return NULL; 2618 2619 return rdtgrp; 2620 } 2621 2622 void rdtgroup_kn_unlock(struct kernfs_node *kn) 2623 { 2624 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2625 2626 if (!rdtgrp) 2627 return; 2628 2629 mutex_unlock(&rdtgroup_mutex); 2630 cpus_read_unlock(); 2631 2632 rdtgroup_kn_put(rdtgrp, kn); 2633 } 2634 2635 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2636 struct rdtgroup *prgrp, 2637 struct kernfs_node **mon_data_kn); 2638 2639 static void rdt_disable_ctx(void) 2640 { 2641 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); 2642 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); 2643 set_mba_sc(false); 2644 2645 resctrl_debug = false; 2646 } 2647 2648 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 2649 { 2650 int ret = 0; 2651 2652 if (ctx->enable_cdpl2) { 2653 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); 2654 if (ret) 2655 goto out_done; 2656 } 2657 2658 if (ctx->enable_cdpl3) { 2659 ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); 2660 if (ret) 2661 goto out_cdpl2; 2662 } 2663 2664 if (ctx->enable_mba_mbps) { 2665 ret = set_mba_sc(true); 2666 if (ret) 2667 goto out_cdpl3; 2668 } 2669 2670 if (ctx->enable_debug) 2671 resctrl_debug = true; 2672 2673 return 0; 2674 2675 out_cdpl3: 2676 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); 2677 out_cdpl2: 2678 resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); 2679 out_done: 2680 return ret; 2681 } 2682 2683 static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) 2684 { 2685 struct resctrl_schema *s; 2686 const char *suffix = ""; 2687 int ret, cl; 2688 2689 s = kzalloc(sizeof(*s), GFP_KERNEL); 2690 if (!s) 2691 return -ENOMEM; 2692 2693 s->res = r; 2694 s->num_closid = resctrl_arch_get_num_closid(r); 2695 if (resctrl_arch_get_cdp_enabled(r->rid)) 2696 s->num_closid /= 2; 2697 2698 s->conf_type = type; 2699 switch (type) { 2700 case CDP_CODE: 2701 suffix = "CODE"; 2702 break; 2703 case CDP_DATA: 2704 suffix = "DATA"; 2705 break; 2706 case CDP_NONE: 2707 suffix = ""; 2708 break; 2709 } 2710 2711 ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); 2712 if (ret >= sizeof(s->name)) { 2713 kfree(s); 2714 return -EINVAL; 2715 } 2716 2717 cl = strlen(s->name); 2718 2719 /* 2720 * If CDP is supported by this resource, but not enabled, 2721 * include the suffix. This ensures the tabular format of the 2722 * schemata file does not change between mounts of the filesystem. 
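	 *
	 * For example, a CDP capable "L3" resource is always sized as if it
	 * were named "L3CODE"/"L3DATA"; the 4 added below is the length of
	 * that "CODE"/"DATA" suffix.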
2723 */ 2724 if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) 2725 cl += 4; 2726 2727 if (cl > max_name_width) 2728 max_name_width = cl; 2729 2730 switch (r->schema_fmt) { 2731 case RESCTRL_SCHEMA_BITMAP: 2732 s->fmt_str = "%d=%x"; 2733 break; 2734 case RESCTRL_SCHEMA_RANGE: 2735 s->fmt_str = "%d=%u"; 2736 break; 2737 } 2738 2739 if (WARN_ON_ONCE(!s->fmt_str)) { 2740 kfree(s); 2741 return -EINVAL; 2742 } 2743 2744 INIT_LIST_HEAD(&s->list); 2745 list_add(&s->list, &resctrl_schema_all); 2746 2747 return 0; 2748 } 2749 2750 static int schemata_list_create(void) 2751 { 2752 struct rdt_resource *r; 2753 int ret = 0; 2754 2755 for_each_alloc_capable_rdt_resource(r) { 2756 if (resctrl_arch_get_cdp_enabled(r->rid)) { 2757 ret = schemata_list_add(r, CDP_CODE); 2758 if (ret) 2759 break; 2760 2761 ret = schemata_list_add(r, CDP_DATA); 2762 } else { 2763 ret = schemata_list_add(r, CDP_NONE); 2764 } 2765 2766 if (ret) 2767 break; 2768 } 2769 2770 return ret; 2771 } 2772 2773 static void schemata_list_destroy(void) 2774 { 2775 struct resctrl_schema *s, *tmp; 2776 2777 list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { 2778 list_del(&s->list); 2779 kfree(s); 2780 } 2781 } 2782 2783 static int rdt_get_tree(struct fs_context *fc) 2784 { 2785 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2786 unsigned long flags = RFTYPE_CTRL_BASE; 2787 struct rdt_l3_mon_domain *dom; 2788 struct rdt_resource *r; 2789 int ret; 2790 2791 DO_ONCE_SLEEPABLE(resctrl_arch_pre_mount); 2792 2793 cpus_read_lock(); 2794 mutex_lock(&rdtgroup_mutex); 2795 /* 2796 * resctrl file system can only be mounted once. 2797 */ 2798 if (resctrl_mounted) { 2799 ret = -EBUSY; 2800 goto out; 2801 } 2802 2803 ret = rdtgroup_setup_root(ctx); 2804 if (ret) 2805 goto out; 2806 2807 ret = rdt_enable_ctx(ctx); 2808 if (ret) 2809 goto out_root; 2810 2811 ret = schemata_list_create(); 2812 if (ret) 2813 goto out_schemata_free; 2814 2815 ret = closid_init(); 2816 if (ret) 2817 goto out_schemata_free; 2818 2819 if (resctrl_arch_mon_capable()) 2820 flags |= RFTYPE_MON; 2821 2822 ret = rdtgroup_add_files(rdtgroup_default.kn, flags); 2823 if (ret) 2824 goto out_closid_exit; 2825 2826 kernfs_activate(rdtgroup_default.kn); 2827 2828 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 2829 if (ret < 0) 2830 goto out_closid_exit; 2831 2832 if (resctrl_arch_mon_capable()) { 2833 ret = mongroup_create_dir(rdtgroup_default.kn, 2834 &rdtgroup_default, "mon_groups", 2835 &kn_mongrp); 2836 if (ret < 0) 2837 goto out_info; 2838 2839 rdtgroup_assign_cntrs(&rdtgroup_default); 2840 2841 ret = mkdir_mondata_all(rdtgroup_default.kn, 2842 &rdtgroup_default, &kn_mondata); 2843 if (ret < 0) 2844 goto out_mongrp; 2845 rdtgroup_default.mon.mon_data_kn = kn_mondata; 2846 } 2847 2848 ret = rdt_pseudo_lock_init(); 2849 if (ret) 2850 goto out_mondata; 2851 2852 ret = kernfs_get_tree(fc); 2853 if (ret < 0) 2854 goto out_psl; 2855 2856 if (resctrl_arch_alloc_capable()) 2857 resctrl_arch_enable_alloc(); 2858 if (resctrl_arch_mon_capable()) 2859 resctrl_arch_enable_mon(); 2860 2861 if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) 2862 resctrl_mounted = true; 2863 2864 if (resctrl_is_mbm_enabled()) { 2865 r = resctrl_arch_get_resource(RDT_RESOURCE_L3); 2866 list_for_each_entry(dom, &r->mon_domains, hdr.list) 2867 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, 2868 RESCTRL_PICK_ANY_CPU); 2869 } 2870 2871 goto out; 2872 2873 out_psl: 2874 rdt_pseudo_lock_release(); 2875 out_mondata: 2876 if (resctrl_arch_mon_capable()) 2877 
	kernfs_remove(kn_mondata);
out_mongrp:
	if (resctrl_arch_mon_capable()) {
		rdtgroup_unassign_cntrs(&rdtgroup_default);
		kernfs_remove(kn_mongrp);
	}
out_info:
	kernfs_remove(kn_info);
out_closid_exit:
	closid_exit();
out_schemata_free:
	schemata_list_destroy();
	rdt_disable_ctx();
out_root:
	rdtgroup_destroy_root();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	Opt_debug,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp", Opt_cdp),
	fsparam_flag("cdpl2", Opt_cdpl2),
	fsparam_flag("mba_MBps", Opt_mba_mbps),
	fsparam_flag("debug", Opt_debug),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	const char *msg;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		msg = "mba_MBps requires MBM and linear scale MBA at L3 scope";
		if (!supports_mba_mbps())
			return invalfc(fc, msg);
		ctx->enable_mba_mbps = true;
		return 0;
	case Opt_debug:
		ctx->enable_debug = true;
		return 0;
	}

	return -EINVAL;
}

static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free = rdt_fs_context_free,
	.parse_param = rdt_parse_param,
	.get_tree = rdt_get_tree,
};

static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so that the update smp function call is restricted to
 * affected cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			resctrl_arch_set_closid_rmid(t, to->closid,
						     to->mon.rmid);

			/*
			 * Order the closid/rmid stores above before the loads
			 * in task_curr(). This pairs with the full barrier
			 * between the rq->curr update and
			 * resctrl_arch_sched_in() during context switch.
			 */
			smp_mb();

			/*
			 * If the task is on a CPU, set the CPU in the mask.
3008 * The detection is inaccurate as tasks might move or 3009 * schedule before the smp function call takes place. 3010 * In such a case the function call is pointless, but 3011 * there is no other side effect. 3012 */ 3013 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) 3014 cpumask_set_cpu(task_cpu(t), mask); 3015 } 3016 } 3017 read_unlock(&tasklist_lock); 3018 } 3019 3020 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 3021 { 3022 struct rdtgroup *sentry, *stmp; 3023 struct list_head *head; 3024 3025 head = &rdtgrp->mon.crdtgrp_list; 3026 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 3027 rdtgroup_unassign_cntrs(sentry); 3028 free_rmid(sentry->closid, sentry->mon.rmid); 3029 list_del(&sentry->mon.crdtgrp_list); 3030 3031 if (atomic_read(&sentry->waitcount) != 0) 3032 sentry->flags = RDT_DELETED; 3033 else 3034 rdtgroup_remove(sentry); 3035 } 3036 } 3037 3038 /* 3039 * Forcibly remove all of subdirectories under root. 3040 */ 3041 static void rmdir_all_sub(void) 3042 { 3043 struct rdtgroup *rdtgrp, *tmp; 3044 3045 /* Move all tasks to the default resource group */ 3046 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 3047 3048 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 3049 /* Free any child rmids */ 3050 free_all_child_rdtgrp(rdtgrp); 3051 3052 /* Remove each rdtgroup other than root */ 3053 if (rdtgrp == &rdtgroup_default) 3054 continue; 3055 3056 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3057 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 3058 rdtgroup_pseudo_lock_remove(rdtgrp); 3059 3060 /* 3061 * Give any CPUs back to the default group. We cannot copy 3062 * cpu_online_mask because a CPU might have executed the 3063 * offline callback already, but is still marked online. 3064 */ 3065 cpumask_or(&rdtgroup_default.cpu_mask, 3066 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3067 3068 rdtgroup_unassign_cntrs(rdtgrp); 3069 3070 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3071 3072 kernfs_remove(rdtgrp->kn); 3073 list_del(&rdtgrp->rdtgroup_list); 3074 3075 if (atomic_read(&rdtgrp->waitcount) != 0) 3076 rdtgrp->flags = RDT_DELETED; 3077 else 3078 rdtgroup_remove(rdtgrp); 3079 } 3080 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 3081 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 3082 3083 kernfs_remove(kn_info); 3084 kernfs_remove(kn_mongrp); 3085 kernfs_remove(kn_mondata); 3086 } 3087 3088 /** 3089 * mon_get_kn_priv() - Get the mon_data priv data for this event. 3090 * 3091 * The same values are used across the mon_data directories of all control and 3092 * monitor groups for the same event in the same domain. Keep a list of 3093 * allocated structures and re-use an existing one with the same values for 3094 * @rid, @domid, etc. 3095 * 3096 * @rid: The resource id for the event file being created. 3097 * @domid: The domain id for the event file being created. 3098 * @mevt: The type of event file being created. 3099 * @do_sum: Whether SNC summing monitors are being created. Only set 3100 * when @rid == RDT_RESOURCE_L3. 
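 *
 * Return: Pointer to a mon_data structure matching the arguments (re-used
 * if one already exists on the list), or NULL if the allocation failed.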
3101 */ 3102 static struct mon_data *mon_get_kn_priv(enum resctrl_res_level rid, int domid, 3103 struct mon_evt *mevt, 3104 bool do_sum) 3105 { 3106 struct mon_data *priv; 3107 3108 lockdep_assert_held(&rdtgroup_mutex); 3109 3110 list_for_each_entry(priv, &mon_data_kn_priv_list, list) { 3111 if (priv->rid == rid && priv->domid == domid && 3112 priv->sum == do_sum && priv->evt == mevt) 3113 return priv; 3114 } 3115 3116 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3117 if (!priv) 3118 return NULL; 3119 3120 priv->rid = rid; 3121 priv->domid = domid; 3122 priv->sum = do_sum; 3123 priv->evt = mevt; 3124 list_add_tail(&priv->list, &mon_data_kn_priv_list); 3125 3126 return priv; 3127 } 3128 3129 /** 3130 * mon_put_kn_priv() - Free all allocated mon_data structures. 3131 * 3132 * Called when resctrl file system is unmounted. 3133 */ 3134 static void mon_put_kn_priv(void) 3135 { 3136 struct mon_data *priv, *tmp; 3137 3138 lockdep_assert_held(&rdtgroup_mutex); 3139 3140 list_for_each_entry_safe(priv, tmp, &mon_data_kn_priv_list, list) { 3141 list_del(&priv->list); 3142 kfree(priv); 3143 } 3144 } 3145 3146 static void resctrl_fs_teardown(void) 3147 { 3148 lockdep_assert_held(&rdtgroup_mutex); 3149 3150 /* Cleared by rdtgroup_destroy_root() */ 3151 if (!rdtgroup_default.kn) 3152 return; 3153 3154 rmdir_all_sub(); 3155 rdtgroup_unassign_cntrs(&rdtgroup_default); 3156 mon_put_kn_priv(); 3157 rdt_pseudo_lock_release(); 3158 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 3159 closid_exit(); 3160 schemata_list_destroy(); 3161 rdtgroup_destroy_root(); 3162 } 3163 3164 static void rdt_kill_sb(struct super_block *sb) 3165 { 3166 struct rdt_resource *r; 3167 3168 cpus_read_lock(); 3169 mutex_lock(&rdtgroup_mutex); 3170 3171 rdt_disable_ctx(); 3172 3173 /* Put everything back to default values. */ 3174 for_each_alloc_capable_rdt_resource(r) 3175 resctrl_arch_reset_all_ctrls(r); 3176 3177 resctrl_fs_teardown(); 3178 if (resctrl_arch_alloc_capable()) 3179 resctrl_arch_disable_alloc(); 3180 if (resctrl_arch_mon_capable()) 3181 resctrl_arch_disable_mon(); 3182 resctrl_mounted = false; 3183 kernfs_kill_sb(sb); 3184 mutex_unlock(&rdtgroup_mutex); 3185 cpus_read_unlock(); 3186 } 3187 3188 static struct file_system_type rdt_fs_type = { 3189 .name = "resctrl", 3190 .init_fs_context = rdt_init_fs_context, 3191 .parameters = rdt_fs_parameters, 3192 .kill_sb = rdt_kill_sb, 3193 }; 3194 3195 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 3196 void *priv) 3197 { 3198 struct kernfs_node *kn; 3199 int ret = 0; 3200 3201 kn = __kernfs_create_file(parent_kn, name, 0444, 3202 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 3203 &kf_mondata_ops, priv, NULL, NULL); 3204 if (IS_ERR(kn)) 3205 return PTR_ERR(kn); 3206 3207 ret = rdtgroup_kn_set_ugid(kn); 3208 if (ret) { 3209 kernfs_remove(kn); 3210 return ret; 3211 } 3212 3213 return ret; 3214 } 3215 3216 static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname) 3217 { 3218 struct kernfs_node *kn; 3219 3220 kn = kernfs_find_and_get(pkn, name); 3221 if (!kn) 3222 return; 3223 kernfs_put(kn); 3224 3225 if (kn->dir.subdirs <= 1) 3226 kernfs_remove(kn); 3227 else 3228 kernfs_remove_by_name(kn, subname); 3229 } 3230 3231 /* 3232 * Remove all subdirectories of mon_data of ctrl_mon groups 3233 * and monitor groups for the given domain. 3234 * Remove files and directories containing "sum" of domain data 3235 * when last domain being summed is removed. 
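 *
 * With SNC enabled (monitor scope is RESCTRL_L3_NODE) the names follow the
 * sprintf() formats used below: the summing parent directory is named after
 * the L3 cache instance (e.g. "mon_L3_00") and each per-node subdirectory
 * after the domain id (e.g. "mon_sub_L3_01").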
3236 */ 3237 static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 3238 struct rdt_domain_hdr *hdr) 3239 { 3240 struct rdtgroup *prgrp, *crgrp; 3241 struct rdt_l3_mon_domain *d; 3242 char subname[32]; 3243 bool snc_mode; 3244 char name[32]; 3245 3246 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) 3247 return; 3248 3249 d = container_of(hdr, struct rdt_l3_mon_domain, hdr); 3250 snc_mode = r->mon_scope == RESCTRL_L3_NODE; 3251 sprintf(name, "mon_%s_%02d", r->name, snc_mode ? d->ci_id : hdr->id); 3252 if (snc_mode) 3253 sprintf(subname, "mon_sub_%s_%02d", r->name, hdr->id); 3254 3255 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 3256 mon_rmdir_one_subdir(prgrp->mon.mon_data_kn, name, subname); 3257 3258 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 3259 mon_rmdir_one_subdir(crgrp->mon.mon_data_kn, name, subname); 3260 } 3261 } 3262 3263 /* 3264 * Create a directory for a domain and populate it with monitor files. Create 3265 * summing monitors when @hdr is NULL. No need to initialize summing monitors. 3266 */ 3267 static struct kernfs_node *_mkdir_mondata_subdir(struct kernfs_node *parent_kn, char *name, 3268 struct rdt_domain_hdr *hdr, 3269 struct rdt_resource *r, 3270 struct rdtgroup *prgrp, int domid) 3271 { 3272 struct rmid_read rr = {0}; 3273 struct kernfs_node *kn; 3274 struct mon_data *priv; 3275 struct mon_evt *mevt; 3276 int ret; 3277 3278 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 3279 if (IS_ERR(kn)) 3280 return kn; 3281 3282 ret = rdtgroup_kn_set_ugid(kn); 3283 if (ret) 3284 goto out_destroy; 3285 3286 for_each_mon_event(mevt) { 3287 if (mevt->rid != r->rid || !mevt->enabled) 3288 continue; 3289 priv = mon_get_kn_priv(r->rid, domid, mevt, !hdr); 3290 if (WARN_ON_ONCE(!priv)) { 3291 ret = -EINVAL; 3292 goto out_destroy; 3293 } 3294 3295 ret = mon_addfile(kn, mevt->name, priv); 3296 if (ret) 3297 goto out_destroy; 3298 3299 if (hdr && resctrl_is_mbm_event(mevt->evtid)) 3300 mon_event_read(&rr, r, hdr, prgrp, &hdr->cpu_mask, mevt, true); 3301 } 3302 3303 return kn; 3304 out_destroy: 3305 kernfs_remove(kn); 3306 return ERR_PTR(ret); 3307 } 3308 3309 static int mkdir_mondata_subdir_snc(struct kernfs_node *parent_kn, 3310 struct rdt_domain_hdr *hdr, 3311 struct rdt_resource *r, struct rdtgroup *prgrp) 3312 { 3313 struct kernfs_node *ckn, *kn; 3314 struct rdt_l3_mon_domain *d; 3315 char name[32]; 3316 3317 if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) 3318 return -EINVAL; 3319 3320 d = container_of(hdr, struct rdt_l3_mon_domain, hdr); 3321 sprintf(name, "mon_%s_%02d", r->name, d->ci_id); 3322 kn = kernfs_find_and_get(parent_kn, name); 3323 if (kn) { 3324 /* 3325 * rdtgroup_mutex will prevent this directory from being 3326 * removed. No need to keep this hold. 
 */
		kernfs_put(kn);
	} else {
		kn = _mkdir_mondata_subdir(parent_kn, name, NULL, r, prgrp, d->ci_id);
		if (IS_ERR(kn))
			return PTR_ERR(kn);
	}

	sprintf(name, "mon_sub_%s_%02d", r->name, hdr->id);
	ckn = _mkdir_mondata_subdir(kn, name, hdr, r, prgrp, hdr->id);
	if (IS_ERR(ckn)) {
		kernfs_remove(kn);
		return PTR_ERR(ckn);
	}

	kernfs_activate(kn);
	return 0;
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain_hdr *hdr,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	struct kernfs_node *kn;
	char name[32];

	lockdep_assert_held(&rdtgroup_mutex);

	if (r->rid == RDT_RESOURCE_L3 && r->mon_scope == RESCTRL_L3_NODE)
		return mkdir_mondata_subdir_snc(parent_kn, hdr, r, prgrp);

	sprintf(name, "mon_%s_%02d", r->name, hdr->id);
	kn = _mkdir_mondata_subdir(parent_kn, name, hdr, r, prgrp, hdr->id);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	kernfs_activate(kn);
	return 0;
}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with the given domain id.
 */
static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   struct rdt_domain_hdr *hdr)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, hdr, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, hdr, r, crgrp);
		}
	}
}

static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain_hdr *hdr;
	int ret;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(hdr, &r->mon_domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, hdr, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain, named in the format
 * mon_<domain_name>_<domain_id>. For example, mon_data with L3 domains
 * looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_capable_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val: Candidate CBM
 * @r: RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all other bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;
	unsigned long val;

	if (!_val || r->cache.arch_has_sparse_bitmasks)
		return _val;

	val = _val;
	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
	return (u32)val;
}

/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
				 u32 closid)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, ctrl_val;
	int i;

	cfg = &d->staged_config[t];
	cfg->have_new_ctrl = false;
	cfg->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	for (i = 0; i < closids_supported(); i++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				/*
				 * ctrl values for locksetup aren't relevant
				 * until the schemata is written, and the mode
				 * becomes RDT_MODE_PSEUDO_LOCKED.
				 */
				continue;
			/*
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
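			 * The peer is the other half of the CDP pair:
			 * resctrl_peer_type() yields the CODE configuration
			 * for a DATA schema and vice versa.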
3531 */ 3532 if (resctrl_arch_get_cdp_enabled(r->rid)) 3533 peer_ctl = resctrl_arch_get_config(r, d, i, 3534 peer_type); 3535 else 3536 peer_ctl = 0; 3537 ctrl_val = resctrl_arch_get_config(r, d, i, 3538 s->conf_type); 3539 used_b |= ctrl_val | peer_ctl; 3540 if (mode == RDT_MODE_SHAREABLE) 3541 cfg->new_ctrl |= ctrl_val | peer_ctl; 3542 } 3543 } 3544 if (d->plr && d->plr->cbm > 0) 3545 used_b |= d->plr->cbm; 3546 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 3547 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 3548 cfg->new_ctrl |= unused_b; 3549 /* 3550 * Force the initial CBM to be valid, user can 3551 * modify the CBM based on system availability. 3552 */ 3553 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); 3554 /* 3555 * Assign the u32 CBM to an unsigned long to ensure that 3556 * bitmap_weight() does not access out-of-bound memory. 3557 */ 3558 tmp_cbm = cfg->new_ctrl; 3559 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 3560 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->hdr.id); 3561 return -ENOSPC; 3562 } 3563 cfg->have_new_ctrl = true; 3564 3565 return 0; 3566 } 3567 3568 /* 3569 * Initialize cache resources with default values. 3570 * 3571 * A new RDT group is being created on an allocation capable (CAT) 3572 * supporting system. Set this group up to start off with all usable 3573 * allocations. 3574 * 3575 * If there are no more shareable bits available on any domain then 3576 * the entire allocation will fail. 3577 */ 3578 int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) 3579 { 3580 struct rdt_ctrl_domain *d; 3581 int ret; 3582 3583 list_for_each_entry(d, &s->res->ctrl_domains, hdr.list) { 3584 ret = __init_one_rdt_domain(d, s, closid); 3585 if (ret < 0) 3586 return ret; 3587 } 3588 3589 return 0; 3590 } 3591 3592 /* Initialize MBA resource with default values. */ 3593 static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) 3594 { 3595 struct resctrl_staged_config *cfg; 3596 struct rdt_ctrl_domain *d; 3597 3598 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { 3599 if (is_mba_sc(r)) { 3600 d->mbps_val[closid] = MBA_MAX_MBPS; 3601 continue; 3602 } 3603 3604 cfg = &d->staged_config[CDP_NONE]; 3605 cfg->new_ctrl = resctrl_get_default_ctrl(r); 3606 cfg->have_new_ctrl = true; 3607 } 3608 } 3609 3610 /* Initialize the RDT group's allocations. 
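 *
 * Cache resources are staged via rdtgroup_init_cat(), MBA-like resources
 * via rdtgroup_init_mba(), and the staged values are then committed with
 * resctrl_arch_update_domains().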
 */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	int ret = 0;

	rdt_staged_configs_clear();

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA ||
		    r->rid == RDT_RESOURCE_SMBA) {
			rdtgroup_init_mba(r, rdtgrp->closid);
			if (is_mba_sc(r))
				continue;
		} else {
			ret = rdtgroup_init_cat(s, rdtgrp->closid);
			if (ret < 0)
				goto out;
		}

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			goto out;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

out:
	rdt_staged_configs_clear();
	return ret;
}

static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
{
	int ret;

	if (!resctrl_arch_mon_capable())
		return 0;

	ret = alloc_rmid(rdtgrp->closid);
	if (ret < 0) {
		rdt_last_cmd_puts("Out of RMIDs\n");
		return ret;
	}
	rdtgrp->mon.rmid = ret;

	rdtgroup_assign_cntrs(rdtgrp);

	ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs subdir error\n");
		rdtgroup_unassign_cntrs(rdtgrp);
		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
		return ret;
	}

	return 0;
}

static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
{
	if (resctrl_arch_mon_capable()) {
		rdtgroup_unassign_cntrs(rgrp);
		free_rmid(rgrp->closid, rgrp->mon.rmid);
	}
}

/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure the "mon_groups" directory always has a ctrl_mon group
 *    as its parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
		strcmp(name, "mon_groups"));
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	unsigned long files = 0;
	struct kernfs_node *kn;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
	if (!prdtgrp) {
		ret = -ENODEV;
		goto out_unlock;
	}

	rdt_last_cmd_clear();

	/*
	 * Check that the parent directory for a monitor group is a "mon_groups"
	 * directory.
	 */
	if (rtype == RDTMON_GROUP && !is_mon_groups(parent_kn, name)) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup.
*/ 3732 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 3733 if (!rdtgrp) { 3734 ret = -ENOSPC; 3735 rdt_last_cmd_puts("Kernel out of memory\n"); 3736 goto out_unlock; 3737 } 3738 *r = rdtgrp; 3739 rdtgrp->mon.parent = prdtgrp; 3740 rdtgrp->type = rtype; 3741 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 3742 3743 /* kernfs creates the directory for rdtgrp */ 3744 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 3745 if (IS_ERR(kn)) { 3746 ret = PTR_ERR(kn); 3747 rdt_last_cmd_puts("kernfs create error\n"); 3748 goto out_free_rgrp; 3749 } 3750 rdtgrp->kn = kn; 3751 3752 /* 3753 * kernfs_remove() will drop the reference count on "kn" which 3754 * will free it. But we still need it to stick around for the 3755 * rdtgroup_kn_unlock(kn) call. Take one extra reference here, 3756 * which will be dropped by kernfs_put() in rdtgroup_remove(). 3757 */ 3758 kernfs_get(kn); 3759 3760 ret = rdtgroup_kn_set_ugid(kn); 3761 if (ret) { 3762 rdt_last_cmd_puts("kernfs perm error\n"); 3763 goto out_destroy; 3764 } 3765 3766 if (rtype == RDTCTRL_GROUP) { 3767 files = RFTYPE_BASE | RFTYPE_CTRL; 3768 if (resctrl_arch_mon_capable()) 3769 files |= RFTYPE_MON; 3770 } else { 3771 files = RFTYPE_BASE | RFTYPE_MON; 3772 } 3773 3774 ret = rdtgroup_add_files(kn, files); 3775 if (ret) { 3776 rdt_last_cmd_puts("kernfs fill error\n"); 3777 goto out_destroy; 3778 } 3779 3780 /* 3781 * The caller unlocks the parent_kn upon success. 3782 */ 3783 return 0; 3784 3785 out_destroy: 3786 kernfs_put(rdtgrp->kn); 3787 kernfs_remove(rdtgrp->kn); 3788 out_free_rgrp: 3789 kfree(rdtgrp); 3790 out_unlock: 3791 rdtgroup_kn_unlock(parent_kn); 3792 return ret; 3793 } 3794 3795 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 3796 { 3797 kernfs_remove(rgrp->kn); 3798 rdtgroup_remove(rgrp); 3799 } 3800 3801 /* 3802 * Create a monitor group under "mon_groups" directory of a control 3803 * and monitor group(ctrl_mon). This is a resource group 3804 * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 3805 */ 3806 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 3807 const char *name, umode_t mode) 3808 { 3809 struct rdtgroup *rdtgrp, *prgrp; 3810 int ret; 3811 3812 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); 3813 if (ret) 3814 return ret; 3815 3816 prgrp = rdtgrp->mon.parent; 3817 rdtgrp->closid = prgrp->closid; 3818 3819 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); 3820 if (ret) { 3821 mkdir_rdt_prepare_clean(rdtgrp); 3822 goto out_unlock; 3823 } 3824 3825 kernfs_activate(rdtgrp->kn); 3826 3827 /* 3828 * Add the rdtgrp to the list of rdtgrps the parent 3829 * ctrl_mon group has to track. 3830 */ 3831 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 3832 3833 out_unlock: 3834 rdtgroup_kn_unlock(parent_kn); 3835 return ret; 3836 } 3837 3838 /* 3839 * These are rdtgroups created under the root directory. Can be used 3840 * to allocate and monitor resources. 
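 *
 * For example (assuming the conventional /sys/fs/resctrl mount point),
 * "mkdir /sys/fs/resctrl/p0" ends up here and creates a new CTRL_MON
 * group named "p0".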
3841 */ 3842 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 3843 const char *name, umode_t mode) 3844 { 3845 struct rdtgroup *rdtgrp; 3846 struct kernfs_node *kn; 3847 u32 closid; 3848 int ret; 3849 3850 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); 3851 if (ret) 3852 return ret; 3853 3854 kn = rdtgrp->kn; 3855 ret = closid_alloc(); 3856 if (ret < 0) { 3857 rdt_last_cmd_puts("Out of CLOSIDs\n"); 3858 goto out_common_fail; 3859 } 3860 closid = ret; 3861 ret = 0; 3862 3863 rdtgrp->closid = closid; 3864 3865 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); 3866 if (ret) 3867 goto out_closid_free; 3868 3869 kernfs_activate(rdtgrp->kn); 3870 3871 ret = rdtgroup_init_alloc(rdtgrp); 3872 if (ret < 0) 3873 goto out_rmid_free; 3874 3875 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 3876 3877 if (resctrl_arch_mon_capable()) { 3878 /* 3879 * Create an empty mon_groups directory to hold the subset 3880 * of tasks and cpus to monitor. 3881 */ 3882 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); 3883 if (ret) { 3884 rdt_last_cmd_puts("kernfs subdir error\n"); 3885 goto out_del_list; 3886 } 3887 if (is_mba_sc(NULL)) 3888 rdtgrp->mba_mbps_event = mba_mbps_default_event; 3889 } 3890 3891 goto out_unlock; 3892 3893 out_del_list: 3894 list_del(&rdtgrp->rdtgroup_list); 3895 out_rmid_free: 3896 mkdir_rdt_prepare_rmid_free(rdtgrp); 3897 out_closid_free: 3898 closid_free(closid); 3899 out_common_fail: 3900 mkdir_rdt_prepare_clean(rdtgrp); 3901 out_unlock: 3902 rdtgroup_kn_unlock(parent_kn); 3903 return ret; 3904 } 3905 3906 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 3907 umode_t mode) 3908 { 3909 /* Do not accept '\n' to avoid unparsable situation. */ 3910 if (strchr(name, '\n')) 3911 return -EINVAL; 3912 3913 /* 3914 * If the parent directory is the root directory and RDT 3915 * allocation is supported, add a control and monitoring 3916 * subdirectory 3917 */ 3918 if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) 3919 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); 3920 3921 /* Else, attempt to add a monitoring subdirectory. */ 3922 if (resctrl_arch_mon_capable()) 3923 return rdtgroup_mkdir_mon(parent_kn, name, mode); 3924 3925 return -EPERM; 3926 } 3927 3928 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) 3929 { 3930 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 3931 u32 closid, rmid; 3932 int cpu; 3933 3934 /* Give any tasks back to the parent group */ 3935 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 3936 3937 /* 3938 * Update per cpu closid/rmid of the moved CPUs first. 3939 * Note: the closid will not change, but the arch code still needs it. 3940 */ 3941 closid = prdtgrp->closid; 3942 rmid = prdtgrp->mon.rmid; 3943 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3944 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); 3945 3946 /* 3947 * Update the MSR on moved CPUs and CPUs which have moved 3948 * task running on them. 
3949 */ 3950 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3951 update_closid_rmid(tmpmask, NULL); 3952 3953 rdtgrp->flags = RDT_DELETED; 3954 3955 rdtgroup_unassign_cntrs(rdtgrp); 3956 3957 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 3958 3959 /* 3960 * Remove the rdtgrp from the parent ctrl_mon group's list 3961 */ 3962 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3963 list_del(&rdtgrp->mon.crdtgrp_list); 3964 3965 kernfs_remove(rdtgrp->kn); 3966 3967 return 0; 3968 } 3969 3970 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) 3971 { 3972 rdtgrp->flags = RDT_DELETED; 3973 list_del(&rdtgrp->rdtgroup_list); 3974 3975 kernfs_remove(rdtgrp->kn); 3976 return 0; 3977 } 3978 3979 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) 3980 { 3981 u32 closid, rmid; 3982 int cpu; 3983 3984 /* Give any tasks back to the default group */ 3985 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 3986 3987 /* Give any CPUs back to the default group */ 3988 cpumask_or(&rdtgroup_default.cpu_mask, 3989 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3990 3991 /* Update per cpu closid and rmid of the moved CPUs first */ 3992 closid = rdtgroup_default.closid; 3993 rmid = rdtgroup_default.mon.rmid; 3994 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3995 resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); 3996 3997 /* 3998 * Update the MSR on moved CPUs and CPUs which have moved 3999 * task running on them. 4000 */ 4001 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 4002 update_closid_rmid(tmpmask, NULL); 4003 4004 rdtgroup_unassign_cntrs(rdtgrp); 4005 4006 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); 4007 closid_free(rdtgrp->closid); 4008 4009 rdtgroup_ctrl_remove(rdtgrp); 4010 4011 /* 4012 * Free all the child monitor group rmids. 4013 */ 4014 free_all_child_rdtgrp(rdtgrp); 4015 4016 return 0; 4017 } 4018 4019 static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn) 4020 { 4021 /* 4022 * Valid within the RCU section it was obtained or while rdtgroup_mutex 4023 * is held. 4024 */ 4025 return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex)); 4026 } 4027 4028 static int rdtgroup_rmdir(struct kernfs_node *kn) 4029 { 4030 struct kernfs_node *parent_kn; 4031 struct rdtgroup *rdtgrp; 4032 cpumask_var_t tmpmask; 4033 int ret = 0; 4034 4035 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 4036 return -ENOMEM; 4037 4038 rdtgrp = rdtgroup_kn_lock_live(kn); 4039 if (!rdtgrp) { 4040 ret = -EPERM; 4041 goto out; 4042 } 4043 parent_kn = rdt_kn_parent(kn); 4044 4045 /* 4046 * If the rdtgroup is a ctrl_mon group and parent directory 4047 * is the root directory, remove the ctrl_mon group. 4048 * 4049 * If the rdtgroup is a mon group and parent directory 4050 * is a valid "mon_groups" directory, remove the mon group. 
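	 *
	 * Any other directory (the default group, "info", "mon_groups",
	 * "mon_data", ...) cannot be removed and is rejected below with
	 * -EPERM.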
4051 */ 4052 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 4053 rdtgrp != &rdtgroup_default) { 4054 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 4055 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 4056 ret = rdtgroup_ctrl_remove(rdtgrp); 4057 } else { 4058 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); 4059 } 4060 } else if (rdtgrp->type == RDTMON_GROUP && 4061 is_mon_groups(parent_kn, rdt_kn_name(kn))) { 4062 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); 4063 } else { 4064 ret = -EPERM; 4065 } 4066 4067 out: 4068 rdtgroup_kn_unlock(kn); 4069 free_cpumask_var(tmpmask); 4070 return ret; 4071 } 4072 4073 /** 4074 * mongrp_reparent() - replace parent CTRL_MON group of a MON group 4075 * @rdtgrp: the MON group whose parent should be replaced 4076 * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp 4077 * @cpus: cpumask provided by the caller for use during this call 4078 * 4079 * Replaces the parent CTRL_MON group for a MON group, resulting in all member 4080 * tasks' CLOSID immediately changing to that of the new parent group. 4081 * Monitoring data for the group is unaffected by this operation. 4082 */ 4083 static void mongrp_reparent(struct rdtgroup *rdtgrp, 4084 struct rdtgroup *new_prdtgrp, 4085 cpumask_var_t cpus) 4086 { 4087 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 4088 4089 WARN_ON(rdtgrp->type != RDTMON_GROUP); 4090 WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); 4091 4092 /* Nothing to do when simply renaming a MON group. */ 4093 if (prdtgrp == new_prdtgrp) 4094 return; 4095 4096 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 4097 list_move_tail(&rdtgrp->mon.crdtgrp_list, 4098 &new_prdtgrp->mon.crdtgrp_list); 4099 4100 rdtgrp->mon.parent = new_prdtgrp; 4101 rdtgrp->closid = new_prdtgrp->closid; 4102 4103 /* Propagate updated closid to all tasks in this group. */ 4104 rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); 4105 4106 update_closid_rmid(cpus, NULL); 4107 } 4108 4109 static int rdtgroup_rename(struct kernfs_node *kn, 4110 struct kernfs_node *new_parent, const char *new_name) 4111 { 4112 struct kernfs_node *kn_parent; 4113 struct rdtgroup *new_prdtgrp; 4114 struct rdtgroup *rdtgrp; 4115 cpumask_var_t tmpmask; 4116 int ret; 4117 4118 rdtgrp = kernfs_to_rdtgroup(kn); 4119 new_prdtgrp = kernfs_to_rdtgroup(new_parent); 4120 if (!rdtgrp || !new_prdtgrp) 4121 return -ENOENT; 4122 4123 /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ 4124 rdtgroup_kn_get(rdtgrp, kn); 4125 rdtgroup_kn_get(new_prdtgrp, new_parent); 4126 4127 mutex_lock(&rdtgroup_mutex); 4128 4129 rdt_last_cmd_clear(); 4130 4131 /* 4132 * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if 4133 * either kernfs_node is a file. 
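	 *
	 * For a file, kernfs_to_rdtgroup() would return the group owning the
	 * parent directory instead of the node being renamed, so a file here
	 * must be rejected outright.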

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		seq_puts(seq, ",cdp");

	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA)))
		seq_puts(seq, ",mba_MBps");

	if (resctrl_debug)
		seq_puts(seq, ",debug");

	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.rename		= rdtgroup_rename,
	.show_options	= rdtgroup_show_options,
};

static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
{
	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	ctx->kfc.root = rdt_root;
	rdtgroup_default.kn = kernfs_root_to_node(rdt_root);

	return 0;
}

static void rdtgroup_destroy_root(void)
{
	lockdep_assert_held(&rdtgroup_mutex);

	kernfs_destroy_root(rdt_root);
	rdtgroup_default.kn = NULL;
}

static void rdtgroup_setup_default(void)
{
	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID;
	rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	mutex_unlock(&rdtgroup_mutex);
}
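
/*
 * Illustrative example (not part of the driver): the options emitted by
 * rdtgroup_show_options() reflect the mount options in effect, so a mount
 * such as:
 *
 *   mount -t resctrl -o cdp,mba_MBps resctrl /sys/fs/resctrl
 *
 * would show up in /proc/mounts with ",cdp" and ",mba_MBps" appended,
 * depending on which of the requested features the hardware supports.
 */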

static void domain_destroy_l3_mon_state(struct rdt_l3_mon_domain *d)
{
	int idx;

	kfree(d->cntr_cfg);
	bitmap_free(d->rmid_busy_llc);
	for_each_mbm_idx(idx) {
		kfree(d->mbm_states[idx]);
		d->mbm_states[idx] = NULL;
	}
}

void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
	mutex_lock(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
		mba_sc_domain_destroy(r, d);

	mutex_unlock(&rdtgroup_mutex);
}

void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr)
{
	struct rdt_l3_mon_domain *d;

	mutex_lock(&rdtgroup_mutex);

	if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
		goto out_unlock;

	d = container_of(hdr, struct rdt_l3_mon_domain, hdr);

	/*
	 * If resctrl is mounted, remove all the
	 * per domain monitor data directories.
	 */
	if (resctrl_mounted && resctrl_arch_mon_capable())
		rmdir_mondata_subdir_allrdtgrp(r, hdr);

	if (resctrl_is_mbm_enabled())
		cancel_delayed_work(&d->mbm_over);
	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) && has_busy_rmid(d)) {
		/*
		 * When a package is going down, forcefully
		 * decrement rmid->ebusy. There is no way to know
		 * that the L3 was flushed and hence may lead to
		 * incorrect counts in rare scenarios, but leaving
		 * the RMID as busy creates RMID leaks if the
		 * package never comes back.
		 */
		__check_limbo(d, true);
		cancel_delayed_work(&d->cqm_limbo);
	}

	domain_destroy_l3_mon_state(d);
out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

/**
 * domain_setup_l3_mon_state() - Initialise domain monitoring structures.
 * @r: The resource for the newly online domain.
 * @d: The newly online domain.
 *
 * Allocate monitor resources that belong to this domain.
 * Called when the first CPU of a domain comes online, regardless of whether
 * the filesystem is mounted.
 * During boot this may be called before global allocations have been made by
 * resctrl_l3_mon_resource_init().
 *
 * Return: 0 for success, or -ENOMEM.
 */
static int domain_setup_l3_mon_state(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	size_t tsize = sizeof(*d->mbm_states[0]);
	enum resctrl_event_id eventid;
	int idx;

	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID)) {
		d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
	}

	for_each_mbm_event_id(eventid) {
		if (!resctrl_is_mon_event_enabled(eventid))
			continue;
		idx = MBM_STATE_IDX(eventid);
		d->mbm_states[idx] = kcalloc(idx_limit, tsize, GFP_KERNEL);
		if (!d->mbm_states[idx])
			goto cleanup;
	}

	if (resctrl_is_mbm_enabled() && r->mon.mbm_cntr_assignable) {
		tsize = sizeof(*d->cntr_cfg);
		d->cntr_cfg = kcalloc(r->mon.num_mbm_cntrs, tsize, GFP_KERNEL);
		if (!d->cntr_cfg)
			goto cleanup;
	}

	return 0;
cleanup:
	bitmap_free(d->rmid_busy_llc);
	for_each_mbm_idx(idx) {
		kfree(d->mbm_states[idx]);
		d->mbm_states[idx] = NULL;
	}

	return -ENOMEM;
}
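
/*
 * Illustrative sizing example (numbers are hypothetical): with
 * resctrl_arch_system_num_rmid_idx() returning 256 and both MBM events
 * enabled, domain_setup_l3_mon_state() allocates two arrays of 256
 * struct mbm_state entries per L3 domain, plus a 256-bit rmid_busy_llc
 * bitmap when llc_occupancy is enabled, and a num_mbm_cntrs-sized
 * cntr_cfg array when counters are assignable.
 */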

int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{
	int err = 0;

	mutex_lock(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
		/* RDT_RESOURCE_MBA is never mon_capable */
		err = mba_sc_domain_allocate(r, d);
	}

	mutex_unlock(&rdtgroup_mutex);

	return err;
}

int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr)
{
	struct rdt_l3_mon_domain *d;
	int err = -EINVAL;

	mutex_lock(&rdtgroup_mutex);

	if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
		goto out_unlock;

	d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
	err = domain_setup_l3_mon_state(r, d);
	if (err)
		goto out_unlock;

	if (resctrl_is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
					   RESCTRL_PICK_ANY_CPU);
	}

	if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID))
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);

	/*
	 * If the filesystem is not mounted then only the default resource group
	 * exists. Creation of its directories is deferred until mount time
	 * by rdt_get_tree() calling mkdir_mondata_all().
	 * If resctrl is mounted, add per domain monitor data directories.
	 */
	if (resctrl_mounted && resctrl_arch_mon_capable())
		mkdir_mondata_subdir_allrdtgrp(r, hdr);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);

	return err;
}

void resctrl_online_cpu(unsigned int cpu)
{
	mutex_lock(&rdtgroup_mutex);
	/* The CPU is assigned to the default rdtgroup once it comes online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	mutex_unlock(&rdtgroup_mutex);
}
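
/*
 * Illustrative example (not part of the driver): while resctrl is mounted,
 * onlining the first CPU of a previously offline L3 domain (e.g. domain
 * id 1) re-creates the matching monitor directory in every group via
 * mkdir_mondata_subdir_allrdtgrp():
 *
 *   /sys/fs/resctrl/mon_data/mon_L3_01/
 */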

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static struct rdt_l3_mon_domain *get_mon_domain_from_cpu(int cpu,
							 struct rdt_resource *r)
{
	struct rdt_l3_mon_domain *d;

	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->mon_domains, hdr.list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
			return d;
	}

	return NULL;
}

void resctrl_offline_cpu(unsigned int cpu)
{
	struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	struct rdt_l3_mon_domain *d;
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}

	if (!l3->mon_capable)
		goto out_unlock;

	d = get_mon_domain_from_cpu(cpu, l3);
	if (d) {
		if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0, cpu);
		}
		if (resctrl_is_mon_event_enabled(QOS_L3_OCCUP_EVENT_ID) &&
		    cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0, cpu);
		}
	}

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

/*
 * resctrl_init - resctrl filesystem initialization
 *
 * Set up the resctrl filesystem: set up the root, create the mount point,
 * register the resctrl filesystem, and initialize the files under the root
 * directory.
 *
 * Return: 0 on success or -errno
 */
int resctrl_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	rdtgroup_setup_default();

	thread_throttle_mode_init();

	io_alloc_init();

	ret = resctrl_l3_mon_resource_init();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret) {
		resctrl_l3_mon_resource_exit();
		return ret;
	}

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (e.g. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
	 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
	 * is taken, thus creating the dependency:
	 * &mm->mmap_lock --> rdtgroup_mutex
	 * which can cause issues considering the other two lock dependencies.
	 * By creating the debugfs directory here we avoid a dependency
	 * that may cause deadlock (file operations cannot occur until the
	 * filesystem is mounted, but there is no way to express that to
	 * lockdep).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	resctrl_l3_mon_resource_exit();

	return ret;
}
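
/*
 * Illustrative example (not part of the driver): once resctrl_init() has
 * registered the filesystem type and the sysfs mount point, user space
 * can do:
 *
 *   mount -t resctrl resctrl /sys/fs/resctrl
 *
 * The directories under the root are created at mount time by
 * rdt_get_tree(), not here.
 */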

static bool resctrl_online_domains_exist(void)
{
	struct rdt_resource *r;

	/*
	 * Only walk capable resources to allow resctrl_arch_get_resource()
	 * to return dummy 'not capable' resources.
	 */
	for_each_alloc_capable_rdt_resource(r) {
		if (!list_empty(&r->ctrl_domains))
			return true;
	}

	for_each_mon_capable_rdt_resource(r) {
		if (!list_empty(&r->mon_domains))
			return true;
	}

	return false;
}

/**
 * resctrl_exit() - Remove the resctrl filesystem and free resources.
 *
 * Called by the architecture code in response to a fatal error.
 * Removes resctrl files and structures from kernfs to prevent further
 * configuration.
 *
 * When called by the architecture code, all CPUs and resctrl domains must be
 * offline. This ensures the limbo and overflow handlers are not scheduled to
 * run, meaning the data structures they access can be freed by
 * resctrl_l3_mon_resource_exit().
 *
 * After resctrl_exit() returns, the architecture code should return an
 * error from all resctrl_arch_ functions that can do this.
 * resctrl_arch_get_resource() must continue to return struct rdt_resource
 * entries with the correct rid field to ensure the filesystem can be
 * unmounted.
 */
void resctrl_exit(void)
{
	cpus_read_lock();
	WARN_ON_ONCE(resctrl_online_domains_exist());

	mutex_lock(&rdtgroup_mutex);
	resctrl_fs_teardown();
	mutex_unlock(&rdtgroup_mutex);

	cpus_read_unlock();

	debugfs_remove_recursive(debugfs_resctrl);
	debugfs_resctrl = NULL;
	unregister_filesystem(&rdt_fs_type);

	/*
	 * Do not remove the sysfs mount point added by resctrl_init() so that
	 * it can be used to umount resctrl.
	 */

	resctrl_l3_mon_resource_exit();
}
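
/*
 * Illustrative summary (not part of the driver) of the teardown contract
 * documented above resctrl_exit():
 *
 *   1. Architecture code takes all CPUs offline; the resctrl domains go
 *      away and the limbo/overflow workers stop being scheduled.
 *   2. resctrl_exit() removes the kernfs nodes and unregisters the
 *      filesystem, then frees the global monitoring state.
 *   3. Subsequent resctrl_arch_*() calls return errors, while
 *      resctrl_arch_get_resource() keeps returning valid rid fields so
 *      that a mounted filesystem can still be unmounted.
 */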