1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * User interface for Resource Alloction in Resource Director Technology(RDT) 4 * 5 * Copyright (C) 2016 Intel Corporation 6 * 7 * Author: Fenghua Yu <fenghua.yu@intel.com> 8 * 9 * More information about RDT be found in the Intel (R) x86 Architecture 10 * Software Developer Manual. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/cacheinfo.h> 16 #include <linux/cpu.h> 17 #include <linux/debugfs.h> 18 #include <linux/fs.h> 19 #include <linux/fs_parser.h> 20 #include <linux/sysfs.h> 21 #include <linux/kernfs.h> 22 #include <linux/seq_buf.h> 23 #include <linux/seq_file.h> 24 #include <linux/sched/signal.h> 25 #include <linux/sched/task.h> 26 #include <linux/slab.h> 27 #include <linux/task_work.h> 28 #include <linux/user_namespace.h> 29 30 #include <uapi/linux/magic.h> 31 32 #include <asm/resctrl.h> 33 #include "internal.h" 34 35 DEFINE_STATIC_KEY_FALSE(rdt_enable_key); 36 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); 37 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); 38 static struct kernfs_root *rdt_root; 39 struct rdtgroup rdtgroup_default; 40 LIST_HEAD(rdt_all_groups); 41 42 /* Kernel fs node for "info" directory under root */ 43 static struct kernfs_node *kn_info; 44 45 /* Kernel fs node for "mon_groups" directory under root */ 46 static struct kernfs_node *kn_mongrp; 47 48 /* Kernel fs node for "mon_data" directory under root */ 49 static struct kernfs_node *kn_mondata; 50 51 static struct seq_buf last_cmd_status; 52 static char last_cmd_status_buf[512]; 53 54 struct dentry *debugfs_resctrl; 55 56 void rdt_last_cmd_clear(void) 57 { 58 lockdep_assert_held(&rdtgroup_mutex); 59 seq_buf_clear(&last_cmd_status); 60 } 61 62 void rdt_last_cmd_puts(const char *s) 63 { 64 lockdep_assert_held(&rdtgroup_mutex); 65 seq_buf_puts(&last_cmd_status, s); 66 } 67 68 void rdt_last_cmd_printf(const char *fmt, ...) 69 { 70 va_list ap; 71 72 va_start(ap, fmt); 73 lockdep_assert_held(&rdtgroup_mutex); 74 seq_buf_vprintf(&last_cmd_status, fmt, ap); 75 va_end(ap); 76 } 77 78 /* 79 * Trivial allocator for CLOSIDs. Since h/w only supports a small number, 80 * we can keep a bitmap of free CLOSIDs in a single integer. 81 * 82 * Using a global CLOSID across all resources has some advantages and 83 * some drawbacks: 84 * + We can simply set "current->closid" to assign a task to a resource 85 * group. 86 * + Context switch code can avoid extra memory references deciding which 87 * CLOSID to load into the PQR_ASSOC MSR 88 * - We give up some options in configuring resource groups across multi-socket 89 * systems. 90 * - Our choices on how to configure each resource become progressively more 91 * limited as the number of resources grows. 92 */ 93 static int closid_free_map; 94 static int closid_free_map_len; 95 96 int closids_supported(void) 97 { 98 return closid_free_map_len; 99 } 100 101 static void closid_init(void) 102 { 103 struct rdt_resource *r; 104 int rdt_min_closid = 32; 105 106 /* Compute rdt_min_closid across all resources */ 107 for_each_alloc_enabled_rdt_resource(r) 108 rdt_min_closid = min(rdt_min_closid, r->num_closid); 109 110 closid_free_map = BIT_MASK(rdt_min_closid) - 1; 111 112 /* CLOSID 0 is always reserved for the default group */ 113 closid_free_map &= ~1; 114 closid_free_map_len = rdt_min_closid; 115 } 116 117 static int closid_alloc(void) 118 { 119 u32 closid = ffs(closid_free_map); 120 121 if (closid == 0) 122 return -ENOSPC; 123 closid--; 124 closid_free_map &= ~(1 << closid); 125 126 return closid; 127 } 128 129 void closid_free(int closid) 130 { 131 closid_free_map |= 1 << closid; 132 } 133 134 /** 135 * closid_allocated - test if provided closid is in use 136 * @closid: closid to be tested 137 * 138 * Return: true if @closid is currently associated with a resource group, 139 * false if @closid is free 140 */ 141 static bool closid_allocated(unsigned int closid) 142 { 143 return (closid_free_map & (1 << closid)) == 0; 144 } 145 146 /** 147 * rdtgroup_mode_by_closid - Return mode of resource group with closid 148 * @closid: closid if the resource group 149 * 150 * Each resource group is associated with a @closid. Here the mode 151 * of a resource group can be queried by searching for it using its closid. 152 * 153 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid 154 */ 155 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) 156 { 157 struct rdtgroup *rdtgrp; 158 159 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { 160 if (rdtgrp->closid == closid) 161 return rdtgrp->mode; 162 } 163 164 return RDT_NUM_MODES; 165 } 166 167 static const char * const rdt_mode_str[] = { 168 [RDT_MODE_SHAREABLE] = "shareable", 169 [RDT_MODE_EXCLUSIVE] = "exclusive", 170 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", 171 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", 172 }; 173 174 /** 175 * rdtgroup_mode_str - Return the string representation of mode 176 * @mode: the resource group mode as &enum rdtgroup_mode 177 * 178 * Return: string representation of valid mode, "unknown" otherwise 179 */ 180 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) 181 { 182 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) 183 return "unknown"; 184 185 return rdt_mode_str[mode]; 186 } 187 188 /* set uid and gid of rdtgroup dirs and files to that of the creator */ 189 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) 190 { 191 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 192 .ia_uid = current_fsuid(), 193 .ia_gid = current_fsgid(), }; 194 195 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 196 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 197 return 0; 198 199 return kernfs_setattr(kn, &iattr); 200 } 201 202 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) 203 { 204 struct kernfs_node *kn; 205 int ret; 206 207 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, 208 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 209 0, rft->kf_ops, rft, NULL, NULL); 210 if (IS_ERR(kn)) 211 return PTR_ERR(kn); 212 213 ret = rdtgroup_kn_set_ugid(kn); 214 if (ret) { 215 kernfs_remove(kn); 216 return ret; 217 } 218 219 return 0; 220 } 221 222 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) 223 { 224 struct kernfs_open_file *of = m->private; 225 struct rftype *rft = of->kn->priv; 226 227 if (rft->seq_show) 228 return rft->seq_show(of, m, arg); 229 return 0; 230 } 231 232 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, 233 size_t nbytes, loff_t off) 234 { 235 struct rftype *rft = of->kn->priv; 236 237 if (rft->write) 238 return rft->write(of, buf, nbytes, off); 239 240 return -EINVAL; 241 } 242 243 static struct kernfs_ops rdtgroup_kf_single_ops = { 244 .atomic_write_len = PAGE_SIZE, 245 .write = rdtgroup_file_write, 246 .seq_show = rdtgroup_seqfile_show, 247 }; 248 249 static struct kernfs_ops kf_mondata_ops = { 250 .atomic_write_len = PAGE_SIZE, 251 .seq_show = rdtgroup_mondata_show, 252 }; 253 254 static bool is_cpu_list(struct kernfs_open_file *of) 255 { 256 struct rftype *rft = of->kn->priv; 257 258 return rft->flags & RFTYPE_FLAGS_CPUS_LIST; 259 } 260 261 static int rdtgroup_cpus_show(struct kernfs_open_file *of, 262 struct seq_file *s, void *v) 263 { 264 struct rdtgroup *rdtgrp; 265 struct cpumask *mask; 266 int ret = 0; 267 268 rdtgrp = rdtgroup_kn_lock_live(of->kn); 269 270 if (rdtgrp) { 271 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 272 if (!rdtgrp->plr->d) { 273 rdt_last_cmd_clear(); 274 rdt_last_cmd_puts("Cache domain offline\n"); 275 ret = -ENODEV; 276 } else { 277 mask = &rdtgrp->plr->d->cpu_mask; 278 seq_printf(s, is_cpu_list(of) ? 279 "%*pbl\n" : "%*pb\n", 280 cpumask_pr_args(mask)); 281 } 282 } else { 283 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", 284 cpumask_pr_args(&rdtgrp->cpu_mask)); 285 } 286 } else { 287 ret = -ENOENT; 288 } 289 rdtgroup_kn_unlock(of->kn); 290 291 return ret; 292 } 293 294 /* 295 * This is safe against resctrl_sched_in() called from __switch_to() 296 * because __switch_to() is executed with interrupts disabled. A local call 297 * from update_closid_rmid() is proteced against __switch_to() because 298 * preemption is disabled. 299 */ 300 static void update_cpu_closid_rmid(void *info) 301 { 302 struct rdtgroup *r = info; 303 304 if (r) { 305 this_cpu_write(pqr_state.default_closid, r->closid); 306 this_cpu_write(pqr_state.default_rmid, r->mon.rmid); 307 } 308 309 /* 310 * We cannot unconditionally write the MSR because the current 311 * executing task might have its own closid selected. Just reuse 312 * the context switch code. 313 */ 314 resctrl_sched_in(); 315 } 316 317 /* 318 * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, 319 * 320 * Per task closids/rmids must have been set up before calling this function. 321 */ 322 static void 323 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) 324 { 325 int cpu = get_cpu(); 326 327 if (cpumask_test_cpu(cpu, cpu_mask)) 328 update_cpu_closid_rmid(r); 329 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1); 330 put_cpu(); 331 } 332 333 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 334 cpumask_var_t tmpmask) 335 { 336 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; 337 struct list_head *head; 338 339 /* Check whether cpus belong to parent ctrl group */ 340 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); 341 if (cpumask_weight(tmpmask)) { 342 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); 343 return -EINVAL; 344 } 345 346 /* Check whether cpus are dropped from this group */ 347 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 348 if (cpumask_weight(tmpmask)) { 349 /* Give any dropped cpus to parent rdtgroup */ 350 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); 351 update_closid_rmid(tmpmask, prgrp); 352 } 353 354 /* 355 * If we added cpus, remove them from previous group that owned them 356 * and update per-cpu rmid 357 */ 358 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 359 if (cpumask_weight(tmpmask)) { 360 head = &prgrp->mon.crdtgrp_list; 361 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 362 if (crgrp == rdtgrp) 363 continue; 364 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, 365 tmpmask); 366 } 367 update_closid_rmid(tmpmask, rdtgrp); 368 } 369 370 /* Done pushing/pulling - update this group with new mask */ 371 cpumask_copy(&rdtgrp->cpu_mask, newmask); 372 373 return 0; 374 } 375 376 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) 377 { 378 struct rdtgroup *crgrp; 379 380 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); 381 /* update the child mon group masks as well*/ 382 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) 383 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); 384 } 385 386 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 387 cpumask_var_t tmpmask, cpumask_var_t tmpmask1) 388 { 389 struct rdtgroup *r, *crgrp; 390 struct list_head *head; 391 392 /* Check whether cpus are dropped from this group */ 393 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 394 if (cpumask_weight(tmpmask)) { 395 /* Can't drop from default group */ 396 if (rdtgrp == &rdtgroup_default) { 397 rdt_last_cmd_puts("Can't drop CPUs from default group\n"); 398 return -EINVAL; 399 } 400 401 /* Give any dropped cpus to rdtgroup_default */ 402 cpumask_or(&rdtgroup_default.cpu_mask, 403 &rdtgroup_default.cpu_mask, tmpmask); 404 update_closid_rmid(tmpmask, &rdtgroup_default); 405 } 406 407 /* 408 * If we added cpus, remove them from previous group and 409 * the prev group's child groups that owned them 410 * and update per-cpu closid/rmid. 411 */ 412 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 413 if (cpumask_weight(tmpmask)) { 414 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { 415 if (r == rdtgrp) 416 continue; 417 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); 418 if (cpumask_weight(tmpmask1)) 419 cpumask_rdtgrp_clear(r, tmpmask1); 420 } 421 update_closid_rmid(tmpmask, rdtgrp); 422 } 423 424 /* Done pushing/pulling - update this group with new mask */ 425 cpumask_copy(&rdtgrp->cpu_mask, newmask); 426 427 /* 428 * Clear child mon group masks since there is a new parent mask 429 * now and update the rmid for the cpus the child lost. 430 */ 431 head = &rdtgrp->mon.crdtgrp_list; 432 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 433 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); 434 update_closid_rmid(tmpmask, rdtgrp); 435 cpumask_clear(&crgrp->cpu_mask); 436 } 437 438 return 0; 439 } 440 441 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, 442 char *buf, size_t nbytes, loff_t off) 443 { 444 cpumask_var_t tmpmask, newmask, tmpmask1; 445 struct rdtgroup *rdtgrp; 446 int ret; 447 448 if (!buf) 449 return -EINVAL; 450 451 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 452 return -ENOMEM; 453 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { 454 free_cpumask_var(tmpmask); 455 return -ENOMEM; 456 } 457 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { 458 free_cpumask_var(tmpmask); 459 free_cpumask_var(newmask); 460 return -ENOMEM; 461 } 462 463 rdtgrp = rdtgroup_kn_lock_live(of->kn); 464 if (!rdtgrp) { 465 ret = -ENOENT; 466 goto unlock; 467 } 468 469 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 470 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 471 ret = -EINVAL; 472 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 473 goto unlock; 474 } 475 476 if (is_cpu_list(of)) 477 ret = cpulist_parse(buf, newmask); 478 else 479 ret = cpumask_parse(buf, newmask); 480 481 if (ret) { 482 rdt_last_cmd_puts("Bad CPU list/mask\n"); 483 goto unlock; 484 } 485 486 /* check that user didn't specify any offline cpus */ 487 cpumask_andnot(tmpmask, newmask, cpu_online_mask); 488 if (cpumask_weight(tmpmask)) { 489 ret = -EINVAL; 490 rdt_last_cmd_puts("Can only assign online CPUs\n"); 491 goto unlock; 492 } 493 494 if (rdtgrp->type == RDTCTRL_GROUP) 495 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); 496 else if (rdtgrp->type == RDTMON_GROUP) 497 ret = cpus_mon_write(rdtgrp, newmask, tmpmask); 498 else 499 ret = -EINVAL; 500 501 unlock: 502 rdtgroup_kn_unlock(of->kn); 503 free_cpumask_var(tmpmask); 504 free_cpumask_var(newmask); 505 free_cpumask_var(tmpmask1); 506 507 return ret ?: nbytes; 508 } 509 510 struct task_move_callback { 511 struct callback_head work; 512 struct rdtgroup *rdtgrp; 513 }; 514 515 static void move_myself(struct callback_head *head) 516 { 517 struct task_move_callback *callback; 518 struct rdtgroup *rdtgrp; 519 520 callback = container_of(head, struct task_move_callback, work); 521 rdtgrp = callback->rdtgrp; 522 523 /* 524 * If resource group was deleted before this task work callback 525 * was invoked, then assign the task to root group and free the 526 * resource group. 527 */ 528 if (atomic_dec_and_test(&rdtgrp->waitcount) && 529 (rdtgrp->flags & RDT_DELETED)) { 530 current->closid = 0; 531 current->rmid = 0; 532 kfree(rdtgrp); 533 } 534 535 if (unlikely(current->flags & PF_EXITING)) 536 goto out; 537 538 preempt_disable(); 539 /* update PQR_ASSOC MSR to make resource group go into effect */ 540 resctrl_sched_in(); 541 preempt_enable(); 542 543 out: 544 kfree(callback); 545 } 546 547 static int __rdtgroup_move_task(struct task_struct *tsk, 548 struct rdtgroup *rdtgrp) 549 { 550 struct task_move_callback *callback; 551 int ret; 552 553 callback = kzalloc(sizeof(*callback), GFP_KERNEL); 554 if (!callback) 555 return -ENOMEM; 556 callback->work.func = move_myself; 557 callback->rdtgrp = rdtgrp; 558 559 /* 560 * Take a refcount, so rdtgrp cannot be freed before the 561 * callback has been invoked. 562 */ 563 atomic_inc(&rdtgrp->waitcount); 564 ret = task_work_add(tsk, &callback->work, true); 565 if (ret) { 566 /* 567 * Task is exiting. Drop the refcount and free the callback. 568 * No need to check the refcount as the group cannot be 569 * deleted before the write function unlocks rdtgroup_mutex. 570 */ 571 atomic_dec(&rdtgrp->waitcount); 572 kfree(callback); 573 rdt_last_cmd_puts("Task exited\n"); 574 } else { 575 /* 576 * For ctrl_mon groups move both closid and rmid. 577 * For monitor groups, can move the tasks only from 578 * their parent CTRL group. 579 */ 580 if (rdtgrp->type == RDTCTRL_GROUP) { 581 tsk->closid = rdtgrp->closid; 582 tsk->rmid = rdtgrp->mon.rmid; 583 } else if (rdtgrp->type == RDTMON_GROUP) { 584 if (rdtgrp->mon.parent->closid == tsk->closid) { 585 tsk->rmid = rdtgrp->mon.rmid; 586 } else { 587 rdt_last_cmd_puts("Can't move task to different control group\n"); 588 ret = -EINVAL; 589 } 590 } 591 } 592 return ret; 593 } 594 595 /** 596 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group 597 * @r: Resource group 598 * 599 * Return: 1 if tasks have been assigned to @r, 0 otherwise 600 */ 601 int rdtgroup_tasks_assigned(struct rdtgroup *r) 602 { 603 struct task_struct *p, *t; 604 int ret = 0; 605 606 lockdep_assert_held(&rdtgroup_mutex); 607 608 rcu_read_lock(); 609 for_each_process_thread(p, t) { 610 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || 611 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) { 612 ret = 1; 613 break; 614 } 615 } 616 rcu_read_unlock(); 617 618 return ret; 619 } 620 621 static int rdtgroup_task_write_permission(struct task_struct *task, 622 struct kernfs_open_file *of) 623 { 624 const struct cred *tcred = get_task_cred(task); 625 const struct cred *cred = current_cred(); 626 int ret = 0; 627 628 /* 629 * Even if we're attaching all tasks in the thread group, we only 630 * need to check permissions on one of them. 631 */ 632 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && 633 !uid_eq(cred->euid, tcred->uid) && 634 !uid_eq(cred->euid, tcred->suid)) { 635 rdt_last_cmd_printf("No permission to move task %d\n", task->pid); 636 ret = -EPERM; 637 } 638 639 put_cred(tcred); 640 return ret; 641 } 642 643 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, 644 struct kernfs_open_file *of) 645 { 646 struct task_struct *tsk; 647 int ret; 648 649 rcu_read_lock(); 650 if (pid) { 651 tsk = find_task_by_vpid(pid); 652 if (!tsk) { 653 rcu_read_unlock(); 654 rdt_last_cmd_printf("No task %d\n", pid); 655 return -ESRCH; 656 } 657 } else { 658 tsk = current; 659 } 660 661 get_task_struct(tsk); 662 rcu_read_unlock(); 663 664 ret = rdtgroup_task_write_permission(tsk, of); 665 if (!ret) 666 ret = __rdtgroup_move_task(tsk, rdtgrp); 667 668 put_task_struct(tsk); 669 return ret; 670 } 671 672 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, 673 char *buf, size_t nbytes, loff_t off) 674 { 675 struct rdtgroup *rdtgrp; 676 int ret = 0; 677 pid_t pid; 678 679 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 680 return -EINVAL; 681 rdtgrp = rdtgroup_kn_lock_live(of->kn); 682 if (!rdtgrp) { 683 rdtgroup_kn_unlock(of->kn); 684 return -ENOENT; 685 } 686 rdt_last_cmd_clear(); 687 688 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 689 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 690 ret = -EINVAL; 691 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 692 goto unlock; 693 } 694 695 ret = rdtgroup_move_task(pid, rdtgrp, of); 696 697 unlock: 698 rdtgroup_kn_unlock(of->kn); 699 700 return ret ?: nbytes; 701 } 702 703 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) 704 { 705 struct task_struct *p, *t; 706 707 rcu_read_lock(); 708 for_each_process_thread(p, t) { 709 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || 710 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) 711 seq_printf(s, "%d\n", t->pid); 712 } 713 rcu_read_unlock(); 714 } 715 716 static int rdtgroup_tasks_show(struct kernfs_open_file *of, 717 struct seq_file *s, void *v) 718 { 719 struct rdtgroup *rdtgrp; 720 int ret = 0; 721 722 rdtgrp = rdtgroup_kn_lock_live(of->kn); 723 if (rdtgrp) 724 show_rdt_tasks(rdtgrp, s); 725 else 726 ret = -ENOENT; 727 rdtgroup_kn_unlock(of->kn); 728 729 return ret; 730 } 731 732 #ifdef CONFIG_PROC_CPU_RESCTRL 733 734 /* 735 * A task can only be part of one resctrl control group and of one monitor 736 * group which is associated to that control group. 737 * 738 * 1) res: 739 * mon: 740 * 741 * resctrl is not available. 742 * 743 * 2) res:/ 744 * mon: 745 * 746 * Task is part of the root resctrl control group, and it is not associated 747 * to any monitor group. 748 * 749 * 3) res:/ 750 * mon:mon0 751 * 752 * Task is part of the root resctrl control group and monitor group mon0. 753 * 754 * 4) res:group0 755 * mon: 756 * 757 * Task is part of resctrl control group group0, and it is not associated 758 * to any monitor group. 759 * 760 * 5) res:group0 761 * mon:mon1 762 * 763 * Task is part of resctrl control group group0 and monitor group mon1. 764 */ 765 int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, 766 struct pid *pid, struct task_struct *tsk) 767 { 768 struct rdtgroup *rdtg; 769 int ret = 0; 770 771 mutex_lock(&rdtgroup_mutex); 772 773 /* Return empty if resctrl has not been mounted. */ 774 if (!static_branch_unlikely(&rdt_enable_key)) { 775 seq_puts(s, "res:\nmon:\n"); 776 goto unlock; 777 } 778 779 list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { 780 struct rdtgroup *crg; 781 782 /* 783 * Task information is only relevant for shareable 784 * and exclusive groups. 785 */ 786 if (rdtg->mode != RDT_MODE_SHAREABLE && 787 rdtg->mode != RDT_MODE_EXCLUSIVE) 788 continue; 789 790 if (rdtg->closid != tsk->closid) 791 continue; 792 793 seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", 794 rdtg->kn->name); 795 seq_puts(s, "mon:"); 796 list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, 797 mon.crdtgrp_list) { 798 if (tsk->rmid != crg->mon.rmid) 799 continue; 800 seq_printf(s, "%s", crg->kn->name); 801 break; 802 } 803 seq_putc(s, '\n'); 804 goto unlock; 805 } 806 /* 807 * The above search should succeed. Otherwise return 808 * with an error. 809 */ 810 ret = -ENOENT; 811 unlock: 812 mutex_unlock(&rdtgroup_mutex); 813 814 return ret; 815 } 816 #endif 817 818 static int rdt_last_cmd_status_show(struct kernfs_open_file *of, 819 struct seq_file *seq, void *v) 820 { 821 int len; 822 823 mutex_lock(&rdtgroup_mutex); 824 len = seq_buf_used(&last_cmd_status); 825 if (len) 826 seq_printf(seq, "%.*s", len, last_cmd_status_buf); 827 else 828 seq_puts(seq, "ok\n"); 829 mutex_unlock(&rdtgroup_mutex); 830 return 0; 831 } 832 833 static int rdt_num_closids_show(struct kernfs_open_file *of, 834 struct seq_file *seq, void *v) 835 { 836 struct rdt_resource *r = of->kn->parent->priv; 837 838 seq_printf(seq, "%d\n", r->num_closid); 839 return 0; 840 } 841 842 static int rdt_default_ctrl_show(struct kernfs_open_file *of, 843 struct seq_file *seq, void *v) 844 { 845 struct rdt_resource *r = of->kn->parent->priv; 846 847 seq_printf(seq, "%x\n", r->default_ctrl); 848 return 0; 849 } 850 851 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, 852 struct seq_file *seq, void *v) 853 { 854 struct rdt_resource *r = of->kn->parent->priv; 855 856 seq_printf(seq, "%u\n", r->cache.min_cbm_bits); 857 return 0; 858 } 859 860 static int rdt_shareable_bits_show(struct kernfs_open_file *of, 861 struct seq_file *seq, void *v) 862 { 863 struct rdt_resource *r = of->kn->parent->priv; 864 865 seq_printf(seq, "%x\n", r->cache.shareable_bits); 866 return 0; 867 } 868 869 /** 870 * rdt_bit_usage_show - Display current usage of resources 871 * 872 * A domain is a shared resource that can now be allocated differently. Here 873 * we display the current regions of the domain as an annotated bitmask. 874 * For each domain of this resource its allocation bitmask 875 * is annotated as below to indicate the current usage of the corresponding bit: 876 * 0 - currently unused 877 * X - currently available for sharing and used by software and hardware 878 * H - currently used by hardware only but available for software use 879 * S - currently used and shareable by software only 880 * E - currently used exclusively by one resource group 881 * P - currently pseudo-locked by one resource group 882 */ 883 static int rdt_bit_usage_show(struct kernfs_open_file *of, 884 struct seq_file *seq, void *v) 885 { 886 struct rdt_resource *r = of->kn->parent->priv; 887 /* 888 * Use unsigned long even though only 32 bits are used to ensure 889 * test_bit() is used safely. 890 */ 891 unsigned long sw_shareable = 0, hw_shareable = 0; 892 unsigned long exclusive = 0, pseudo_locked = 0; 893 struct rdt_domain *dom; 894 int i, hwb, swb, excl, psl; 895 enum rdtgrp_mode mode; 896 bool sep = false; 897 u32 *ctrl; 898 899 mutex_lock(&rdtgroup_mutex); 900 hw_shareable = r->cache.shareable_bits; 901 list_for_each_entry(dom, &r->domains, list) { 902 if (sep) 903 seq_putc(seq, ';'); 904 ctrl = dom->ctrl_val; 905 sw_shareable = 0; 906 exclusive = 0; 907 seq_printf(seq, "%d=", dom->id); 908 for (i = 0; i < closids_supported(); i++, ctrl++) { 909 if (!closid_allocated(i)) 910 continue; 911 mode = rdtgroup_mode_by_closid(i); 912 switch (mode) { 913 case RDT_MODE_SHAREABLE: 914 sw_shareable |= *ctrl; 915 break; 916 case RDT_MODE_EXCLUSIVE: 917 exclusive |= *ctrl; 918 break; 919 case RDT_MODE_PSEUDO_LOCKSETUP: 920 /* 921 * RDT_MODE_PSEUDO_LOCKSETUP is possible 922 * here but not included since the CBM 923 * associated with this CLOSID in this mode 924 * is not initialized and no task or cpu can be 925 * assigned this CLOSID. 926 */ 927 break; 928 case RDT_MODE_PSEUDO_LOCKED: 929 case RDT_NUM_MODES: 930 WARN(1, 931 "invalid mode for closid %d\n", i); 932 break; 933 } 934 } 935 for (i = r->cache.cbm_len - 1; i >= 0; i--) { 936 pseudo_locked = dom->plr ? dom->plr->cbm : 0; 937 hwb = test_bit(i, &hw_shareable); 938 swb = test_bit(i, &sw_shareable); 939 excl = test_bit(i, &exclusive); 940 psl = test_bit(i, &pseudo_locked); 941 if (hwb && swb) 942 seq_putc(seq, 'X'); 943 else if (hwb && !swb) 944 seq_putc(seq, 'H'); 945 else if (!hwb && swb) 946 seq_putc(seq, 'S'); 947 else if (excl) 948 seq_putc(seq, 'E'); 949 else if (psl) 950 seq_putc(seq, 'P'); 951 else /* Unused bits remain */ 952 seq_putc(seq, '0'); 953 } 954 sep = true; 955 } 956 seq_putc(seq, '\n'); 957 mutex_unlock(&rdtgroup_mutex); 958 return 0; 959 } 960 961 static int rdt_min_bw_show(struct kernfs_open_file *of, 962 struct seq_file *seq, void *v) 963 { 964 struct rdt_resource *r = of->kn->parent->priv; 965 966 seq_printf(seq, "%u\n", r->membw.min_bw); 967 return 0; 968 } 969 970 static int rdt_num_rmids_show(struct kernfs_open_file *of, 971 struct seq_file *seq, void *v) 972 { 973 struct rdt_resource *r = of->kn->parent->priv; 974 975 seq_printf(seq, "%d\n", r->num_rmid); 976 977 return 0; 978 } 979 980 static int rdt_mon_features_show(struct kernfs_open_file *of, 981 struct seq_file *seq, void *v) 982 { 983 struct rdt_resource *r = of->kn->parent->priv; 984 struct mon_evt *mevt; 985 986 list_for_each_entry(mevt, &r->evt_list, list) 987 seq_printf(seq, "%s\n", mevt->name); 988 989 return 0; 990 } 991 992 static int rdt_bw_gran_show(struct kernfs_open_file *of, 993 struct seq_file *seq, void *v) 994 { 995 struct rdt_resource *r = of->kn->parent->priv; 996 997 seq_printf(seq, "%u\n", r->membw.bw_gran); 998 return 0; 999 } 1000 1001 static int rdt_delay_linear_show(struct kernfs_open_file *of, 1002 struct seq_file *seq, void *v) 1003 { 1004 struct rdt_resource *r = of->kn->parent->priv; 1005 1006 seq_printf(seq, "%u\n", r->membw.delay_linear); 1007 return 0; 1008 } 1009 1010 static int max_threshold_occ_show(struct kernfs_open_file *of, 1011 struct seq_file *seq, void *v) 1012 { 1013 struct rdt_resource *r = of->kn->parent->priv; 1014 1015 seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); 1016 1017 return 0; 1018 } 1019 1020 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, 1021 char *buf, size_t nbytes, loff_t off) 1022 { 1023 struct rdt_resource *r = of->kn->parent->priv; 1024 unsigned int bytes; 1025 int ret; 1026 1027 ret = kstrtouint(buf, 0, &bytes); 1028 if (ret) 1029 return ret; 1030 1031 if (bytes > (boot_cpu_data.x86_cache_size * 1024)) 1032 return -EINVAL; 1033 1034 resctrl_cqm_threshold = bytes / r->mon_scale; 1035 1036 return nbytes; 1037 } 1038 1039 /* 1040 * rdtgroup_mode_show - Display mode of this resource group 1041 */ 1042 static int rdtgroup_mode_show(struct kernfs_open_file *of, 1043 struct seq_file *s, void *v) 1044 { 1045 struct rdtgroup *rdtgrp; 1046 1047 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1048 if (!rdtgrp) { 1049 rdtgroup_kn_unlock(of->kn); 1050 return -ENOENT; 1051 } 1052 1053 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); 1054 1055 rdtgroup_kn_unlock(of->kn); 1056 return 0; 1057 } 1058 1059 /** 1060 * rdt_cdp_peer_get - Retrieve CDP peer if it exists 1061 * @r: RDT resource to which RDT domain @d belongs 1062 * @d: Cache instance for which a CDP peer is requested 1063 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer) 1064 * Used to return the result. 1065 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer) 1066 * Used to return the result. 1067 * 1068 * RDT resources are managed independently and by extension the RDT domains 1069 * (RDT resource instances) are managed independently also. The Code and 1070 * Data Prioritization (CDP) RDT resources, while managed independently, 1071 * could refer to the same underlying hardware. For example, 1072 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache. 1073 * 1074 * When provided with an RDT resource @r and an instance of that RDT 1075 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT 1076 * resource and the exact instance that shares the same hardware. 1077 * 1078 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists. 1079 * If a CDP peer was found, @r_cdp will point to the peer RDT resource 1080 * and @d_cdp will point to the peer RDT domain. 1081 */ 1082 static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, 1083 struct rdt_resource **r_cdp, 1084 struct rdt_domain **d_cdp) 1085 { 1086 struct rdt_resource *_r_cdp = NULL; 1087 struct rdt_domain *_d_cdp = NULL; 1088 int ret = 0; 1089 1090 switch (r->rid) { 1091 case RDT_RESOURCE_L3DATA: 1092 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE]; 1093 break; 1094 case RDT_RESOURCE_L3CODE: 1095 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA]; 1096 break; 1097 case RDT_RESOURCE_L2DATA: 1098 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE]; 1099 break; 1100 case RDT_RESOURCE_L2CODE: 1101 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA]; 1102 break; 1103 default: 1104 ret = -ENOENT; 1105 goto out; 1106 } 1107 1108 /* 1109 * When a new CPU comes online and CDP is enabled then the new 1110 * RDT domains (if any) associated with both CDP RDT resources 1111 * are added in the same CPU online routine while the 1112 * rdtgroup_mutex is held. It should thus not happen for one 1113 * RDT domain to exist and be associated with its RDT CDP 1114 * resource but there is no RDT domain associated with the 1115 * peer RDT CDP resource. Hence the WARN. 1116 */ 1117 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); 1118 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { 1119 _r_cdp = NULL; 1120 ret = -EINVAL; 1121 } 1122 1123 out: 1124 *r_cdp = _r_cdp; 1125 *d_cdp = _d_cdp; 1126 1127 return ret; 1128 } 1129 1130 /** 1131 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other 1132 * @r: Resource to which domain instance @d belongs. 1133 * @d: The domain instance for which @closid is being tested. 1134 * @cbm: Capacity bitmask being tested. 1135 * @closid: Intended closid for @cbm. 1136 * @exclusive: Only check if overlaps with exclusive resource groups 1137 * 1138 * Checks if provided @cbm intended to be used for @closid on domain 1139 * @d overlaps with any other closids or other hardware usage associated 1140 * with this domain. If @exclusive is true then only overlaps with 1141 * resource groups in exclusive mode will be considered. If @exclusive 1142 * is false then overlaps with any resource group or hardware entities 1143 * will be considered. 1144 * 1145 * @cbm is unsigned long, even if only 32 bits are used, to make the 1146 * bitmap functions work correctly. 1147 * 1148 * Return: false if CBM does not overlap, true if it does. 1149 */ 1150 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1151 unsigned long cbm, int closid, bool exclusive) 1152 { 1153 enum rdtgrp_mode mode; 1154 unsigned long ctrl_b; 1155 u32 *ctrl; 1156 int i; 1157 1158 /* Check for any overlap with regions used by hardware directly */ 1159 if (!exclusive) { 1160 ctrl_b = r->cache.shareable_bits; 1161 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) 1162 return true; 1163 } 1164 1165 /* Check for overlap with other resource groups */ 1166 ctrl = d->ctrl_val; 1167 for (i = 0; i < closids_supported(); i++, ctrl++) { 1168 ctrl_b = *ctrl; 1169 mode = rdtgroup_mode_by_closid(i); 1170 if (closid_allocated(i) && i != closid && 1171 mode != RDT_MODE_PSEUDO_LOCKSETUP) { 1172 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { 1173 if (exclusive) { 1174 if (mode == RDT_MODE_EXCLUSIVE) 1175 return true; 1176 continue; 1177 } 1178 return true; 1179 } 1180 } 1181 } 1182 1183 return false; 1184 } 1185 1186 /** 1187 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware 1188 * @r: Resource to which domain instance @d belongs. 1189 * @d: The domain instance for which @closid is being tested. 1190 * @cbm: Capacity bitmask being tested. 1191 * @closid: Intended closid for @cbm. 1192 * @exclusive: Only check if overlaps with exclusive resource groups 1193 * 1194 * Resources that can be allocated using a CBM can use the CBM to control 1195 * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test 1196 * for overlap. Overlap test is not limited to the specific resource for 1197 * which the CBM is intended though - when dealing with CDP resources that 1198 * share the underlying hardware the overlap check should be performed on 1199 * the CDP resource sharing the hardware also. 1200 * 1201 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the 1202 * overlap test. 1203 * 1204 * Return: true if CBM overlap detected, false if there is no overlap 1205 */ 1206 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1207 unsigned long cbm, int closid, bool exclusive) 1208 { 1209 struct rdt_resource *r_cdp; 1210 struct rdt_domain *d_cdp; 1211 1212 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive)) 1213 return true; 1214 1215 if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0) 1216 return false; 1217 1218 return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive); 1219 } 1220 1221 /** 1222 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive 1223 * 1224 * An exclusive resource group implies that there should be no sharing of 1225 * its allocated resources. At the time this group is considered to be 1226 * exclusive this test can determine if its current schemata supports this 1227 * setting by testing for overlap with all other resource groups. 1228 * 1229 * Return: true if resource group can be exclusive, false if there is overlap 1230 * with allocations of other resource groups and thus this resource group 1231 * cannot be exclusive. 1232 */ 1233 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) 1234 { 1235 int closid = rdtgrp->closid; 1236 struct rdt_resource *r; 1237 bool has_cache = false; 1238 struct rdt_domain *d; 1239 1240 for_each_alloc_enabled_rdt_resource(r) { 1241 if (r->rid == RDT_RESOURCE_MBA) 1242 continue; 1243 has_cache = true; 1244 list_for_each_entry(d, &r->domains, list) { 1245 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], 1246 rdtgrp->closid, false)) { 1247 rdt_last_cmd_puts("Schemata overlaps\n"); 1248 return false; 1249 } 1250 } 1251 } 1252 1253 if (!has_cache) { 1254 rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); 1255 return false; 1256 } 1257 1258 return true; 1259 } 1260 1261 /** 1262 * rdtgroup_mode_write - Modify the resource group's mode 1263 * 1264 */ 1265 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, 1266 char *buf, size_t nbytes, loff_t off) 1267 { 1268 struct rdtgroup *rdtgrp; 1269 enum rdtgrp_mode mode; 1270 int ret = 0; 1271 1272 /* Valid input requires a trailing newline */ 1273 if (nbytes == 0 || buf[nbytes - 1] != '\n') 1274 return -EINVAL; 1275 buf[nbytes - 1] = '\0'; 1276 1277 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1278 if (!rdtgrp) { 1279 rdtgroup_kn_unlock(of->kn); 1280 return -ENOENT; 1281 } 1282 1283 rdt_last_cmd_clear(); 1284 1285 mode = rdtgrp->mode; 1286 1287 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || 1288 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || 1289 (!strcmp(buf, "pseudo-locksetup") && 1290 mode == RDT_MODE_PSEUDO_LOCKSETUP) || 1291 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) 1292 goto out; 1293 1294 if (mode == RDT_MODE_PSEUDO_LOCKED) { 1295 rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); 1296 ret = -EINVAL; 1297 goto out; 1298 } 1299 1300 if (!strcmp(buf, "shareable")) { 1301 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1302 ret = rdtgroup_locksetup_exit(rdtgrp); 1303 if (ret) 1304 goto out; 1305 } 1306 rdtgrp->mode = RDT_MODE_SHAREABLE; 1307 } else if (!strcmp(buf, "exclusive")) { 1308 if (!rdtgroup_mode_test_exclusive(rdtgrp)) { 1309 ret = -EINVAL; 1310 goto out; 1311 } 1312 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1313 ret = rdtgroup_locksetup_exit(rdtgrp); 1314 if (ret) 1315 goto out; 1316 } 1317 rdtgrp->mode = RDT_MODE_EXCLUSIVE; 1318 } else if (!strcmp(buf, "pseudo-locksetup")) { 1319 ret = rdtgroup_locksetup_enter(rdtgrp); 1320 if (ret) 1321 goto out; 1322 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; 1323 } else { 1324 rdt_last_cmd_puts("Unknown or unsupported mode\n"); 1325 ret = -EINVAL; 1326 } 1327 1328 out: 1329 rdtgroup_kn_unlock(of->kn); 1330 return ret ?: nbytes; 1331 } 1332 1333 /** 1334 * rdtgroup_cbm_to_size - Translate CBM to size in bytes 1335 * @r: RDT resource to which @d belongs. 1336 * @d: RDT domain instance. 1337 * @cbm: bitmask for which the size should be computed. 1338 * 1339 * The bitmask provided associated with the RDT domain instance @d will be 1340 * translated into how many bytes it represents. The size in bytes is 1341 * computed by first dividing the total cache size by the CBM length to 1342 * determine how many bytes each bit in the bitmask represents. The result 1343 * is multiplied with the number of bits set in the bitmask. 1344 * 1345 * @cbm is unsigned long, even if only 32 bits are used to make the 1346 * bitmap functions work correctly. 1347 */ 1348 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, 1349 struct rdt_domain *d, unsigned long cbm) 1350 { 1351 struct cpu_cacheinfo *ci; 1352 unsigned int size = 0; 1353 int num_b, i; 1354 1355 num_b = bitmap_weight(&cbm, r->cache.cbm_len); 1356 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); 1357 for (i = 0; i < ci->num_leaves; i++) { 1358 if (ci->info_list[i].level == r->cache_level) { 1359 size = ci->info_list[i].size / r->cache.cbm_len * num_b; 1360 break; 1361 } 1362 } 1363 1364 return size; 1365 } 1366 1367 /** 1368 * rdtgroup_size_show - Display size in bytes of allocated regions 1369 * 1370 * The "size" file mirrors the layout of the "schemata" file, printing the 1371 * size in bytes of each region instead of the capacity bitmask. 1372 * 1373 */ 1374 static int rdtgroup_size_show(struct kernfs_open_file *of, 1375 struct seq_file *s, void *v) 1376 { 1377 struct rdtgroup *rdtgrp; 1378 struct rdt_resource *r; 1379 struct rdt_domain *d; 1380 unsigned int size; 1381 int ret = 0; 1382 bool sep; 1383 u32 ctrl; 1384 1385 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1386 if (!rdtgrp) { 1387 rdtgroup_kn_unlock(of->kn); 1388 return -ENOENT; 1389 } 1390 1391 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 1392 if (!rdtgrp->plr->d) { 1393 rdt_last_cmd_clear(); 1394 rdt_last_cmd_puts("Cache domain offline\n"); 1395 ret = -ENODEV; 1396 } else { 1397 seq_printf(s, "%*s:", max_name_width, 1398 rdtgrp->plr->r->name); 1399 size = rdtgroup_cbm_to_size(rdtgrp->plr->r, 1400 rdtgrp->plr->d, 1401 rdtgrp->plr->cbm); 1402 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); 1403 } 1404 goto out; 1405 } 1406 1407 for_each_alloc_enabled_rdt_resource(r) { 1408 sep = false; 1409 seq_printf(s, "%*s:", max_name_width, r->name); 1410 list_for_each_entry(d, &r->domains, list) { 1411 if (sep) 1412 seq_putc(s, ';'); 1413 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1414 size = 0; 1415 } else { 1416 ctrl = (!is_mba_sc(r) ? 1417 d->ctrl_val[rdtgrp->closid] : 1418 d->mbps_val[rdtgrp->closid]); 1419 if (r->rid == RDT_RESOURCE_MBA) 1420 size = ctrl; 1421 else 1422 size = rdtgroup_cbm_to_size(r, d, ctrl); 1423 } 1424 seq_printf(s, "%d=%u", d->id, size); 1425 sep = true; 1426 } 1427 seq_putc(s, '\n'); 1428 } 1429 1430 out: 1431 rdtgroup_kn_unlock(of->kn); 1432 1433 return ret; 1434 } 1435 1436 /* rdtgroup information files for one cache resource. */ 1437 static struct rftype res_common_files[] = { 1438 { 1439 .name = "last_cmd_status", 1440 .mode = 0444, 1441 .kf_ops = &rdtgroup_kf_single_ops, 1442 .seq_show = rdt_last_cmd_status_show, 1443 .fflags = RF_TOP_INFO, 1444 }, 1445 { 1446 .name = "num_closids", 1447 .mode = 0444, 1448 .kf_ops = &rdtgroup_kf_single_ops, 1449 .seq_show = rdt_num_closids_show, 1450 .fflags = RF_CTRL_INFO, 1451 }, 1452 { 1453 .name = "mon_features", 1454 .mode = 0444, 1455 .kf_ops = &rdtgroup_kf_single_ops, 1456 .seq_show = rdt_mon_features_show, 1457 .fflags = RF_MON_INFO, 1458 }, 1459 { 1460 .name = "num_rmids", 1461 .mode = 0444, 1462 .kf_ops = &rdtgroup_kf_single_ops, 1463 .seq_show = rdt_num_rmids_show, 1464 .fflags = RF_MON_INFO, 1465 }, 1466 { 1467 .name = "cbm_mask", 1468 .mode = 0444, 1469 .kf_ops = &rdtgroup_kf_single_ops, 1470 .seq_show = rdt_default_ctrl_show, 1471 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1472 }, 1473 { 1474 .name = "min_cbm_bits", 1475 .mode = 0444, 1476 .kf_ops = &rdtgroup_kf_single_ops, 1477 .seq_show = rdt_min_cbm_bits_show, 1478 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1479 }, 1480 { 1481 .name = "shareable_bits", 1482 .mode = 0444, 1483 .kf_ops = &rdtgroup_kf_single_ops, 1484 .seq_show = rdt_shareable_bits_show, 1485 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1486 }, 1487 { 1488 .name = "bit_usage", 1489 .mode = 0444, 1490 .kf_ops = &rdtgroup_kf_single_ops, 1491 .seq_show = rdt_bit_usage_show, 1492 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1493 }, 1494 { 1495 .name = "min_bandwidth", 1496 .mode = 0444, 1497 .kf_ops = &rdtgroup_kf_single_ops, 1498 .seq_show = rdt_min_bw_show, 1499 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1500 }, 1501 { 1502 .name = "bandwidth_gran", 1503 .mode = 0444, 1504 .kf_ops = &rdtgroup_kf_single_ops, 1505 .seq_show = rdt_bw_gran_show, 1506 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1507 }, 1508 { 1509 .name = "delay_linear", 1510 .mode = 0444, 1511 .kf_ops = &rdtgroup_kf_single_ops, 1512 .seq_show = rdt_delay_linear_show, 1513 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1514 }, 1515 { 1516 .name = "max_threshold_occupancy", 1517 .mode = 0644, 1518 .kf_ops = &rdtgroup_kf_single_ops, 1519 .write = max_threshold_occ_write, 1520 .seq_show = max_threshold_occ_show, 1521 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, 1522 }, 1523 { 1524 .name = "cpus", 1525 .mode = 0644, 1526 .kf_ops = &rdtgroup_kf_single_ops, 1527 .write = rdtgroup_cpus_write, 1528 .seq_show = rdtgroup_cpus_show, 1529 .fflags = RFTYPE_BASE, 1530 }, 1531 { 1532 .name = "cpus_list", 1533 .mode = 0644, 1534 .kf_ops = &rdtgroup_kf_single_ops, 1535 .write = rdtgroup_cpus_write, 1536 .seq_show = rdtgroup_cpus_show, 1537 .flags = RFTYPE_FLAGS_CPUS_LIST, 1538 .fflags = RFTYPE_BASE, 1539 }, 1540 { 1541 .name = "tasks", 1542 .mode = 0644, 1543 .kf_ops = &rdtgroup_kf_single_ops, 1544 .write = rdtgroup_tasks_write, 1545 .seq_show = rdtgroup_tasks_show, 1546 .fflags = RFTYPE_BASE, 1547 }, 1548 { 1549 .name = "schemata", 1550 .mode = 0644, 1551 .kf_ops = &rdtgroup_kf_single_ops, 1552 .write = rdtgroup_schemata_write, 1553 .seq_show = rdtgroup_schemata_show, 1554 .fflags = RF_CTRL_BASE, 1555 }, 1556 { 1557 .name = "mode", 1558 .mode = 0644, 1559 .kf_ops = &rdtgroup_kf_single_ops, 1560 .write = rdtgroup_mode_write, 1561 .seq_show = rdtgroup_mode_show, 1562 .fflags = RF_CTRL_BASE, 1563 }, 1564 { 1565 .name = "size", 1566 .mode = 0444, 1567 .kf_ops = &rdtgroup_kf_single_ops, 1568 .seq_show = rdtgroup_size_show, 1569 .fflags = RF_CTRL_BASE, 1570 }, 1571 1572 }; 1573 1574 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) 1575 { 1576 struct rftype *rfts, *rft; 1577 int ret, len; 1578 1579 rfts = res_common_files; 1580 len = ARRAY_SIZE(res_common_files); 1581 1582 lockdep_assert_held(&rdtgroup_mutex); 1583 1584 for (rft = rfts; rft < rfts + len; rft++) { 1585 if ((fflags & rft->fflags) == rft->fflags) { 1586 ret = rdtgroup_add_file(kn, rft); 1587 if (ret) 1588 goto error; 1589 } 1590 } 1591 1592 return 0; 1593 error: 1594 pr_warn("Failed to add %s, err=%d\n", rft->name, ret); 1595 while (--rft >= rfts) { 1596 if ((fflags & rft->fflags) == rft->fflags) 1597 kernfs_remove_by_name(kn, rft->name); 1598 } 1599 return ret; 1600 } 1601 1602 /** 1603 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file 1604 * @r: The resource group with which the file is associated. 1605 * @name: Name of the file 1606 * 1607 * The permissions of named resctrl file, directory, or link are modified 1608 * to not allow read, write, or execute by any user. 1609 * 1610 * WARNING: This function is intended to communicate to the user that the 1611 * resctrl file has been locked down - that it is not relevant to the 1612 * particular state the system finds itself in. It should not be relied 1613 * on to protect from user access because after the file's permissions 1614 * are restricted the user can still change the permissions using chmod 1615 * from the command line. 1616 * 1617 * Return: 0 on success, <0 on failure. 1618 */ 1619 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) 1620 { 1621 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1622 struct kernfs_node *kn; 1623 int ret = 0; 1624 1625 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1626 if (!kn) 1627 return -ENOENT; 1628 1629 switch (kernfs_type(kn)) { 1630 case KERNFS_DIR: 1631 iattr.ia_mode = S_IFDIR; 1632 break; 1633 case KERNFS_FILE: 1634 iattr.ia_mode = S_IFREG; 1635 break; 1636 case KERNFS_LINK: 1637 iattr.ia_mode = S_IFLNK; 1638 break; 1639 } 1640 1641 ret = kernfs_setattr(kn, &iattr); 1642 kernfs_put(kn); 1643 return ret; 1644 } 1645 1646 /** 1647 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file 1648 * @r: The resource group with which the file is associated. 1649 * @name: Name of the file 1650 * @mask: Mask of permissions that should be restored 1651 * 1652 * Restore the permissions of the named file. If @name is a directory the 1653 * permissions of its parent will be used. 1654 * 1655 * Return: 0 on success, <0 on failure. 1656 */ 1657 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, 1658 umode_t mask) 1659 { 1660 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1661 struct kernfs_node *kn, *parent; 1662 struct rftype *rfts, *rft; 1663 int ret, len; 1664 1665 rfts = res_common_files; 1666 len = ARRAY_SIZE(res_common_files); 1667 1668 for (rft = rfts; rft < rfts + len; rft++) { 1669 if (!strcmp(rft->name, name)) 1670 iattr.ia_mode = rft->mode & mask; 1671 } 1672 1673 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1674 if (!kn) 1675 return -ENOENT; 1676 1677 switch (kernfs_type(kn)) { 1678 case KERNFS_DIR: 1679 parent = kernfs_get_parent(kn); 1680 if (parent) { 1681 iattr.ia_mode |= parent->mode; 1682 kernfs_put(parent); 1683 } 1684 iattr.ia_mode |= S_IFDIR; 1685 break; 1686 case KERNFS_FILE: 1687 iattr.ia_mode |= S_IFREG; 1688 break; 1689 case KERNFS_LINK: 1690 iattr.ia_mode |= S_IFLNK; 1691 break; 1692 } 1693 1694 ret = kernfs_setattr(kn, &iattr); 1695 kernfs_put(kn); 1696 return ret; 1697 } 1698 1699 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name, 1700 unsigned long fflags) 1701 { 1702 struct kernfs_node *kn_subdir; 1703 int ret; 1704 1705 kn_subdir = kernfs_create_dir(kn_info, name, 1706 kn_info->mode, r); 1707 if (IS_ERR(kn_subdir)) 1708 return PTR_ERR(kn_subdir); 1709 1710 kernfs_get(kn_subdir); 1711 ret = rdtgroup_kn_set_ugid(kn_subdir); 1712 if (ret) 1713 return ret; 1714 1715 ret = rdtgroup_add_files(kn_subdir, fflags); 1716 if (!ret) 1717 kernfs_activate(kn_subdir); 1718 1719 return ret; 1720 } 1721 1722 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) 1723 { 1724 struct rdt_resource *r; 1725 unsigned long fflags; 1726 char name[32]; 1727 int ret; 1728 1729 /* create the directory */ 1730 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); 1731 if (IS_ERR(kn_info)) 1732 return PTR_ERR(kn_info); 1733 kernfs_get(kn_info); 1734 1735 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); 1736 if (ret) 1737 goto out_destroy; 1738 1739 for_each_alloc_enabled_rdt_resource(r) { 1740 fflags = r->fflags | RF_CTRL_INFO; 1741 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags); 1742 if (ret) 1743 goto out_destroy; 1744 } 1745 1746 for_each_mon_enabled_rdt_resource(r) { 1747 fflags = r->fflags | RF_MON_INFO; 1748 sprintf(name, "%s_MON", r->name); 1749 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 1750 if (ret) 1751 goto out_destroy; 1752 } 1753 1754 /* 1755 * This extra ref will be put in kernfs_remove() and guarantees 1756 * that @rdtgrp->kn is always accessible. 1757 */ 1758 kernfs_get(kn_info); 1759 1760 ret = rdtgroup_kn_set_ugid(kn_info); 1761 if (ret) 1762 goto out_destroy; 1763 1764 kernfs_activate(kn_info); 1765 1766 return 0; 1767 1768 out_destroy: 1769 kernfs_remove(kn_info); 1770 return ret; 1771 } 1772 1773 static int 1774 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 1775 char *name, struct kernfs_node **dest_kn) 1776 { 1777 struct kernfs_node *kn; 1778 int ret; 1779 1780 /* create the directory */ 1781 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 1782 if (IS_ERR(kn)) 1783 return PTR_ERR(kn); 1784 1785 if (dest_kn) 1786 *dest_kn = kn; 1787 1788 /* 1789 * This extra ref will be put in kernfs_remove() and guarantees 1790 * that @rdtgrp->kn is always accessible. 1791 */ 1792 kernfs_get(kn); 1793 1794 ret = rdtgroup_kn_set_ugid(kn); 1795 if (ret) 1796 goto out_destroy; 1797 1798 kernfs_activate(kn); 1799 1800 return 0; 1801 1802 out_destroy: 1803 kernfs_remove(kn); 1804 return ret; 1805 } 1806 1807 static void l3_qos_cfg_update(void *arg) 1808 { 1809 bool *enable = arg; 1810 1811 wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 1812 } 1813 1814 static void l2_qos_cfg_update(void *arg) 1815 { 1816 bool *enable = arg; 1817 1818 wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 1819 } 1820 1821 static inline bool is_mba_linear(void) 1822 { 1823 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; 1824 } 1825 1826 static int set_cache_qos_cfg(int level, bool enable) 1827 { 1828 void (*update)(void *arg); 1829 struct rdt_resource *r_l; 1830 cpumask_var_t cpu_mask; 1831 struct rdt_domain *d; 1832 int cpu; 1833 1834 if (level == RDT_RESOURCE_L3) 1835 update = l3_qos_cfg_update; 1836 else if (level == RDT_RESOURCE_L2) 1837 update = l2_qos_cfg_update; 1838 else 1839 return -EINVAL; 1840 1841 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 1842 return -ENOMEM; 1843 1844 r_l = &rdt_resources_all[level]; 1845 list_for_each_entry(d, &r_l->domains, list) { 1846 /* Pick one CPU from each domain instance to update MSR */ 1847 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1848 } 1849 cpu = get_cpu(); 1850 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ 1851 if (cpumask_test_cpu(cpu, cpu_mask)) 1852 update(&enable); 1853 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */ 1854 smp_call_function_many(cpu_mask, update, &enable, 1); 1855 put_cpu(); 1856 1857 free_cpumask_var(cpu_mask); 1858 1859 return 0; 1860 } 1861 1862 /* Restore the qos cfg state when a domain comes online */ 1863 void rdt_domain_reconfigure_cdp(struct rdt_resource *r) 1864 { 1865 if (!r->alloc_capable) 1866 return; 1867 1868 if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) 1869 l2_qos_cfg_update(&r->alloc_enabled); 1870 1871 if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) 1872 l3_qos_cfg_update(&r->alloc_enabled); 1873 } 1874 1875 /* 1876 * Enable or disable the MBA software controller 1877 * which helps user specify bandwidth in MBps. 1878 * MBA software controller is supported only if 1879 * MBM is supported and MBA is in linear scale. 1880 */ 1881 static int set_mba_sc(bool mba_sc) 1882 { 1883 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; 1884 struct rdt_domain *d; 1885 1886 if (!is_mbm_enabled() || !is_mba_linear() || 1887 mba_sc == is_mba_sc(r)) 1888 return -EINVAL; 1889 1890 r->membw.mba_sc = mba_sc; 1891 list_for_each_entry(d, &r->domains, list) 1892 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val); 1893 1894 return 0; 1895 } 1896 1897 static int cdp_enable(int level, int data_type, int code_type) 1898 { 1899 struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; 1900 struct rdt_resource *r_lcode = &rdt_resources_all[code_type]; 1901 struct rdt_resource *r_l = &rdt_resources_all[level]; 1902 int ret; 1903 1904 if (!r_l->alloc_capable || !r_ldata->alloc_capable || 1905 !r_lcode->alloc_capable) 1906 return -EINVAL; 1907 1908 ret = set_cache_qos_cfg(level, true); 1909 if (!ret) { 1910 r_l->alloc_enabled = false; 1911 r_ldata->alloc_enabled = true; 1912 r_lcode->alloc_enabled = true; 1913 } 1914 return ret; 1915 } 1916 1917 static int cdpl3_enable(void) 1918 { 1919 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, 1920 RDT_RESOURCE_L3CODE); 1921 } 1922 1923 static int cdpl2_enable(void) 1924 { 1925 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, 1926 RDT_RESOURCE_L2CODE); 1927 } 1928 1929 static void cdp_disable(int level, int data_type, int code_type) 1930 { 1931 struct rdt_resource *r = &rdt_resources_all[level]; 1932 1933 r->alloc_enabled = r->alloc_capable; 1934 1935 if (rdt_resources_all[data_type].alloc_enabled) { 1936 rdt_resources_all[data_type].alloc_enabled = false; 1937 rdt_resources_all[code_type].alloc_enabled = false; 1938 set_cache_qos_cfg(level, false); 1939 } 1940 } 1941 1942 static void cdpl3_disable(void) 1943 { 1944 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE); 1945 } 1946 1947 static void cdpl2_disable(void) 1948 { 1949 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE); 1950 } 1951 1952 static void cdp_disable_all(void) 1953 { 1954 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 1955 cdpl3_disable(); 1956 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 1957 cdpl2_disable(); 1958 } 1959 1960 /* 1961 * We don't allow rdtgroup directories to be created anywhere 1962 * except the root directory. Thus when looking for the rdtgroup 1963 * structure for a kernfs node we are either looking at a directory, 1964 * in which case the rdtgroup structure is pointed at by the "priv" 1965 * field, otherwise we have a file, and need only look to the parent 1966 * to find the rdtgroup. 1967 */ 1968 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) 1969 { 1970 if (kernfs_type(kn) == KERNFS_DIR) { 1971 /* 1972 * All the resource directories use "kn->priv" 1973 * to point to the "struct rdtgroup" for the 1974 * resource. "info" and its subdirectories don't 1975 * have rdtgroup structures, so return NULL here. 1976 */ 1977 if (kn == kn_info || kn->parent == kn_info) 1978 return NULL; 1979 else 1980 return kn->priv; 1981 } else { 1982 return kn->parent->priv; 1983 } 1984 } 1985 1986 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) 1987 { 1988 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 1989 1990 if (!rdtgrp) 1991 return NULL; 1992 1993 atomic_inc(&rdtgrp->waitcount); 1994 kernfs_break_active_protection(kn); 1995 1996 mutex_lock(&rdtgroup_mutex); 1997 1998 /* Was this group deleted while we waited? */ 1999 if (rdtgrp->flags & RDT_DELETED) 2000 return NULL; 2001 2002 return rdtgrp; 2003 } 2004 2005 void rdtgroup_kn_unlock(struct kernfs_node *kn) 2006 { 2007 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2008 2009 if (!rdtgrp) 2010 return; 2011 2012 mutex_unlock(&rdtgroup_mutex); 2013 2014 if (atomic_dec_and_test(&rdtgrp->waitcount) && 2015 (rdtgrp->flags & RDT_DELETED)) { 2016 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2017 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2018 rdtgroup_pseudo_lock_remove(rdtgrp); 2019 kernfs_unbreak_active_protection(kn); 2020 kernfs_put(rdtgrp->kn); 2021 kfree(rdtgrp); 2022 } else { 2023 kernfs_unbreak_active_protection(kn); 2024 } 2025 } 2026 2027 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2028 struct rdtgroup *prgrp, 2029 struct kernfs_node **mon_data_kn); 2030 2031 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 2032 { 2033 int ret = 0; 2034 2035 if (ctx->enable_cdpl2) 2036 ret = cdpl2_enable(); 2037 2038 if (!ret && ctx->enable_cdpl3) 2039 ret = cdpl3_enable(); 2040 2041 if (!ret && ctx->enable_mba_mbps) 2042 ret = set_mba_sc(true); 2043 2044 return ret; 2045 } 2046 2047 static int rdt_get_tree(struct fs_context *fc) 2048 { 2049 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2050 struct rdt_domain *dom; 2051 struct rdt_resource *r; 2052 int ret; 2053 2054 cpus_read_lock(); 2055 mutex_lock(&rdtgroup_mutex); 2056 /* 2057 * resctrl file system can only be mounted once. 2058 */ 2059 if (static_branch_unlikely(&rdt_enable_key)) { 2060 ret = -EBUSY; 2061 goto out; 2062 } 2063 2064 ret = rdt_enable_ctx(ctx); 2065 if (ret < 0) 2066 goto out_cdp; 2067 2068 closid_init(); 2069 2070 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 2071 if (ret < 0) 2072 goto out_mba; 2073 2074 if (rdt_mon_capable) { 2075 ret = mongroup_create_dir(rdtgroup_default.kn, 2076 &rdtgroup_default, "mon_groups", 2077 &kn_mongrp); 2078 if (ret < 0) 2079 goto out_info; 2080 kernfs_get(kn_mongrp); 2081 2082 ret = mkdir_mondata_all(rdtgroup_default.kn, 2083 &rdtgroup_default, &kn_mondata); 2084 if (ret < 0) 2085 goto out_mongrp; 2086 kernfs_get(kn_mondata); 2087 rdtgroup_default.mon.mon_data_kn = kn_mondata; 2088 } 2089 2090 ret = rdt_pseudo_lock_init(); 2091 if (ret) 2092 goto out_mondata; 2093 2094 ret = kernfs_get_tree(fc); 2095 if (ret < 0) 2096 goto out_psl; 2097 2098 if (rdt_alloc_capable) 2099 static_branch_enable_cpuslocked(&rdt_alloc_enable_key); 2100 if (rdt_mon_capable) 2101 static_branch_enable_cpuslocked(&rdt_mon_enable_key); 2102 2103 if (rdt_alloc_capable || rdt_mon_capable) 2104 static_branch_enable_cpuslocked(&rdt_enable_key); 2105 2106 if (is_mbm_enabled()) { 2107 r = &rdt_resources_all[RDT_RESOURCE_L3]; 2108 list_for_each_entry(dom, &r->domains, list) 2109 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); 2110 } 2111 2112 goto out; 2113 2114 out_psl: 2115 rdt_pseudo_lock_release(); 2116 out_mondata: 2117 if (rdt_mon_capable) 2118 kernfs_remove(kn_mondata); 2119 out_mongrp: 2120 if (rdt_mon_capable) 2121 kernfs_remove(kn_mongrp); 2122 out_info: 2123 kernfs_remove(kn_info); 2124 out_mba: 2125 if (ctx->enable_mba_mbps) 2126 set_mba_sc(false); 2127 out_cdp: 2128 cdp_disable_all(); 2129 out: 2130 rdt_last_cmd_clear(); 2131 mutex_unlock(&rdtgroup_mutex); 2132 cpus_read_unlock(); 2133 return ret; 2134 } 2135 2136 enum rdt_param { 2137 Opt_cdp, 2138 Opt_cdpl2, 2139 Opt_mba_mbps, 2140 nr__rdt_params 2141 }; 2142 2143 static const struct fs_parameter_spec rdt_fs_parameters[] = { 2144 fsparam_flag("cdp", Opt_cdp), 2145 fsparam_flag("cdpl2", Opt_cdpl2), 2146 fsparam_flag("mba_MBps", Opt_mba_mbps), 2147 {} 2148 }; 2149 2150 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) 2151 { 2152 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2153 struct fs_parse_result result; 2154 int opt; 2155 2156 opt = fs_parse(fc, rdt_fs_parameters, param, &result); 2157 if (opt < 0) 2158 return opt; 2159 2160 switch (opt) { 2161 case Opt_cdp: 2162 ctx->enable_cdpl3 = true; 2163 return 0; 2164 case Opt_cdpl2: 2165 ctx->enable_cdpl2 = true; 2166 return 0; 2167 case Opt_mba_mbps: 2168 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2169 return -EINVAL; 2170 ctx->enable_mba_mbps = true; 2171 return 0; 2172 } 2173 2174 return -EINVAL; 2175 } 2176 2177 static void rdt_fs_context_free(struct fs_context *fc) 2178 { 2179 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2180 2181 kernfs_free_fs_context(fc); 2182 kfree(ctx); 2183 } 2184 2185 static const struct fs_context_operations rdt_fs_context_ops = { 2186 .free = rdt_fs_context_free, 2187 .parse_param = rdt_parse_param, 2188 .get_tree = rdt_get_tree, 2189 }; 2190 2191 static int rdt_init_fs_context(struct fs_context *fc) 2192 { 2193 struct rdt_fs_context *ctx; 2194 2195 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); 2196 if (!ctx) 2197 return -ENOMEM; 2198 2199 ctx->kfc.root = rdt_root; 2200 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; 2201 fc->fs_private = &ctx->kfc; 2202 fc->ops = &rdt_fs_context_ops; 2203 put_user_ns(fc->user_ns); 2204 fc->user_ns = get_user_ns(&init_user_ns); 2205 fc->global = true; 2206 return 0; 2207 } 2208 2209 static int reset_all_ctrls(struct rdt_resource *r) 2210 { 2211 struct msr_param msr_param; 2212 cpumask_var_t cpu_mask; 2213 struct rdt_domain *d; 2214 int i, cpu; 2215 2216 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 2217 return -ENOMEM; 2218 2219 msr_param.res = r; 2220 msr_param.low = 0; 2221 msr_param.high = r->num_closid; 2222 2223 /* 2224 * Disable resource control for this resource by setting all 2225 * CBMs in all domains to the maximum mask value. Pick one CPU 2226 * from each domain to update the MSRs below. 2227 */ 2228 list_for_each_entry(d, &r->domains, list) { 2229 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 2230 2231 for (i = 0; i < r->num_closid; i++) 2232 d->ctrl_val[i] = r->default_ctrl; 2233 } 2234 cpu = get_cpu(); 2235 /* Update CBM on this cpu if it's in cpu_mask. */ 2236 if (cpumask_test_cpu(cpu, cpu_mask)) 2237 rdt_ctrl_update(&msr_param); 2238 /* Update CBM on all other cpus in cpu_mask. */ 2239 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1); 2240 put_cpu(); 2241 2242 free_cpumask_var(cpu_mask); 2243 2244 return 0; 2245 } 2246 2247 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) 2248 { 2249 return (rdt_alloc_capable && 2250 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); 2251 } 2252 2253 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) 2254 { 2255 return (rdt_mon_capable && 2256 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); 2257 } 2258 2259 /* 2260 * Move tasks from one to the other group. If @from is NULL, then all tasks 2261 * in the systems are moved unconditionally (used for teardown). 2262 * 2263 * If @mask is not NULL the cpus on which moved tasks are running are set 2264 * in that mask so the update smp function call is restricted to affected 2265 * cpus. 2266 */ 2267 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, 2268 struct cpumask *mask) 2269 { 2270 struct task_struct *p, *t; 2271 2272 read_lock(&tasklist_lock); 2273 for_each_process_thread(p, t) { 2274 if (!from || is_closid_match(t, from) || 2275 is_rmid_match(t, from)) { 2276 t->closid = to->closid; 2277 t->rmid = to->mon.rmid; 2278 2279 #ifdef CONFIG_SMP 2280 /* 2281 * This is safe on x86 w/o barriers as the ordering 2282 * of writing to task_cpu() and t->on_cpu is 2283 * reverse to the reading here. The detection is 2284 * inaccurate as tasks might move or schedule 2285 * before the smp function call takes place. In 2286 * such a case the function call is pointless, but 2287 * there is no other side effect. 2288 */ 2289 if (mask && t->on_cpu) 2290 cpumask_set_cpu(task_cpu(t), mask); 2291 #endif 2292 } 2293 } 2294 read_unlock(&tasklist_lock); 2295 } 2296 2297 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 2298 { 2299 struct rdtgroup *sentry, *stmp; 2300 struct list_head *head; 2301 2302 head = &rdtgrp->mon.crdtgrp_list; 2303 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 2304 free_rmid(sentry->mon.rmid); 2305 list_del(&sentry->mon.crdtgrp_list); 2306 2307 if (atomic_read(&sentry->waitcount) != 0) 2308 sentry->flags = RDT_DELETED; 2309 else 2310 kfree(sentry); 2311 } 2312 } 2313 2314 /* 2315 * Forcibly remove all of subdirectories under root. 2316 */ 2317 static void rmdir_all_sub(void) 2318 { 2319 struct rdtgroup *rdtgrp, *tmp; 2320 2321 /* Move all tasks to the default resource group */ 2322 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 2323 2324 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 2325 /* Free any child rmids */ 2326 free_all_child_rdtgrp(rdtgrp); 2327 2328 /* Remove each rdtgroup other than root */ 2329 if (rdtgrp == &rdtgroup_default) 2330 continue; 2331 2332 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2333 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2334 rdtgroup_pseudo_lock_remove(rdtgrp); 2335 2336 /* 2337 * Give any CPUs back to the default group. We cannot copy 2338 * cpu_online_mask because a CPU might have executed the 2339 * offline callback already, but is still marked online. 2340 */ 2341 cpumask_or(&rdtgroup_default.cpu_mask, 2342 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 2343 2344 free_rmid(rdtgrp->mon.rmid); 2345 2346 kernfs_remove(rdtgrp->kn); 2347 list_del(&rdtgrp->rdtgroup_list); 2348 2349 if (atomic_read(&rdtgrp->waitcount) != 0) 2350 rdtgrp->flags = RDT_DELETED; 2351 else 2352 kfree(rdtgrp); 2353 } 2354 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 2355 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 2356 2357 kernfs_remove(kn_info); 2358 kernfs_remove(kn_mongrp); 2359 kernfs_remove(kn_mondata); 2360 } 2361 2362 static void rdt_kill_sb(struct super_block *sb) 2363 { 2364 struct rdt_resource *r; 2365 2366 cpus_read_lock(); 2367 mutex_lock(&rdtgroup_mutex); 2368 2369 set_mba_sc(false); 2370 2371 /*Put everything back to default values. */ 2372 for_each_alloc_enabled_rdt_resource(r) 2373 reset_all_ctrls(r); 2374 cdp_disable_all(); 2375 rmdir_all_sub(); 2376 rdt_pseudo_lock_release(); 2377 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 2378 static_branch_disable_cpuslocked(&rdt_alloc_enable_key); 2379 static_branch_disable_cpuslocked(&rdt_mon_enable_key); 2380 static_branch_disable_cpuslocked(&rdt_enable_key); 2381 kernfs_kill_sb(sb); 2382 mutex_unlock(&rdtgroup_mutex); 2383 cpus_read_unlock(); 2384 } 2385 2386 static struct file_system_type rdt_fs_type = { 2387 .name = "resctrl", 2388 .init_fs_context = rdt_init_fs_context, 2389 .parameters = rdt_fs_parameters, 2390 .kill_sb = rdt_kill_sb, 2391 }; 2392 2393 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 2394 void *priv) 2395 { 2396 struct kernfs_node *kn; 2397 int ret = 0; 2398 2399 kn = __kernfs_create_file(parent_kn, name, 0444, 2400 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 2401 &kf_mondata_ops, priv, NULL, NULL); 2402 if (IS_ERR(kn)) 2403 return PTR_ERR(kn); 2404 2405 ret = rdtgroup_kn_set_ugid(kn); 2406 if (ret) { 2407 kernfs_remove(kn); 2408 return ret; 2409 } 2410 2411 return ret; 2412 } 2413 2414 /* 2415 * Remove all subdirectories of mon_data of ctrl_mon groups 2416 * and monitor groups with given domain id. 2417 */ 2418 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id) 2419 { 2420 struct rdtgroup *prgrp, *crgrp; 2421 char name[32]; 2422 2423 if (!r->mon_enabled) 2424 return; 2425 2426 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2427 sprintf(name, "mon_%s_%02d", r->name, dom_id); 2428 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); 2429 2430 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 2431 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); 2432 } 2433 } 2434 2435 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, 2436 struct rdt_domain *d, 2437 struct rdt_resource *r, struct rdtgroup *prgrp) 2438 { 2439 union mon_data_bits priv; 2440 struct kernfs_node *kn; 2441 struct mon_evt *mevt; 2442 struct rmid_read rr; 2443 char name[32]; 2444 int ret; 2445 2446 sprintf(name, "mon_%s_%02d", r->name, d->id); 2447 /* create the directory */ 2448 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2449 if (IS_ERR(kn)) 2450 return PTR_ERR(kn); 2451 2452 /* 2453 * This extra ref will be put in kernfs_remove() and guarantees 2454 * that kn is always accessible. 2455 */ 2456 kernfs_get(kn); 2457 ret = rdtgroup_kn_set_ugid(kn); 2458 if (ret) 2459 goto out_destroy; 2460 2461 if (WARN_ON(list_empty(&r->evt_list))) { 2462 ret = -EPERM; 2463 goto out_destroy; 2464 } 2465 2466 priv.u.rid = r->rid; 2467 priv.u.domid = d->id; 2468 list_for_each_entry(mevt, &r->evt_list, list) { 2469 priv.u.evtid = mevt->evtid; 2470 ret = mon_addfile(kn, mevt->name, priv.priv); 2471 if (ret) 2472 goto out_destroy; 2473 2474 if (is_mbm_event(mevt->evtid)) 2475 mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); 2476 } 2477 kernfs_activate(kn); 2478 return 0; 2479 2480 out_destroy: 2481 kernfs_remove(kn); 2482 return ret; 2483 } 2484 2485 /* 2486 * Add all subdirectories of mon_data for "ctrl_mon" groups 2487 * and "monitor" groups with given domain id. 2488 */ 2489 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 2490 struct rdt_domain *d) 2491 { 2492 struct kernfs_node *parent_kn; 2493 struct rdtgroup *prgrp, *crgrp; 2494 struct list_head *head; 2495 2496 if (!r->mon_enabled) 2497 return; 2498 2499 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2500 parent_kn = prgrp->mon.mon_data_kn; 2501 mkdir_mondata_subdir(parent_kn, d, r, prgrp); 2502 2503 head = &prgrp->mon.crdtgrp_list; 2504 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 2505 parent_kn = crgrp->mon.mon_data_kn; 2506 mkdir_mondata_subdir(parent_kn, d, r, crgrp); 2507 } 2508 } 2509 } 2510 2511 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, 2512 struct rdt_resource *r, 2513 struct rdtgroup *prgrp) 2514 { 2515 struct rdt_domain *dom; 2516 int ret; 2517 2518 list_for_each_entry(dom, &r->domains, list) { 2519 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); 2520 if (ret) 2521 return ret; 2522 } 2523 2524 return 0; 2525 } 2526 2527 /* 2528 * This creates a directory mon_data which contains the monitored data. 2529 * 2530 * mon_data has one directory for each domain whic are named 2531 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data 2532 * with L3 domain looks as below: 2533 * ./mon_data: 2534 * mon_L3_00 2535 * mon_L3_01 2536 * mon_L3_02 2537 * ... 2538 * 2539 * Each domain directory has one file per event: 2540 * ./mon_L3_00/: 2541 * llc_occupancy 2542 * 2543 */ 2544 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2545 struct rdtgroup *prgrp, 2546 struct kernfs_node **dest_kn) 2547 { 2548 struct rdt_resource *r; 2549 struct kernfs_node *kn; 2550 int ret; 2551 2552 /* 2553 * Create the mon_data directory first. 2554 */ 2555 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); 2556 if (ret) 2557 return ret; 2558 2559 if (dest_kn) 2560 *dest_kn = kn; 2561 2562 /* 2563 * Create the subdirectories for each domain. Note that all events 2564 * in a domain like L3 are grouped into a resource whose domain is L3 2565 */ 2566 for_each_mon_enabled_rdt_resource(r) { 2567 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); 2568 if (ret) 2569 goto out_destroy; 2570 } 2571 2572 return 0; 2573 2574 out_destroy: 2575 kernfs_remove(kn); 2576 return ret; 2577 } 2578 2579 /** 2580 * cbm_ensure_valid - Enforce validity on provided CBM 2581 * @_val: Candidate CBM 2582 * @r: RDT resource to which the CBM belongs 2583 * 2584 * The provided CBM represents all cache portions available for use. This 2585 * may be represented by a bitmap that does not consist of contiguous ones 2586 * and thus be an invalid CBM. 2587 * Here the provided CBM is forced to be a valid CBM by only considering 2588 * the first set of contiguous bits as valid and clearing all bits. 2589 * The intention here is to provide a valid default CBM with which a new 2590 * resource group is initialized. The user can follow this with a 2591 * modification to the CBM if the default does not satisfy the 2592 * requirements. 2593 */ 2594 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) 2595 { 2596 unsigned int cbm_len = r->cache.cbm_len; 2597 unsigned long first_bit, zero_bit; 2598 unsigned long val = _val; 2599 2600 if (!val) 2601 return 0; 2602 2603 first_bit = find_first_bit(&val, cbm_len); 2604 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 2605 2606 /* Clear any remaining bits to ensure contiguous region */ 2607 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); 2608 return (u32)val; 2609 } 2610 2611 /* 2612 * Initialize cache resources per RDT domain 2613 * 2614 * Set the RDT domain up to start off with all usable allocations. That is, 2615 * all shareable and unused bits. All-zero CBM is invalid. 2616 */ 2617 static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r, 2618 u32 closid) 2619 { 2620 struct rdt_resource *r_cdp = NULL; 2621 struct rdt_domain *d_cdp = NULL; 2622 u32 used_b = 0, unused_b = 0; 2623 unsigned long tmp_cbm; 2624 enum rdtgrp_mode mode; 2625 u32 peer_ctl, *ctrl; 2626 int i; 2627 2628 rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp); 2629 d->have_new_ctrl = false; 2630 d->new_ctrl = r->cache.shareable_bits; 2631 used_b = r->cache.shareable_bits; 2632 ctrl = d->ctrl_val; 2633 for (i = 0; i < closids_supported(); i++, ctrl++) { 2634 if (closid_allocated(i) && i != closid) { 2635 mode = rdtgroup_mode_by_closid(i); 2636 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2637 /* 2638 * ctrl values for locksetup aren't relevant 2639 * until the schemata is written, and the mode 2640 * becomes RDT_MODE_PSEUDO_LOCKED. 2641 */ 2642 continue; 2643 /* 2644 * If CDP is active include peer domain's 2645 * usage to ensure there is no overlap 2646 * with an exclusive group. 2647 */ 2648 if (d_cdp) 2649 peer_ctl = d_cdp->ctrl_val[i]; 2650 else 2651 peer_ctl = 0; 2652 used_b |= *ctrl | peer_ctl; 2653 if (mode == RDT_MODE_SHAREABLE) 2654 d->new_ctrl |= *ctrl | peer_ctl; 2655 } 2656 } 2657 if (d->plr && d->plr->cbm > 0) 2658 used_b |= d->plr->cbm; 2659 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 2660 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 2661 d->new_ctrl |= unused_b; 2662 /* 2663 * Force the initial CBM to be valid, user can 2664 * modify the CBM based on system availability. 2665 */ 2666 d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r); 2667 /* 2668 * Assign the u32 CBM to an unsigned long to ensure that 2669 * bitmap_weight() does not access out-of-bound memory. 2670 */ 2671 tmp_cbm = d->new_ctrl; 2672 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 2673 rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); 2674 return -ENOSPC; 2675 } 2676 d->have_new_ctrl = true; 2677 2678 return 0; 2679 } 2680 2681 /* 2682 * Initialize cache resources with default values. 2683 * 2684 * A new RDT group is being created on an allocation capable (CAT) 2685 * supporting system. Set this group up to start off with all usable 2686 * allocations. 2687 * 2688 * If there are no more shareable bits available on any domain then 2689 * the entire allocation will fail. 2690 */ 2691 static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid) 2692 { 2693 struct rdt_domain *d; 2694 int ret; 2695 2696 list_for_each_entry(d, &r->domains, list) { 2697 ret = __init_one_rdt_domain(d, r, closid); 2698 if (ret < 0) 2699 return ret; 2700 } 2701 2702 return 0; 2703 } 2704 2705 /* Initialize MBA resource with default values. */ 2706 static void rdtgroup_init_mba(struct rdt_resource *r) 2707 { 2708 struct rdt_domain *d; 2709 2710 list_for_each_entry(d, &r->domains, list) { 2711 d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl; 2712 d->have_new_ctrl = true; 2713 } 2714 } 2715 2716 /* Initialize the RDT group's allocations. */ 2717 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) 2718 { 2719 struct rdt_resource *r; 2720 int ret; 2721 2722 for_each_alloc_enabled_rdt_resource(r) { 2723 if (r->rid == RDT_RESOURCE_MBA) { 2724 rdtgroup_init_mba(r); 2725 } else { 2726 ret = rdtgroup_init_cat(r, rdtgrp->closid); 2727 if (ret < 0) 2728 return ret; 2729 } 2730 2731 ret = update_domains(r, rdtgrp->closid); 2732 if (ret < 0) { 2733 rdt_last_cmd_puts("Failed to initialize allocations\n"); 2734 return ret; 2735 } 2736 2737 } 2738 2739 rdtgrp->mode = RDT_MODE_SHAREABLE; 2740 2741 return 0; 2742 } 2743 2744 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, 2745 const char *name, umode_t mode, 2746 enum rdt_group_type rtype, struct rdtgroup **r) 2747 { 2748 struct rdtgroup *prdtgrp, *rdtgrp; 2749 struct kernfs_node *kn; 2750 uint files = 0; 2751 int ret; 2752 2753 prdtgrp = rdtgroup_kn_lock_live(parent_kn); 2754 if (!prdtgrp) { 2755 ret = -ENODEV; 2756 goto out_unlock; 2757 } 2758 2759 if (rtype == RDTMON_GROUP && 2760 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2761 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { 2762 ret = -EINVAL; 2763 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 2764 goto out_unlock; 2765 } 2766 2767 /* allocate the rdtgroup. */ 2768 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 2769 if (!rdtgrp) { 2770 ret = -ENOSPC; 2771 rdt_last_cmd_puts("Kernel out of memory\n"); 2772 goto out_unlock; 2773 } 2774 *r = rdtgrp; 2775 rdtgrp->mon.parent = prdtgrp; 2776 rdtgrp->type = rtype; 2777 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 2778 2779 /* kernfs creates the directory for rdtgrp */ 2780 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 2781 if (IS_ERR(kn)) { 2782 ret = PTR_ERR(kn); 2783 rdt_last_cmd_puts("kernfs create error\n"); 2784 goto out_free_rgrp; 2785 } 2786 rdtgrp->kn = kn; 2787 2788 /* 2789 * kernfs_remove() will drop the reference count on "kn" which 2790 * will free it. But we still need it to stick around for the 2791 * rdtgroup_kn_unlock(kn} call below. Take one extra reference 2792 * here, which will be dropped inside rdtgroup_kn_unlock(). 2793 */ 2794 kernfs_get(kn); 2795 2796 ret = rdtgroup_kn_set_ugid(kn); 2797 if (ret) { 2798 rdt_last_cmd_puts("kernfs perm error\n"); 2799 goto out_destroy; 2800 } 2801 2802 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); 2803 ret = rdtgroup_add_files(kn, files); 2804 if (ret) { 2805 rdt_last_cmd_puts("kernfs fill error\n"); 2806 goto out_destroy; 2807 } 2808 2809 if (rdt_mon_capable) { 2810 ret = alloc_rmid(); 2811 if (ret < 0) { 2812 rdt_last_cmd_puts("Out of RMIDs\n"); 2813 goto out_destroy; 2814 } 2815 rdtgrp->mon.rmid = ret; 2816 2817 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 2818 if (ret) { 2819 rdt_last_cmd_puts("kernfs subdir error\n"); 2820 goto out_idfree; 2821 } 2822 } 2823 kernfs_activate(kn); 2824 2825 /* 2826 * The caller unlocks the parent_kn upon success. 2827 */ 2828 return 0; 2829 2830 out_idfree: 2831 free_rmid(rdtgrp->mon.rmid); 2832 out_destroy: 2833 kernfs_remove(rdtgrp->kn); 2834 out_free_rgrp: 2835 kfree(rdtgrp); 2836 out_unlock: 2837 rdtgroup_kn_unlock(parent_kn); 2838 return ret; 2839 } 2840 2841 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 2842 { 2843 kernfs_remove(rgrp->kn); 2844 free_rmid(rgrp->mon.rmid); 2845 kfree(rgrp); 2846 } 2847 2848 /* 2849 * Create a monitor group under "mon_groups" directory of a control 2850 * and monitor group(ctrl_mon). This is a resource group 2851 * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 2852 */ 2853 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 2854 const char *name, umode_t mode) 2855 { 2856 struct rdtgroup *rdtgrp, *prgrp; 2857 int ret; 2858 2859 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); 2860 if (ret) 2861 return ret; 2862 2863 prgrp = rdtgrp->mon.parent; 2864 rdtgrp->closid = prgrp->closid; 2865 2866 /* 2867 * Add the rdtgrp to the list of rdtgrps the parent 2868 * ctrl_mon group has to track. 2869 */ 2870 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 2871 2872 rdtgroup_kn_unlock(parent_kn); 2873 return ret; 2874 } 2875 2876 /* 2877 * These are rdtgroups created under the root directory. Can be used 2878 * to allocate and monitor resources. 2879 */ 2880 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 2881 const char *name, umode_t mode) 2882 { 2883 struct rdtgroup *rdtgrp; 2884 struct kernfs_node *kn; 2885 u32 closid; 2886 int ret; 2887 2888 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); 2889 if (ret) 2890 return ret; 2891 2892 kn = rdtgrp->kn; 2893 ret = closid_alloc(); 2894 if (ret < 0) { 2895 rdt_last_cmd_puts("Out of CLOSIDs\n"); 2896 goto out_common_fail; 2897 } 2898 closid = ret; 2899 ret = 0; 2900 2901 rdtgrp->closid = closid; 2902 ret = rdtgroup_init_alloc(rdtgrp); 2903 if (ret < 0) 2904 goto out_id_free; 2905 2906 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 2907 2908 if (rdt_mon_capable) { 2909 /* 2910 * Create an empty mon_groups directory to hold the subset 2911 * of tasks and cpus to monitor. 2912 */ 2913 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); 2914 if (ret) { 2915 rdt_last_cmd_puts("kernfs subdir error\n"); 2916 goto out_del_list; 2917 } 2918 } 2919 2920 goto out_unlock; 2921 2922 out_del_list: 2923 list_del(&rdtgrp->rdtgroup_list); 2924 out_id_free: 2925 closid_free(closid); 2926 out_common_fail: 2927 mkdir_rdt_prepare_clean(rdtgrp); 2928 out_unlock: 2929 rdtgroup_kn_unlock(parent_kn); 2930 return ret; 2931 } 2932 2933 /* 2934 * We allow creating mon groups only with in a directory called "mon_groups" 2935 * which is present in every ctrl_mon group. Check if this is a valid 2936 * "mon_groups" directory. 2937 * 2938 * 1. The directory should be named "mon_groups". 2939 * 2. The mon group itself should "not" be named "mon_groups". 2940 * This makes sure "mon_groups" directory always has a ctrl_mon group 2941 * as parent. 2942 */ 2943 static bool is_mon_groups(struct kernfs_node *kn, const char *name) 2944 { 2945 return (!strcmp(kn->name, "mon_groups") && 2946 strcmp(name, "mon_groups")); 2947 } 2948 2949 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 2950 umode_t mode) 2951 { 2952 /* Do not accept '\n' to avoid unparsable situation. */ 2953 if (strchr(name, '\n')) 2954 return -EINVAL; 2955 2956 /* 2957 * If the parent directory is the root directory and RDT 2958 * allocation is supported, add a control and monitoring 2959 * subdirectory 2960 */ 2961 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) 2962 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); 2963 2964 /* 2965 * If RDT monitoring is supported and the parent directory is a valid 2966 * "mon_groups" directory, add a monitoring subdirectory. 2967 */ 2968 if (rdt_mon_capable && is_mon_groups(parent_kn, name)) 2969 return rdtgroup_mkdir_mon(parent_kn, name, mode); 2970 2971 return -EPERM; 2972 } 2973 2974 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 2975 cpumask_var_t tmpmask) 2976 { 2977 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 2978 int cpu; 2979 2980 /* Give any tasks back to the parent group */ 2981 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 2982 2983 /* Update per cpu rmid of the moved CPUs first */ 2984 for_each_cpu(cpu, &rdtgrp->cpu_mask) 2985 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; 2986 /* 2987 * Update the MSR on moved CPUs and CPUs which have moved 2988 * task running on them. 2989 */ 2990 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 2991 update_closid_rmid(tmpmask, NULL); 2992 2993 rdtgrp->flags = RDT_DELETED; 2994 free_rmid(rdtgrp->mon.rmid); 2995 2996 /* 2997 * Remove the rdtgrp from the parent ctrl_mon group's list 2998 */ 2999 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3000 list_del(&rdtgrp->mon.crdtgrp_list); 3001 3002 /* 3003 * one extra hold on this, will drop when we kfree(rdtgrp) 3004 * in rdtgroup_kn_unlock() 3005 */ 3006 kernfs_get(kn); 3007 kernfs_remove(rdtgrp->kn); 3008 3009 return 0; 3010 } 3011 3012 static int rdtgroup_ctrl_remove(struct kernfs_node *kn, 3013 struct rdtgroup *rdtgrp) 3014 { 3015 rdtgrp->flags = RDT_DELETED; 3016 list_del(&rdtgrp->rdtgroup_list); 3017 3018 /* 3019 * one extra hold on this, will drop when we kfree(rdtgrp) 3020 * in rdtgroup_kn_unlock() 3021 */ 3022 kernfs_get(kn); 3023 kernfs_remove(rdtgrp->kn); 3024 return 0; 3025 } 3026 3027 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 3028 cpumask_var_t tmpmask) 3029 { 3030 int cpu; 3031 3032 /* Give any tasks back to the default group */ 3033 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 3034 3035 /* Give any CPUs back to the default group */ 3036 cpumask_or(&rdtgroup_default.cpu_mask, 3037 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3038 3039 /* Update per cpu closid and rmid of the moved CPUs first */ 3040 for_each_cpu(cpu, &rdtgrp->cpu_mask) { 3041 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; 3042 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; 3043 } 3044 3045 /* 3046 * Update the MSR on moved CPUs and CPUs which have moved 3047 * task running on them. 3048 */ 3049 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3050 update_closid_rmid(tmpmask, NULL); 3051 3052 closid_free(rdtgrp->closid); 3053 free_rmid(rdtgrp->mon.rmid); 3054 3055 rdtgroup_ctrl_remove(kn, rdtgrp); 3056 3057 /* 3058 * Free all the child monitor group rmids. 3059 */ 3060 free_all_child_rdtgrp(rdtgrp); 3061 3062 return 0; 3063 } 3064 3065 static int rdtgroup_rmdir(struct kernfs_node *kn) 3066 { 3067 struct kernfs_node *parent_kn = kn->parent; 3068 struct rdtgroup *rdtgrp; 3069 cpumask_var_t tmpmask; 3070 int ret = 0; 3071 3072 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 3073 return -ENOMEM; 3074 3075 rdtgrp = rdtgroup_kn_lock_live(kn); 3076 if (!rdtgrp) { 3077 ret = -EPERM; 3078 goto out; 3079 } 3080 3081 /* 3082 * If the rdtgroup is a ctrl_mon group and parent directory 3083 * is the root directory, remove the ctrl_mon group. 3084 * 3085 * If the rdtgroup is a mon group and parent directory 3086 * is a valid "mon_groups" directory, remove the mon group. 3087 */ 3088 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 3089 rdtgrp != &rdtgroup_default) { 3090 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3091 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 3092 ret = rdtgroup_ctrl_remove(kn, rdtgrp); 3093 } else { 3094 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask); 3095 } 3096 } else if (rdtgrp->type == RDTMON_GROUP && 3097 is_mon_groups(parent_kn, kn->name)) { 3098 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask); 3099 } else { 3100 ret = -EPERM; 3101 } 3102 3103 out: 3104 rdtgroup_kn_unlock(kn); 3105 free_cpumask_var(tmpmask); 3106 return ret; 3107 } 3108 3109 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) 3110 { 3111 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 3112 seq_puts(seq, ",cdp"); 3113 3114 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 3115 seq_puts(seq, ",cdpl2"); 3116 3117 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) 3118 seq_puts(seq, ",mba_MBps"); 3119 3120 return 0; 3121 } 3122 3123 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { 3124 .mkdir = rdtgroup_mkdir, 3125 .rmdir = rdtgroup_rmdir, 3126 .show_options = rdtgroup_show_options, 3127 }; 3128 3129 static int __init rdtgroup_setup_root(void) 3130 { 3131 int ret; 3132 3133 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, 3134 KERNFS_ROOT_CREATE_DEACTIVATED | 3135 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 3136 &rdtgroup_default); 3137 if (IS_ERR(rdt_root)) 3138 return PTR_ERR(rdt_root); 3139 3140 mutex_lock(&rdtgroup_mutex); 3141 3142 rdtgroup_default.closid = 0; 3143 rdtgroup_default.mon.rmid = 0; 3144 rdtgroup_default.type = RDTCTRL_GROUP; 3145 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); 3146 3147 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); 3148 3149 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE); 3150 if (ret) { 3151 kernfs_destroy_root(rdt_root); 3152 goto out; 3153 } 3154 3155 rdtgroup_default.kn = rdt_root->kn; 3156 kernfs_activate(rdtgroup_default.kn); 3157 3158 out: 3159 mutex_unlock(&rdtgroup_mutex); 3160 3161 return ret; 3162 } 3163 3164 /* 3165 * rdtgroup_init - rdtgroup initialization 3166 * 3167 * Setup resctrl file system including set up root, create mount point, 3168 * register rdtgroup filesystem, and initialize files under root directory. 3169 * 3170 * Return: 0 on success or -errno 3171 */ 3172 int __init rdtgroup_init(void) 3173 { 3174 int ret = 0; 3175 3176 seq_buf_init(&last_cmd_status, last_cmd_status_buf, 3177 sizeof(last_cmd_status_buf)); 3178 3179 ret = rdtgroup_setup_root(); 3180 if (ret) 3181 return ret; 3182 3183 ret = sysfs_create_mount_point(fs_kobj, "resctrl"); 3184 if (ret) 3185 goto cleanup_root; 3186 3187 ret = register_filesystem(&rdt_fs_type); 3188 if (ret) 3189 goto cleanup_mountpoint; 3190 3191 /* 3192 * Adding the resctrl debugfs directory here may not be ideal since 3193 * it would let the resctrl debugfs directory appear on the debugfs 3194 * filesystem before the resctrl filesystem is mounted. 3195 * It may also be ok since that would enable debugging of RDT before 3196 * resctrl is mounted. 3197 * The reason why the debugfs directory is created here and not in 3198 * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and 3199 * during the debugfs directory creation also &sb->s_type->i_mutex_key 3200 * (the lockdep class of inode->i_rwsem). Other filesystem 3201 * interactions (eg. SyS_getdents) have the lock ordering: 3202 * &sb->s_type->i_mutex_key --> &mm->mmap_lock 3203 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex 3204 * is taken, thus creating dependency: 3205 * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause 3206 * issues considering the other two lock dependencies. 3207 * By creating the debugfs directory here we avoid a dependency 3208 * that may cause deadlock (even though file operations cannot 3209 * occur until the filesystem is mounted, but I do not know how to 3210 * tell lockdep that). 3211 */ 3212 debugfs_resctrl = debugfs_create_dir("resctrl", NULL); 3213 3214 return 0; 3215 3216 cleanup_mountpoint: 3217 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3218 cleanup_root: 3219 kernfs_destroy_root(rdt_root); 3220 3221 return ret; 3222 } 3223 3224 void __exit rdtgroup_exit(void) 3225 { 3226 debugfs_remove_recursive(debugfs_resctrl); 3227 unregister_filesystem(&rdt_fs_type); 3228 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3229 kernfs_destroy_root(rdt_root); 3230 } 3231