// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across
 *   multi-socket systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
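/*
 * Illustrative walk-through of the allocator above (hypothetical numbers,
 * not live kernel state): with rdt_min_closid = 4, closid_init() leaves
 * closid_free_map = 0b1110 (CLOSIDs 1-3 free, CLOSID 0 reserved for the
 * default group). A first closid_alloc() finds the lowest set bit via
 * ffs() (which is 1-based), returns CLOSID 1 and clears its bit, leaving
 * 0b1100; closid_free(1) sets the bit again. Once the map reaches 0,
 * ffs() returns 0 and closid_alloc() fails with -ENOSPC.
 */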
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};
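/*
 * A minimal sketch of how a resctrl file is wired to the ops above. The
 * file name and show handler here are hypothetical, for exposition only;
 * the real table is res_common_files[] further down. kernfs stores the
 * rftype in kn->priv, which rdtgroup_seqfile_show() looks up on each read:
 *
 *	static int demo_show(struct kernfs_open_file *of,
 *			     struct seq_file *m, void *v)
 *	{
 *		seq_puts(m, "demo\n");
 *		return 0;
 *	}
 *
 *	static struct rftype demo_file = {
 *		.name		= "demo",
 *		.mode		= 0444,
 *		.kf_ops		= &rdtgroup_kf_single_ops,
 *		.seq_show	= demo_show,
 *	};
 */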
static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
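/*
 * Illustrative call sequence (a sketch mirroring cpus_mon_write() below;
 * "dropped_mask" is a hypothetical name): when CPUs are dropped from a
 * monitor group they are handed back to the parent control group, then
 * update_closid_rmid() re-points their per-cpu defaults:
 *
 *	cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, dropped_mask);
 *	update_closid_rmid(dropped_mask, prgrp);
 */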
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
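/*
 * Usage example (a sketch; assumes resctrl is mounted at /sys/fs/resctrl
 * and a group "grp0" exists):
 *
 *	# echo 0-3 > /sys/fs/resctrl/grp0/cpus_list	(cpulist_parse() format)
 *	# echo f > /sys/fs/resctrl/grp0/cpus		(cpumask_parse() format)
 *
 * CPUs dropped from a control group revert to the default group; CPUs
 * added are pulled from whichever group previously owned them.
 */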
/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		rdtgroup_remove(rdtgrp);
	}

	if (unlikely(current->flags & PF_EXITING))
		goto out;

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	resctrl_sched_in();
	preempt_enable();

out:
	kfree(callback);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, TWA_RESUME);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("Task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, tasks can be moved only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
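/*
 * A minimal sketch of the task_work pattern used above, under the
 * assumption of a standalone context ("my_func" is a hypothetical
 * callback). The work runs in the context of @tsk as it returns to user
 * space, which is what lets move_myself() update the task's own
 * PQR_ASSOC MSR:
 *
 *	struct callback_head *work = kzalloc(sizeof(*work), GFP_KERNEL);
 *
 *	init_task_work(work, my_func);
 *	if (task_work_add(tsk, work, TWA_RESUME))
 *		kfree(work);	// task is already exiting
 */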
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)   res:/
 *      mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)   res:group0
 *      mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5)   res:group0
 *      mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
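/*
 * Hypothetical output for a resource with cbm_len = 8 and two domains,
 * purely for illustration: bit_usage could read
 *
 *	0=XXHHSSE0;1=XXXXXXXX
 *
 * Bits are printed from most to least significant (the loop above counts
 * down), one annotation character per CBM bit, domains separated by ';'.
 */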
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *         Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *         Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *         and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	default:
		ret = -ENOENT;
		goto out;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		_d_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}
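/*
 * Worked example: called with r == &rdt_resources_all[RDT_RESOURCE_L3DATA]
 * and one of its domains @d, the switch above selects RDT_RESOURCE_L3CODE
 * as the peer and rdt_find_domain() returns that resource's domain with
 * the same d->id, since both resources are views of the same L3 cache
 * instance.
 */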
/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}
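/*
 * Worked example (hypothetical masks): testing cbm = 0x0f0 against a
 * domain where another closid holds an exclusive allocation of 0x0ff.
 * bitmap_intersects() sees the common bits 0x0f0, so the test returns
 * true and the caller (e.g. a schemata write) rejects the new CBM. With
 * an existing allocation of 0xf00 instead, there are no common bits and
 * the same test returns false.
 */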
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
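/*
 * Usage example (a sketch; assumes a group at /sys/fs/resctrl/grp0):
 *
 *	# echo exclusive > /sys/fs/resctrl/grp0/mode
 *
 * Accepted strings are "shareable", "exclusive" and "pseudo-locksetup".
 * The write fails with "Schemata overlaps" in last_cmd_status if another
 * group's allocation intersects, and a "pseudo-locked" group cannot be
 * changed at all.
 */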
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
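/*
 * Worked example (hypothetical numbers): a 4 MiB cache with cbm_len = 16
 * gives 4 MiB / 16 = 256 KiB per CBM bit. For cbm = 0x00ff,
 * bitmap_weight() is 8, so the region size is 8 * 256 KiB = 2 MiB.
 */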
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * Platform specific which (if any) capabilities are provided by
	 * thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
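/*
 * Worked example of the fflags match above: a file with
 * rft->fflags == (RF_CTRL_INFO | RFTYPE_RES_CACHE) is created only when
 * the caller passes fflags containing both bits, as happens for a cache
 * resource's info directory. A caller passing plain RF_CTRL_INFO skips
 * it, since (fflags & rft->fflags) != rft->fflags in that case.
 */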
static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
"%s_MON", r->name); 1827 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 1828 if (ret) 1829 goto out_destroy; 1830 } 1831 1832 ret = rdtgroup_kn_set_ugid(kn_info); 1833 if (ret) 1834 goto out_destroy; 1835 1836 kernfs_activate(kn_info); 1837 1838 return 0; 1839 1840 out_destroy: 1841 kernfs_remove(kn_info); 1842 return ret; 1843 } 1844 1845 static int 1846 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 1847 char *name, struct kernfs_node **dest_kn) 1848 { 1849 struct kernfs_node *kn; 1850 int ret; 1851 1852 /* create the directory */ 1853 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 1854 if (IS_ERR(kn)) 1855 return PTR_ERR(kn); 1856 1857 if (dest_kn) 1858 *dest_kn = kn; 1859 1860 ret = rdtgroup_kn_set_ugid(kn); 1861 if (ret) 1862 goto out_destroy; 1863 1864 kernfs_activate(kn); 1865 1866 return 0; 1867 1868 out_destroy: 1869 kernfs_remove(kn); 1870 return ret; 1871 } 1872 1873 static void l3_qos_cfg_update(void *arg) 1874 { 1875 bool *enable = arg; 1876 1877 wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 1878 } 1879 1880 static void l2_qos_cfg_update(void *arg) 1881 { 1882 bool *enable = arg; 1883 1884 wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 1885 } 1886 1887 static inline bool is_mba_linear(void) 1888 { 1889 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; 1890 } 1891 1892 static int set_cache_qos_cfg(int level, bool enable) 1893 { 1894 void (*update)(void *arg); 1895 struct rdt_resource *r_l; 1896 cpumask_var_t cpu_mask; 1897 struct rdt_domain *d; 1898 int cpu; 1899 1900 if (level == RDT_RESOURCE_L3) 1901 update = l3_qos_cfg_update; 1902 else if (level == RDT_RESOURCE_L2) 1903 update = l2_qos_cfg_update; 1904 else 1905 return -EINVAL; 1906 1907 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 1908 return -ENOMEM; 1909 1910 r_l = &rdt_resources_all[level]; 1911 list_for_each_entry(d, &r_l->domains, list) { 1912 /* Pick one CPU from each domain instance to update MSR */ 1913 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1914 } 1915 cpu = get_cpu(); 1916 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ 1917 if (cpumask_test_cpu(cpu, cpu_mask)) 1918 update(&enable); 1919 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */ 1920 smp_call_function_many(cpu_mask, update, &enable, 1); 1921 put_cpu(); 1922 1923 free_cpumask_var(cpu_mask); 1924 1925 return 0; 1926 } 1927 1928 /* Restore the qos cfg state when a domain comes online */ 1929 void rdt_domain_reconfigure_cdp(struct rdt_resource *r) 1930 { 1931 if (!r->alloc_capable) 1932 return; 1933 1934 if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) 1935 l2_qos_cfg_update(&r->alloc_enabled); 1936 1937 if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) 1938 l3_qos_cfg_update(&r->alloc_enabled); 1939 } 1940 1941 /* 1942 * Enable or disable the MBA software controller 1943 * which helps user specify bandwidth in MBps. 1944 * MBA software controller is supported only if 1945 * MBM is supported and MBA is in linear scale. 
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}

static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, or at a file, in which case we need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
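/*
 * Typical usage of the pair above, as seen throughout this file's file
 * handlers (a sketch):
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (!rdtgrp) {
 *		rdtgroup_kn_unlock(of->kn);
 *		return -ENOENT;
 *	}
 *	... operate on rdtgrp with rdtgroup_mutex held ...
 *	rdtgroup_kn_unlock(of->kn);
 *
 * Note that rdtgroup_kn_unlock() is called even when
 * rdtgroup_kn_lock_live() returned NULL: the group may have been deleted
 * after the waitcount reference was taken, and unlock drops it safely.
 */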
*/ 2065 if (rdtgrp->flags & RDT_DELETED) 2066 return NULL; 2067 2068 return rdtgrp; 2069 } 2070 2071 void rdtgroup_kn_unlock(struct kernfs_node *kn) 2072 { 2073 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2074 2075 if (!rdtgrp) 2076 return; 2077 2078 mutex_unlock(&rdtgroup_mutex); 2079 2080 if (atomic_dec_and_test(&rdtgrp->waitcount) && 2081 (rdtgrp->flags & RDT_DELETED)) { 2082 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2083 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2084 rdtgroup_pseudo_lock_remove(rdtgrp); 2085 kernfs_unbreak_active_protection(kn); 2086 rdtgroup_remove(rdtgrp); 2087 } else { 2088 kernfs_unbreak_active_protection(kn); 2089 } 2090 } 2091 2092 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2093 struct rdtgroup *prgrp, 2094 struct kernfs_node **mon_data_kn); 2095 2096 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 2097 { 2098 int ret = 0; 2099 2100 if (ctx->enable_cdpl2) 2101 ret = cdpl2_enable(); 2102 2103 if (!ret && ctx->enable_cdpl3) 2104 ret = cdpl3_enable(); 2105 2106 if (!ret && ctx->enable_mba_mbps) 2107 ret = set_mba_sc(true); 2108 2109 return ret; 2110 } 2111 2112 static int rdt_get_tree(struct fs_context *fc) 2113 { 2114 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2115 struct rdt_domain *dom; 2116 struct rdt_resource *r; 2117 int ret; 2118 2119 cpus_read_lock(); 2120 mutex_lock(&rdtgroup_mutex); 2121 /* 2122 * resctrl file system can only be mounted once. 2123 */ 2124 if (static_branch_unlikely(&rdt_enable_key)) { 2125 ret = -EBUSY; 2126 goto out; 2127 } 2128 2129 ret = rdt_enable_ctx(ctx); 2130 if (ret < 0) 2131 goto out_cdp; 2132 2133 closid_init(); 2134 2135 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 2136 if (ret < 0) 2137 goto out_mba; 2138 2139 if (rdt_mon_capable) { 2140 ret = mongroup_create_dir(rdtgroup_default.kn, 2141 &rdtgroup_default, "mon_groups", 2142 &kn_mongrp); 2143 if (ret < 0) 2144 goto out_info; 2145 2146 ret = mkdir_mondata_all(rdtgroup_default.kn, 2147 &rdtgroup_default, &kn_mondata); 2148 if (ret < 0) 2149 goto out_mongrp; 2150 rdtgroup_default.mon.mon_data_kn = kn_mondata; 2151 } 2152 2153 ret = rdt_pseudo_lock_init(); 2154 if (ret) 2155 goto out_mondata; 2156 2157 ret = kernfs_get_tree(fc); 2158 if (ret < 0) 2159 goto out_psl; 2160 2161 if (rdt_alloc_capable) 2162 static_branch_enable_cpuslocked(&rdt_alloc_enable_key); 2163 if (rdt_mon_capable) 2164 static_branch_enable_cpuslocked(&rdt_mon_enable_key); 2165 2166 if (rdt_alloc_capable || rdt_mon_capable) 2167 static_branch_enable_cpuslocked(&rdt_enable_key); 2168 2169 if (is_mbm_enabled()) { 2170 r = &rdt_resources_all[RDT_RESOURCE_L3]; 2171 list_for_each_entry(dom, &r->domains, list) 2172 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); 2173 } 2174 2175 goto out; 2176 2177 out_psl: 2178 rdt_pseudo_lock_release(); 2179 out_mondata: 2180 if (rdt_mon_capable) 2181 kernfs_remove(kn_mondata); 2182 out_mongrp: 2183 if (rdt_mon_capable) 2184 kernfs_remove(kn_mongrp); 2185 out_info: 2186 kernfs_remove(kn_info); 2187 out_mba: 2188 if (ctx->enable_mba_mbps) 2189 set_mba_sc(false); 2190 out_cdp: 2191 cdp_disable_all(); 2192 out: 2193 rdt_last_cmd_clear(); 2194 mutex_unlock(&rdtgroup_mutex); 2195 cpus_read_unlock(); 2196 return ret; 2197 } 2198 2199 enum rdt_param { 2200 Opt_cdp, 2201 Opt_cdpl2, 2202 Opt_mba_mbps, 2203 nr__rdt_params 2204 }; 2205 2206 static const struct fs_parameter_spec rdt_fs_parameters[] = { 2207 fsparam_flag("cdp", Opt_cdp), 2208 fsparam_flag("cdpl2", Opt_cdpl2), 2209 fsparam_flag("mba_MBps", Opt_mba_mbps), 2210 {} 2211 }; 2212 
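/* For illustration, a sketch of how the options above are used (see the resctrl documentation for the authoritative syntax): # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl "cdp" enables code/data prioritization for L3, "cdpl2" enables it for L2, and "mba_MBps" selects the MBA software controller. */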
2213 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) 2214 { 2215 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2216 struct fs_parse_result result; 2217 int opt; 2218 2219 opt = fs_parse(fc, rdt_fs_parameters, param, &result); 2220 if (opt < 0) 2221 return opt; 2222 2223 switch (opt) { 2224 case Opt_cdp: 2225 ctx->enable_cdpl3 = true; 2226 return 0; 2227 case Opt_cdpl2: 2228 ctx->enable_cdpl2 = true; 2229 return 0; 2230 case Opt_mba_mbps: 2231 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2232 return -EINVAL; 2233 ctx->enable_mba_mbps = true; 2234 return 0; 2235 } 2236 2237 return -EINVAL; 2238 } 2239 2240 static void rdt_fs_context_free(struct fs_context *fc) 2241 { 2242 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2243 2244 kernfs_free_fs_context(fc); 2245 kfree(ctx); 2246 } 2247 2248 static const struct fs_context_operations rdt_fs_context_ops = { 2249 .free = rdt_fs_context_free, 2250 .parse_param = rdt_parse_param, 2251 .get_tree = rdt_get_tree, 2252 }; 2253 2254 static int rdt_init_fs_context(struct fs_context *fc) 2255 { 2256 struct rdt_fs_context *ctx; 2257 2258 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); 2259 if (!ctx) 2260 return -ENOMEM; 2261 2262 ctx->kfc.root = rdt_root; 2263 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; 2264 fc->fs_private = &ctx->kfc; 2265 fc->ops = &rdt_fs_context_ops; 2266 put_user_ns(fc->user_ns); 2267 fc->user_ns = get_user_ns(&init_user_ns); 2268 fc->global = true; 2269 return 0; 2270 } 2271 2272 static int reset_all_ctrls(struct rdt_resource *r) 2273 { 2274 struct msr_param msr_param; 2275 cpumask_var_t cpu_mask; 2276 struct rdt_domain *d; 2277 int i, cpu; 2278 2279 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 2280 return -ENOMEM; 2281 2282 msr_param.res = r; 2283 msr_param.low = 0; 2284 msr_param.high = r->num_closid; 2285 2286 /* 2287 * Disable resource control for this resource by setting all 2288 * CBMs in all domains to the maximum mask value. Pick one CPU 2289 * from each domain to update the MSRs below. 2290 */ 2291 list_for_each_entry(d, &r->domains, list) { 2292 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 2293 2294 for (i = 0; i < r->num_closid; i++) 2295 d->ctrl_val[i] = r->default_ctrl; 2296 } 2297 cpu = get_cpu(); 2298 /* Update CBM on this cpu if it's in cpu_mask. */ 2299 if (cpumask_test_cpu(cpu, cpu_mask)) 2300 rdt_ctrl_update(&msr_param); 2301 /* Update CBM on all other cpus in cpu_mask. */ 2302 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1); 2303 put_cpu(); 2304 2305 free_cpumask_var(cpu_mask); 2306 2307 return 0; 2308 } 2309 2310 /* 2311 * Move tasks from one group to the other. If @from is NULL, then all tasks 2312 * in the system are moved unconditionally (used for teardown). 2313 * 2314 * If @mask is not NULL, the cpus on which moved tasks are running are set 2315 * in that mask so the update smp function call is restricted to the affected 2316 * cpus. 2317 */ 2318 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, 2319 struct cpumask *mask) 2320 { 2321 struct task_struct *p, *t; 2322 2323 read_lock(&tasklist_lock); 2324 for_each_process_thread(p, t) { 2325 if (!from || is_closid_match(t, from) || 2326 is_rmid_match(t, from)) { 2327 t->closid = to->closid; 2328 t->rmid = to->mon.rmid; 2329 2330 #ifdef CONFIG_SMP 2331 /* 2332 * This is safe on x86 w/o barriers as the ordering 2333 * of writing to task_cpu() and t->on_cpu is 2334 * the reverse of the reading here.
The detection is 2335 * inaccurate as tasks might move or schedule 2336 * before the smp function call takes place. In 2337 * such a case the function call is pointless, but 2338 * there is no other side effect. 2339 */ 2340 if (mask && t->on_cpu) 2341 cpumask_set_cpu(task_cpu(t), mask); 2342 #endif 2343 } 2344 } 2345 read_unlock(&tasklist_lock); 2346 } 2347 2348 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 2349 { 2350 struct rdtgroup *sentry, *stmp; 2351 struct list_head *head; 2352 2353 head = &rdtgrp->mon.crdtgrp_list; 2354 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 2355 free_rmid(sentry->mon.rmid); 2356 list_del(&sentry->mon.crdtgrp_list); 2357 2358 if (atomic_read(&sentry->waitcount) != 0) 2359 sentry->flags = RDT_DELETED; 2360 else 2361 rdtgroup_remove(sentry); 2362 } 2363 } 2364 2365 /* 2366 * Forcibly remove all subdirectories under the root. 2367 */ 2368 static void rmdir_all_sub(void) 2369 { 2370 struct rdtgroup *rdtgrp, *tmp; 2371 2372 /* Move all tasks to the default resource group */ 2373 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 2374 2375 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 2376 /* Free any child rmids */ 2377 free_all_child_rdtgrp(rdtgrp); 2378 2379 /* Remove each rdtgroup other than root */ 2380 if (rdtgrp == &rdtgroup_default) 2381 continue; 2382 2383 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2384 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2385 rdtgroup_pseudo_lock_remove(rdtgrp); 2386 2387 /* 2388 * Give any CPUs back to the default group. We cannot copy 2389 * cpu_online_mask because a CPU might have executed the 2390 * offline callback already, but is still marked online. 2391 */ 2392 cpumask_or(&rdtgroup_default.cpu_mask, 2393 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 2394 2395 free_rmid(rdtgrp->mon.rmid); 2396 2397 kernfs_remove(rdtgrp->kn); 2398 list_del(&rdtgrp->rdtgroup_list); 2399 2400 if (atomic_read(&rdtgrp->waitcount) != 0) 2401 rdtgrp->flags = RDT_DELETED; 2402 else 2403 rdtgroup_remove(rdtgrp); 2404 } 2405 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 2406 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 2407 2408 kernfs_remove(kn_info); 2409 kernfs_remove(kn_mongrp); 2410 kernfs_remove(kn_mondata); 2411 } 2412 2413 static void rdt_kill_sb(struct super_block *sb) 2414 { 2415 struct rdt_resource *r; 2416 2417 cpus_read_lock(); 2418 mutex_lock(&rdtgroup_mutex); 2419 2420 set_mba_sc(false); 2421 2422 /* Put everything back to default values.
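 * That is: full CBMs in all domains, CDP disabled, the MBA software controller off, and all tasks and CPUs back in the default resource group.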
*/ 2423 for_each_alloc_enabled_rdt_resource(r) 2424 reset_all_ctrls(r); 2425 cdp_disable_all(); 2426 rmdir_all_sub(); 2427 rdt_pseudo_lock_release(); 2428 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 2429 static_branch_disable_cpuslocked(&rdt_alloc_enable_key); 2430 static_branch_disable_cpuslocked(&rdt_mon_enable_key); 2431 static_branch_disable_cpuslocked(&rdt_enable_key); 2432 kernfs_kill_sb(sb); 2433 mutex_unlock(&rdtgroup_mutex); 2434 cpus_read_unlock(); 2435 } 2436 2437 static struct file_system_type rdt_fs_type = { 2438 .name = "resctrl", 2439 .init_fs_context = rdt_init_fs_context, 2440 .parameters = rdt_fs_parameters, 2441 .kill_sb = rdt_kill_sb, 2442 }; 2443 2444 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 2445 void *priv) 2446 { 2447 struct kernfs_node *kn; 2448 int ret = 0; 2449 2450 kn = __kernfs_create_file(parent_kn, name, 0444, 2451 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 2452 &kf_mondata_ops, priv, NULL, NULL); 2453 if (IS_ERR(kn)) 2454 return PTR_ERR(kn); 2455 2456 ret = rdtgroup_kn_set_ugid(kn); 2457 if (ret) { 2458 kernfs_remove(kn); 2459 return ret; 2460 } 2461 2462 return ret; 2463 } 2464 2465 /* 2466 * Remove all subdirectories of mon_data of ctrl_mon groups 2467 * and monitor groups with given domain id. 2468 */ 2469 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id) 2470 { 2471 struct rdtgroup *prgrp, *crgrp; 2472 char name[32]; 2473 2474 if (!r->mon_enabled) 2475 return; 2476 2477 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2478 sprintf(name, "mon_%s_%02d", r->name, dom_id); 2479 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); 2480 2481 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 2482 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); 2483 } 2484 } 2485 2486 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, 2487 struct rdt_domain *d, 2488 struct rdt_resource *r, struct rdtgroup *prgrp) 2489 { 2490 union mon_data_bits priv; 2491 struct kernfs_node *kn; 2492 struct mon_evt *mevt; 2493 struct rmid_read rr; 2494 char name[32]; 2495 int ret; 2496 2497 sprintf(name, "mon_%s_%02d", r->name, d->id); 2498 /* create the directory */ 2499 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2500 if (IS_ERR(kn)) 2501 return PTR_ERR(kn); 2502 2503 ret = rdtgroup_kn_set_ugid(kn); 2504 if (ret) 2505 goto out_destroy; 2506 2507 if (WARN_ON(list_empty(&r->evt_list))) { 2508 ret = -EPERM; 2509 goto out_destroy; 2510 } 2511 2512 priv.u.rid = r->rid; 2513 priv.u.domid = d->id; 2514 list_for_each_entry(mevt, &r->evt_list, list) { 2515 priv.u.evtid = mevt->evtid; 2516 ret = mon_addfile(kn, mevt->name, priv.priv); 2517 if (ret) 2518 goto out_destroy; 2519 2520 if (is_mbm_event(mevt->evtid)) 2521 mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); 2522 } 2523 kernfs_activate(kn); 2524 return 0; 2525 2526 out_destroy: 2527 kernfs_remove(kn); 2528 return ret; 2529 } 2530 2531 /* 2532 * Add all subdirectories of mon_data for "ctrl_mon" groups 2533 * and "monitor" groups with given domain id. 
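 * This is the creation counterpart of rmdir_mondata_subdir_allrdtgrp() above; it runs when a new domain comes online.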
2534 */ 2535 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 2536 struct rdt_domain *d) 2537 { 2538 struct kernfs_node *parent_kn; 2539 struct rdtgroup *prgrp, *crgrp; 2540 struct list_head *head; 2541 2542 if (!r->mon_enabled) 2543 return; 2544 2545 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2546 parent_kn = prgrp->mon.mon_data_kn; 2547 mkdir_mondata_subdir(parent_kn, d, r, prgrp); 2548 2549 head = &prgrp->mon.crdtgrp_list; 2550 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 2551 parent_kn = crgrp->mon.mon_data_kn; 2552 mkdir_mondata_subdir(parent_kn, d, r, crgrp); 2553 } 2554 } 2555 } 2556 2557 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, 2558 struct rdt_resource *r, 2559 struct rdtgroup *prgrp) 2560 { 2561 struct rdt_domain *dom; 2562 int ret; 2563 2564 list_for_each_entry(dom, &r->domains, list) { 2565 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); 2566 if (ret) 2567 return ret; 2568 } 2569 2570 return 0; 2571 } 2572 2573 /* 2574 * This creates a directory mon_data which contains the monitored data. 2575 * 2576 * mon_data has one directory for each domain, named in the format 2577 * mon_<domain_name>_<domain_id>. For example, mon_data 2578 * with an L3 domain looks like this: 2579 * ./mon_data: 2580 * mon_L3_00 2581 * mon_L3_01 2582 * mon_L3_02 2583 * ... 2584 * 2585 * Each domain directory has one file per event: 2586 * ./mon_L3_00/: 2587 * llc_occupancy 2588 * 2589 */ 2590 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2591 struct rdtgroup *prgrp, 2592 struct kernfs_node **dest_kn) 2593 { 2594 struct rdt_resource *r; 2595 struct kernfs_node *kn; 2596 int ret; 2597 2598 /* 2599 * Create the mon_data directory first. 2600 */ 2601 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); 2602 if (ret) 2603 return ret; 2604 2605 if (dest_kn) 2606 *dest_kn = kn; 2607 2608 /* 2609 * Create the subdirectories for each domain. Note that all events 2610 * in a domain like L3 are grouped into a resource whose domain is L3. 2611 */ 2612 for_each_mon_enabled_rdt_resource(r) { 2613 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); 2614 if (ret) 2615 goto out_destroy; 2616 } 2617 2618 return 0; 2619 2620 out_destroy: 2621 kernfs_remove(kn); 2622 return ret; 2623 } 2624 2625 /** 2626 * cbm_ensure_valid - Enforce validity on provided CBM 2627 * @_val: Candidate CBM 2628 * @r: RDT resource to which the CBM belongs 2629 * 2630 * The provided CBM represents all cache portions available for use. This 2631 * may be represented by a bitmap that does not consist of contiguous ones 2632 * and thus be an invalid CBM. 2633 * Here the provided CBM is forced to be a valid CBM by only considering 2634 * the first set of contiguous bits as valid and clearing all other bits. 2635 * The intention here is to provide a valid default CBM with which a new 2636 * resource group is initialized. The user can follow this with a 2637 * modification to the CBM if the default does not satisfy the 2638 * requirements.
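 * For example, a candidate CBM of 0xB (binary 1011) is reduced to 0x3 (binary 0011): the run of ones starting at the least significant set bit is kept, and everything from the first zero upwards is cleared.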
2639 */ 2640 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) 2641 { 2642 unsigned int cbm_len = r->cache.cbm_len; 2643 unsigned long first_bit, zero_bit; 2644 unsigned long val = _val; 2645 2646 if (!val) 2647 return 0; 2648 2649 first_bit = find_first_bit(&val, cbm_len); 2650 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 2651 2652 /* Clear any remaining bits to ensure contiguous region */ 2653 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); 2654 return (u32)val; 2655 } 2656 2657 /* 2658 * Initialize cache resources per RDT domain 2659 * 2660 * Set the RDT domain up to start off with all usable allocations. That is, 2661 * all shareable and unused bits. All-zero CBM is invalid. 2662 */ 2663 static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r, 2664 u32 closid) 2665 { 2666 struct rdt_resource *r_cdp = NULL; 2667 struct rdt_domain *d_cdp = NULL; 2668 u32 used_b = 0, unused_b = 0; 2669 unsigned long tmp_cbm; 2670 enum rdtgrp_mode mode; 2671 u32 peer_ctl, *ctrl; 2672 int i; 2673 2674 rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp); 2675 d->have_new_ctrl = false; 2676 d->new_ctrl = r->cache.shareable_bits; 2677 used_b = r->cache.shareable_bits; 2678 ctrl = d->ctrl_val; 2679 for (i = 0; i < closids_supported(); i++, ctrl++) { 2680 if (closid_allocated(i) && i != closid) { 2681 mode = rdtgroup_mode_by_closid(i); 2682 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2683 /* 2684 * ctrl values for locksetup aren't relevant 2685 * until the schemata is written, and the mode 2686 * becomes RDT_MODE_PSEUDO_LOCKED. 2687 */ 2688 continue; 2689 /* 2690 * If CDP is active include peer domain's 2691 * usage to ensure there is no overlap 2692 * with an exclusive group. 2693 */ 2694 if (d_cdp) 2695 peer_ctl = d_cdp->ctrl_val[i]; 2696 else 2697 peer_ctl = 0; 2698 used_b |= *ctrl | peer_ctl; 2699 if (mode == RDT_MODE_SHAREABLE) 2700 d->new_ctrl |= *ctrl | peer_ctl; 2701 } 2702 } 2703 if (d->plr && d->plr->cbm > 0) 2704 used_b |= d->plr->cbm; 2705 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 2706 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 2707 d->new_ctrl |= unused_b; 2708 /* 2709 * Force the initial CBM to be valid, user can 2710 * modify the CBM based on system availability. 2711 */ 2712 d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r); 2713 /* 2714 * Assign the u32 CBM to an unsigned long to ensure that 2715 * bitmap_weight() does not access out-of-bound memory. 2716 */ 2717 tmp_cbm = d->new_ctrl; 2718 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 2719 rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); 2720 return -ENOSPC; 2721 } 2722 d->have_new_ctrl = true; 2723 2724 return 0; 2725 } 2726 2727 /* 2728 * Initialize cache resources with default values. 2729 * 2730 * A new RDT group is being created on an allocation capable (CAT) 2731 * supporting system. Set this group up to start off with all usable 2732 * allocations. 2733 * 2734 * If there are no more shareable bits available on any domain then 2735 * the entire allocation will fail. 2736 */ 2737 static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid) 2738 { 2739 struct rdt_domain *d; 2740 int ret; 2741 2742 list_for_each_entry(d, &r->domains, list) { 2743 ret = __init_one_rdt_domain(d, r, closid); 2744 if (ret < 0) 2745 return ret; 2746 } 2747 2748 return 0; 2749 } 2750 2751 /* Initialize MBA resource with default values. 
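 * The default is no throttling: r->default_ctrl (full bandwidth), or MBA_MAX_MBPS when the software controller is in use.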
*/ 2752 static void rdtgroup_init_mba(struct rdt_resource *r) 2753 { 2754 struct rdt_domain *d; 2755 2756 list_for_each_entry(d, &r->domains, list) { 2757 d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl; 2758 d->have_new_ctrl = true; 2759 } 2760 } 2761 2762 /* Initialize the RDT group's allocations. */ 2763 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) 2764 { 2765 struct rdt_resource *r; 2766 int ret; 2767 2768 for_each_alloc_enabled_rdt_resource(r) { 2769 if (r->rid == RDT_RESOURCE_MBA) { 2770 rdtgroup_init_mba(r); 2771 } else { 2772 ret = rdtgroup_init_cat(r, rdtgrp->closid); 2773 if (ret < 0) 2774 return ret; 2775 } 2776 2777 ret = update_domains(r, rdtgrp->closid); 2778 if (ret < 0) { 2779 rdt_last_cmd_puts("Failed to initialize allocations\n"); 2780 return ret; 2781 } 2782 2783 } 2784 2785 rdtgrp->mode = RDT_MODE_SHAREABLE; 2786 2787 return 0; 2788 } 2789 2790 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, 2791 const char *name, umode_t mode, 2792 enum rdt_group_type rtype, struct rdtgroup **r) 2793 { 2794 struct rdtgroup *prdtgrp, *rdtgrp; 2795 struct kernfs_node *kn; 2796 uint files = 0; 2797 int ret; 2798 2799 prdtgrp = rdtgroup_kn_lock_live(parent_kn); 2800 if (!prdtgrp) { 2801 ret = -ENODEV; 2802 goto out_unlock; 2803 } 2804 2805 if (rtype == RDTMON_GROUP && 2806 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2807 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { 2808 ret = -EINVAL; 2809 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 2810 goto out_unlock; 2811 } 2812 2813 /* allocate the rdtgroup. */ 2814 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 2815 if (!rdtgrp) { 2816 ret = -ENOSPC; 2817 rdt_last_cmd_puts("Kernel out of memory\n"); 2818 goto out_unlock; 2819 } 2820 *r = rdtgrp; 2821 rdtgrp->mon.parent = prdtgrp; 2822 rdtgrp->type = rtype; 2823 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 2824 2825 /* kernfs creates the directory for rdtgrp */ 2826 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 2827 if (IS_ERR(kn)) { 2828 ret = PTR_ERR(kn); 2829 rdt_last_cmd_puts("kernfs create error\n"); 2830 goto out_free_rgrp; 2831 } 2832 rdtgrp->kn = kn; 2833 2834 /* 2835 * kernfs_remove() will drop the reference count on "kn" which 2836 * will free it. But we still need it to stick around for the 2837 * rdtgroup_kn_unlock(kn) call. Take one extra reference here, 2838 * which will be dropped by kernfs_put() in rdtgroup_remove(). 2839 */ 2840 kernfs_get(kn); 2841 2842 ret = rdtgroup_kn_set_ugid(kn); 2843 if (ret) { 2844 rdt_last_cmd_puts("kernfs perm error\n"); 2845 goto out_destroy; 2846 } 2847 2848 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); 2849 ret = rdtgroup_add_files(kn, files); 2850 if (ret) { 2851 rdt_last_cmd_puts("kernfs fill error\n"); 2852 goto out_destroy; 2853 } 2854 2855 if (rdt_mon_capable) { 2856 ret = alloc_rmid(); 2857 if (ret < 0) { 2858 rdt_last_cmd_puts("Out of RMIDs\n"); 2859 goto out_destroy; 2860 } 2861 rdtgrp->mon.rmid = ret; 2862 2863 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 2864 if (ret) { 2865 rdt_last_cmd_puts("kernfs subdir error\n"); 2866 goto out_idfree; 2867 } 2868 } 2869 kernfs_activate(kn); 2870 2871 /* 2872 * The caller unlocks the parent_kn upon success. 
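 * (On failure mkdir_rdt_prepare() drops the lock itself via the out_unlock path; on success rdtgroup_mkdir_mon() and rdtgroup_mkdir_ctrl_mon() call rdtgroup_kn_unlock(parent_kn) once they are done.)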
2873 */ 2874 return 0; 2875 2876 out_idfree: 2877 free_rmid(rdtgrp->mon.rmid); 2878 out_destroy: 2879 kernfs_put(rdtgrp->kn); 2880 kernfs_remove(rdtgrp->kn); 2881 out_free_rgrp: 2882 kfree(rdtgrp); 2883 out_unlock: 2884 rdtgroup_kn_unlock(parent_kn); 2885 return ret; 2886 } 2887 2888 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 2889 { 2890 kernfs_remove(rgrp->kn); 2891 free_rmid(rgrp->mon.rmid); 2892 rdtgroup_remove(rgrp); 2893 } 2894 2895 /* 2896 * Create a monitor group under the "mon_groups" directory of a control 2897 * and monitor group (ctrl_mon). This is a resource group 2898 * used to monitor a subset of tasks and cpus in its parent ctrl_mon group. 2899 */ 2900 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 2901 const char *name, umode_t mode) 2902 { 2903 struct rdtgroup *rdtgrp, *prgrp; 2904 int ret; 2905 2906 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); 2907 if (ret) 2908 return ret; 2909 2910 prgrp = rdtgrp->mon.parent; 2911 rdtgrp->closid = prgrp->closid; 2912 2913 /* 2914 * Add the rdtgrp to the list of rdtgrps the parent 2915 * ctrl_mon group has to track. 2916 */ 2917 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 2918 2919 rdtgroup_kn_unlock(parent_kn); 2920 return ret; 2921 } 2922 2923 /* 2924 * These are rdtgroups created under the root directory. They can be used 2925 * to allocate and monitor resources. 2926 */ 2927 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 2928 const char *name, umode_t mode) 2929 { 2930 struct rdtgroup *rdtgrp; 2931 struct kernfs_node *kn; 2932 u32 closid; 2933 int ret; 2934 2935 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); 2936 if (ret) 2937 return ret; 2938 2939 kn = rdtgrp->kn; 2940 ret = closid_alloc(); 2941 if (ret < 0) { 2942 rdt_last_cmd_puts("Out of CLOSIDs\n"); 2943 goto out_common_fail; 2944 } 2945 closid = ret; 2946 ret = 0; 2947 2948 rdtgrp->closid = closid; 2949 ret = rdtgroup_init_alloc(rdtgrp); 2950 if (ret < 0) 2951 goto out_id_free; 2952 2953 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 2954 2955 if (rdt_mon_capable) { 2956 /* 2957 * Create an empty mon_groups directory to hold the subset 2958 * of tasks and cpus to monitor. 2959 */ 2960 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); 2961 if (ret) { 2962 rdt_last_cmd_puts("kernfs subdir error\n"); 2963 goto out_del_list; 2964 } 2965 } 2966 2967 goto out_unlock; 2968 2969 out_del_list: 2970 list_del(&rdtgrp->rdtgroup_list); 2971 out_id_free: 2972 closid_free(closid); 2973 out_common_fail: 2974 mkdir_rdt_prepare_clean(rdtgrp); 2975 out_unlock: 2976 rdtgroup_kn_unlock(parent_kn); 2977 return ret; 2978 } 2979 2980 /* 2981 * We allow creating mon groups only within a directory called "mon_groups", 2982 * which is present in every ctrl_mon group. Check if this is a valid 2983 * "mon_groups" directory. 2984 * 2985 * 1. The directory should be named "mon_groups". 2986 * 2. The mon group itself should "not" be named "mon_groups". 2987 * This makes sure the "mon_groups" directory always has a ctrl_mon group 2988 * as its parent. 2989 */ 2990 static bool is_mon_groups(struct kernfs_node *kn, const char *name) 2991 { 2992 return (!strcmp(kn->name, "mon_groups") && 2993 strcmp(name, "mon_groups")); 2994 } 2995 2996 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 2997 umode_t mode) 2998 { 2999 /* Do not accept '\n', to avoid an unparsable situation.
*/ 3000 if (strchr(name, '\n')) 3001 return -EINVAL; 3002 3003 /* 3004 * If the parent directory is the root directory and RDT 3005 * allocation is supported, add a control and monitoring 3006 * subdirectory. 3007 */ 3008 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) 3009 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); 3010 3011 /* 3012 * If RDT monitoring is supported and the parent directory is a valid 3013 * "mon_groups" directory, add a monitoring subdirectory. 3014 */ 3015 if (rdt_mon_capable && is_mon_groups(parent_kn, name)) 3016 return rdtgroup_mkdir_mon(parent_kn, name, mode); 3017 3018 return -EPERM; 3019 } 3020 3021 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 3022 cpumask_var_t tmpmask) 3023 { 3024 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 3025 int cpu; 3026 3027 /* Give any tasks back to the parent group */ 3028 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 3029 3030 /* Update per cpu rmid of the moved CPUs first */ 3031 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3032 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; 3033 /* 3034 * Update the MSR on moved CPUs and CPUs which have moved 3035 * tasks running on them. 3036 */ 3037 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3038 update_closid_rmid(tmpmask, NULL); 3039 3040 rdtgrp->flags = RDT_DELETED; 3041 free_rmid(rdtgrp->mon.rmid); 3042 3043 /* 3044 * Remove the rdtgrp from the parent ctrl_mon group's list 3045 */ 3046 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3047 list_del(&rdtgrp->mon.crdtgrp_list); 3048 3049 kernfs_remove(rdtgrp->kn); 3050 3051 return 0; 3052 } 3053 3054 static int rdtgroup_ctrl_remove(struct kernfs_node *kn, 3055 struct rdtgroup *rdtgrp) 3056 { 3057 rdtgrp->flags = RDT_DELETED; 3058 list_del(&rdtgrp->rdtgroup_list); 3059 3060 kernfs_remove(rdtgrp->kn); 3061 return 0; 3062 } 3063 3064 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 3065 cpumask_var_t tmpmask) 3066 { 3067 int cpu; 3068 3069 /* Give any tasks back to the default group */ 3070 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 3071 3072 /* Give any CPUs back to the default group */ 3073 cpumask_or(&rdtgroup_default.cpu_mask, 3074 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3075 3076 /* Update per cpu closid and rmid of the moved CPUs first */ 3077 for_each_cpu(cpu, &rdtgrp->cpu_mask) { 3078 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; 3079 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; 3080 } 3081 3082 /* 3083 * Update the MSR on moved CPUs and CPUs which have moved 3084 * tasks running on them. 3085 */ 3086 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3087 update_closid_rmid(tmpmask, NULL); 3088 3089 closid_free(rdtgrp->closid); 3090 free_rmid(rdtgrp->mon.rmid); 3091 3092 rdtgroup_ctrl_remove(kn, rdtgrp); 3093 3094 /* 3095 * Free all the child monitor group rmids. 3096 */ 3097 free_all_child_rdtgrp(rdtgrp); 3098 3099 return 0; 3100 } 3101 3102 static int rdtgroup_rmdir(struct kernfs_node *kn) 3103 { 3104 struct kernfs_node *parent_kn = kn->parent; 3105 struct rdtgroup *rdtgrp; 3106 cpumask_var_t tmpmask; 3107 int ret = 0; 3108 3109 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 3110 return -ENOMEM; 3111 3112 rdtgrp = rdtgroup_kn_lock_live(kn); 3113 if (!rdtgrp) { 3114 ret = -EPERM; 3115 goto out; 3116 } 3117 3118 /* 3119 * If the rdtgroup is a ctrl_mon group and parent directory 3120 * is the root directory, remove the ctrl_mon group.
3121 * 3122 * If the rdtgroup is a mon group and parent directory 3123 * is a valid "mon_groups" directory, remove the mon group. 3124 */ 3125 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 3126 rdtgrp != &rdtgroup_default) { 3127 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3128 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 3129 ret = rdtgroup_ctrl_remove(kn, rdtgrp); 3130 } else { 3131 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask); 3132 } 3133 } else if (rdtgrp->type == RDTMON_GROUP && 3134 is_mon_groups(parent_kn, kn->name)) { 3135 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask); 3136 } else { 3137 ret = -EPERM; 3138 } 3139 3140 out: 3141 rdtgroup_kn_unlock(kn); 3142 free_cpumask_var(tmpmask); 3143 return ret; 3144 } 3145 3146 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) 3147 { 3148 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 3149 seq_puts(seq, ",cdp"); 3150 3151 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 3152 seq_puts(seq, ",cdpl2"); 3153 3154 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) 3155 seq_puts(seq, ",mba_MBps"); 3156 3157 return 0; 3158 } 3159 3160 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { 3161 .mkdir = rdtgroup_mkdir, 3162 .rmdir = rdtgroup_rmdir, 3163 .show_options = rdtgroup_show_options, 3164 }; 3165 3166 static int __init rdtgroup_setup_root(void) 3167 { 3168 int ret; 3169 3170 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, 3171 KERNFS_ROOT_CREATE_DEACTIVATED | 3172 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 3173 &rdtgroup_default); 3174 if (IS_ERR(rdt_root)) 3175 return PTR_ERR(rdt_root); 3176 3177 mutex_lock(&rdtgroup_mutex); 3178 3179 rdtgroup_default.closid = 0; 3180 rdtgroup_default.mon.rmid = 0; 3181 rdtgroup_default.type = RDTCTRL_GROUP; 3182 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); 3183 3184 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); 3185 3186 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE); 3187 if (ret) { 3188 kernfs_destroy_root(rdt_root); 3189 goto out; 3190 } 3191 3192 rdtgroup_default.kn = rdt_root->kn; 3193 kernfs_activate(rdtgroup_default.kn); 3194 3195 out: 3196 mutex_unlock(&rdtgroup_mutex); 3197 3198 return ret; 3199 } 3200 3201 /* 3202 * rdtgroup_init - rdtgroup initialization 3203 * 3204 * Set up the resctrl file system: set up the root, create the mount point, 3205 * register the rdtgroup filesystem, and initialize files under the root directory. 3206 * 3207 * Return: 0 on success or -errno 3208 */ 3209 int __init rdtgroup_init(void) 3210 { 3211 int ret = 0; 3212 3213 seq_buf_init(&last_cmd_status, last_cmd_status_buf, 3214 sizeof(last_cmd_status_buf)); 3215 3216 ret = rdtgroup_setup_root(); 3217 if (ret) 3218 return ret; 3219 3220 ret = sysfs_create_mount_point(fs_kobj, "resctrl"); 3221 if (ret) 3222 goto cleanup_root; 3223 3224 ret = register_filesystem(&rdt_fs_type); 3225 if (ret) 3226 goto cleanup_mountpoint; 3227 3228 /* 3229 * Adding the resctrl debugfs directory here may not be ideal since 3230 * it would let the resctrl debugfs directory appear on the debugfs 3231 * filesystem before the resctrl filesystem is mounted. 3232 * It may also be ok, since that would enable debugging of RDT before 3233 * resctrl is mounted. 3234 * The debugfs directory is created here, and not in rdt_get_tree(), 3235 * because rdt_get_tree() takes rdtgroup_mutex and, during the debugfs 3236 * directory creation, also &sb->s_type->i_mutex_key 3237 * (the lockdep class of inode->i_rwsem). Other filesystem
 * interactions (e.g. SyS_getdents) have the lock ordering: 3239 * &sb->s_type->i_mutex_key --> &mm->mmap_lock. 3240 * During mmap(), called with &mm->mmap_lock held, rdtgroup_mutex 3241 * is taken, thus creating the dependency 3242 * &mm->mmap_lock --> rdtgroup_mutex, which can cause 3243 * deadlock issues considering the other two lock dependencies. 3244 * By creating the debugfs directory here we avoid a dependency 3245 * that may cause a deadlock (even though file operations cannot 3246 * occur until the filesystem is mounted; I do not know how to 3247 * tell lockdep that). 3248 */ 3249 debugfs_resctrl = debugfs_create_dir("resctrl", NULL); 3250 3251 return 0; 3252 3253 cleanup_mountpoint: 3254 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3255 cleanup_root: 3256 kernfs_destroy_root(rdt_root); 3257 3258 return ret; 3259 } 3260 3261 void __exit rdtgroup_exit(void) 3262 { 3263 debugfs_remove_recursive(debugfs_resctrl); 3264 unregister_filesystem(&rdt_fs_type); 3265 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3266 kernfs_destroy_root(rdt_root); 3267 } 3268