// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/tick.h>

#include "internal.h"

struct rdt_parse_data {
	struct rdtgroup		*rdtgrp;
	char			*buf;
};

typedef int (ctrlval_parser_t)(struct rdt_parse_data *data,
			       struct resctrl_schema *s,
			       struct rdt_ctrl_domain *d);

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and max bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	int ret;
	u32 bw;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtou32(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Invalid MB value %s\n", buf);
		return false;
	}

	/* Nothing else to do if software controller is enabled. */
	if (is_mba_sc(r)) {
		*data = bw;
		return true;
	}

	if (bw < r->membw.min_bw || bw > r->membw.max_bw) {
		rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
				    bw, r->membw.min_bw, r->membw.max_bw);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
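
/*
 * Illustrative example for bw_validate() above, assuming hypothetical
 * hardware limits of min_bw = 10, max_bw = 100 and bw_gran = 10:
 * writing "23" is accepted and rounded up to the next control step,
 * roundup(23, 10) = 30, while "5" or "110" is rejected as out of range.
 */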

static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
		    struct rdt_ctrl_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	u32 bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 *
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	u32 supported_bits = BIT_MASK(r->cache.cbm_len) - 1;
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit, val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > supported_bits) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
		     struct rdt_ctrl_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}
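
/*
 * Illustrative example for cbm_validate() above, assuming a hypothetical
 * cache with cbm_len = 20 and min_cbm_bits = 1: "ff0" (contiguous 1s) is
 * accepted, while "ff0f" is rejected unless the resource reports
 * arch_has_sparse_bitmasks, because a set bit follows the first zero bit
 * after the first run of 1s.
 */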

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	ctrlval_parser_t *parse_ctrlval = NULL;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	struct rdt_ctrl_domain *d;
	char *dom = NULL, *id;
	unsigned long dom_id;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	switch (r->schema_fmt) {
	case RESCTRL_SCHEMA_BITMAP:
		parse_ctrlval = &parse_cbm;
		break;
	case RESCTRL_SCHEMA_RANGE:
		parse_ctrlval = &parse_bw;
		break;
	}

	if (WARN_ON_ONCE(!parse_ctrlval))
		return -EINVAL;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		if (d->hdr.id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * We are in pseudo-locking setup mode and
				 * just parsed a valid CBM that should be
				 * pseudo-locked. Only one locked region per
				 * resource group and domain is allowed, so
				 * just do the required initialization for a
				 * single region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
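
/*
 * Illustrative example for parse_line() above: given the schema value
 * "0=7ff0;1=3c0" for a bitmap resource, strsep() splits out the tokens
 * "0=7ff0" and "1=3c0", the decimal ids 0 and 1 are matched against the
 * resource's domain list, and each hex mask is handed to parse_cbm()
 * for validation and staging.
 */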

int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	u32 idx = resctrl_get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.dom = d;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(&msr_param);

	return 0;
}
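
/*
 * Note on the MSR update batching below (illustrative): with CDP enabled,
 * the CODE and DATA configurations of one closid map to adjacent indices
 * in ctrl_val[], so both staged values for a domain widen the same
 * [msr_param.low, msr_param.high) range and are flushed to hardware with
 * a single IPI per domain.
 */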

int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_ctrl_domain *hw_dom;
	struct msr_param msr_param;
	struct rdt_ctrl_domain *d;
	enum resctrl_conf_type t;
	u32 idx;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		hw_dom = resctrl_to_arch_ctrl_dom(d);
		msr_param.res = NULL;
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = resctrl_get_config_index(closid, t);
			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
				continue;
			hw_dom->ctrl_val[idx] = cfg->new_ctrl;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
				msr_param.dom = d;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
		if (msr_param.res)
			smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
	}

	return 0;
}

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
	u32 idx = resctrl_get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}

static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_ctrl_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, schema->fmt_str, dom->hdr.id, ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->hdr.id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}

static int smp_mon_event_count(void *arg)
{
	mon_event_count(arg);

	return 0;
}

ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (!strcmp(buf, "mbm_local_bytes")) {
		if (resctrl_arch_is_mbm_local_enabled())
			rdtgrp->mba_mbps_event = QOS_L3_MBM_LOCAL_EVENT_ID;
		else
			ret = -EINVAL;
	} else if (!strcmp(buf, "mbm_total_bytes")) {
		if (resctrl_arch_is_mbm_total_enabled())
			rdtgrp->mba_mbps_event = QOS_L3_MBM_TOTAL_EVENT_ID;
		else
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (ret)
		rdt_last_cmd_printf("Unsupported event id '%s'\n", buf);

	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
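
/*
 * Usage sketch (illustrative) for the event interface above: when resctrl
 * is mounted with the mba_MBps software controller enabled, writing
 * "mbm_total_bytes" (with a trailing newline) to a group's event file
 * selects the MBM event the controller uses as feedback, and reading the
 * file back (below) reports the current selection.
 */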

int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
				 struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		switch (rdtgrp->mba_mbps_event) {
		case QOS_L3_MBM_LOCAL_EVENT_ID:
			seq_puts(s, "mbm_local_bytes\n");
			break;
		case QOS_L3_MBM_TOTAL_EVENT_ID:
			seq_puts(s, "mbm_total_bytes\n");
			break;
		default:
			pr_warn_once("Bad event %d\n", rdtgrp->mba_mbps_event);
			ret = -EINVAL;
			break;
		}
	} else {
		ret = -ENOENT;
	}

	rdtgroup_kn_unlock(of->kn);

	return ret;
}

struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
					   struct list_head **pos)
{
	struct rdt_domain_hdr *d;
	struct list_head *l;

	list_for_each(l, h) {
		d = list_entry(l, struct rdt_domain_hdr, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
		    cpumask_t *cpumask, int evtid, int first)
{
	int cpu;

	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/*
	 * Set up the parameters to pass to mon_event_count() to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->first = first;
	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {
		rr->err = -EINVAL;
		return;
	}

	cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but
	 * all the CPUs may be nohz_full. If so, pick a CPU to IPI.
	 * MPAM's resctrl_arch_rmid_read() is unable to read the
	 * counters on some platforms if it's called in IRQ context.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(cpumask, mon_event_count, rr, 1);
	else
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
}
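
/*
 * A note on the mon_data file encoding used below (illustrative): each
 * file's kernfs private data is a union mon_data_bits packing the
 * resource id, domain id and event id, plus a "sum" flag. The sum flag
 * marks files that aggregate an event across all monitor domains sharing
 * an L3 cache id, as can happen when one cache spans multiple domains
 * (for example with Sub-NUMA Clustering).
 */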

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rdt_domain_hdr *hdr;
	struct rmid_read rr = {0};
	struct rdt_mon_domain *d;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;
	r = resctrl_arch_get_resource(resid);

	if (md.u.sum) {
		/*
		 * This file requires summing across all domains that share
		 * the L3 cache id that was provided in the "domid" field of the
		 * mon_data_bits union. Search all domains in the resource for
		 * one that matches this cache id.
		 */
		list_for_each_entry(d, &r->mon_domains, hdr.list) {
			if (d->ci->id == domid) {
				rr.ci = d->ci;
				mon_event_read(&rr, r, NULL, rdtgrp,
					       &d->ci->shared_cpu_map, evtid, false);
				goto checkresult;
			}
		}
		ret = -ENOENT;
		goto out;
	} else {
		/*
		 * This file provides data from a single domain. Search
		 * the resource to find the domain with "domid".
		 */
		hdr = resctrl_find_domain(&r->mon_domains, domid, NULL);
		if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
			ret = -ENOENT;
			goto out;
		}
		d = container_of(hdr, struct rdt_mon_domain, hdr);
		mon_event_read(&rr, r, d, rdtgrp, &d->hdr.cpu_mask, evtid, false);
	}

checkresult:

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}