// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/tick.h>

#include "internal.h"

/*
 * Check whether an MBA bandwidth percentage value is valid. The value is
 * checked against the minimum and maximum bandwidth values specified by
 * the hardware. The allocated bandwidth percentage is rounded up to the
 * next control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
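/*
 * Worked example for bw_validate() (illustrative values, not taken from
 * any particular SKU): with membw.min_bw = 10, membw.bw_gran = 10 and
 * default_ctrl = 100, a write of "34" passes the range check and
 * roundup(34, 10) stages 40, the next control step the hardware can
 * actually program.
 */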
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 *
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
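/*
 * Worked example for cbm_validate() (assuming cbm_len = 20 and
 * min_cbm_bits = 1): for the mask 0x3f, first_bit = 0 and zero_bit = 6,
 * and no further 1-bit follows, so the mask is contiguous and accepted.
 * For 0xf0f, find_next_bit() locates a 1-bit beyond the first zero run,
 * so the mask is rejected unless arch_has_sparse_bitmasks is set.
 */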
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode a valid CBM
				 * that should be pseudo-locked has just been
				 * parsed. Only one locked region is allowed
				 * per resource group and domain, so just do
				 * the required initialization for a single
				 * region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
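/*
 * Example input for parse_line() (domain ids are hypothetical): for a
 * two-domain L3 resource, "0=3f;1=f0" stages CBM 0x3f for domain 0 and
 * CBM 0xf0 for domain 1, while "2=3f" fails with -EINVAL because no
 * domain with id 2 exists in r->domains.
 */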
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
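/*
 * With CDP enabled each CLOSID occupies a pair of hardware control
 * registers, so the mapping above interleaves data and code entries:
 *
 *	closid	CDP_NONE	CDP_DATA	CDP_CODE
 *	   0	   0		   0		   1
 *	   1	   1		   2		   3
 */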
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.dom = d;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(&msr_param);

	return 0;
}

int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	struct rdt_domain *d;
	u32 idx;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		msr_param.res = NULL;
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
				continue;
			hw_dom->ctrl_val[idx] = cfg->new_ctrl;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
				msr_param.dom = d;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
		if (msr_param.res)
			smp_call_function_any(&d->cpu_mask, rdt_ctrl_update,
					      &msr_param, 1);
	}

	return 0;
}

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to a pseudo-locked region are allowed. It has to be
	 * removed and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
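/*
 * Example write handled above (resource and domain names depend on the
 * system): "L3:0=ff;1=ff\nMB:0=50;1=100\n" stages an L3 CBM of 0xff for
 * both cache domains and memory bandwidth of 50% and 100% for the two MB
 * domains, then applies all staged values for rdtgrp->closid in one pass
 * over resctrl_schema_all.
 */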
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}

static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}

static int smp_mon_event_count(void *arg)
{
	mon_event_count(arg);

	return 0;
}

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	int cpu;

	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/*
	 * Set up the parameters to pass to mon_event_count() to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;
	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {
		rr->err = -EINVAL;
		return;
	}

	cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but
	 * are all the CPUs nohz_full? If yes, pick a CPU to IPI.
	 * MPAM's resctrl_arch_rmid_read() is unable to read the
	 * counters on some platforms if it's called in IRQ context.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
	else
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
}

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
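/*
 * Example read path (file names as described in the resctrl
 * documentation): reading mon_data/mon_L3_00/llc_occupancy decodes
 * rid/domid/evtid from the kernfs private data above, runs
 * mon_event_count() on a CPU in the domain, and prints the 64-bit
 * counter value, or "Unavailable"/"Error" if the read failed.
 */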