// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Arm Ltd.

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/srcu.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "mpam_internal.h"

/*
 * mpam_list_lock protects the SRCU lists when writing. Once the
 * mpam_enabled key is enabled these lists are read-only,
 * unless the error interrupt disables the driver.
 */
static DEFINE_MUTEX(mpam_list_lock);
static LIST_HEAD(mpam_all_msc);

struct srcu_struct mpam_srcu;

/*
 * Number of MSCs that have been probed. Once all MSCs have been probed, MPAM
 * can be enabled.
 */
static atomic_t mpam_num_msc;

static int mpam_cpuhp_state;
static DEFINE_MUTEX(mpam_cpuhp_state_lock);

/*
 * The smallest common values for any CPU or MSC in the system.
 * Generating traffic outside this range will result in screaming interrupts.
 */
u16 mpam_partid_max;
u8 mpam_pmg_max;
static bool partid_max_init, partid_max_published;
static DEFINE_SPINLOCK(partid_max_lock);

/*
 * mpam is enabled once all devices have been probed from CPU online callbacks,
 * scheduled via this work_struct. If access to an MSC depends on a CPU that
 * was not brought online at boot, this can happen surprisingly late.
 */
static DECLARE_WORK(mpam_enable_work, &mpam_enable);

/*
 * All mpam error interrupts indicate a software bug. On receipt, disable the
 * driver.
 */
static DECLARE_WORK(mpam_broken_work, &mpam_disable);

/* When mpam is disabled, the reason is printed to aid debugging */
static char *mpam_disable_reason;

/*
 * An MSC is a physical container for controls and monitors, each identified by
 * their RIS index. These share a base-address, interrupts and some MMIO
 * registers. A vMSC is a virtual container for RIS in an MSC that control or
 * monitor the same thing. Members of a vMSC are all RIS in the same MSC, but
 * not all RIS in an MSC share a vMSC.
 *
 * Components are a group of vMSC that control or monitor the same thing but
 * are from different MSC, so have different base-addresses, interrupts etc.
 * Classes are the set of components of the same type.
 *
 * The features of a vMSC are the union of those of the RIS it contains.
 * The features of a Class and Component are the common subset of the vMSC
 * they contain.
 *
 * e.g. The system cache may have bandwidth controls on multiple interfaces,
 * for regulating traffic from devices independently of traffic from CPUs.
 * If these are two RIS in one MSC, they will be treated as controlling
 * different things, and will not share a vMSC/component/class.
 *
 * e.g. The L2 may have one MSC and two RIS, one for cache-controls, another
 * for bandwidth. These two RIS are members of the same vMSC.
 *
 * e.g. The set of RIS that make up the L2 are grouped as a component. These
 * are sometimes termed slices. They should be configured the same, as if there
 * were only one.
 *
 * e.g. The SoC probably has more than one L2, each attached to a distinct set
 * of CPUs. All the L2 components are grouped as a class.
 *
 * When creating an MSC, struct mpam_msc is added to the mpam_all_msc list,
 * then linked via struct mpam_ris to a vmsc, component and class.
 * The same MSC may exist under different class->component->vmsc paths, but the
 * RIS index will be unique.
 */
LIST_HEAD(mpam_classes);

/* List of all objects that can be free()d after synchronize_srcu() */
static LLIST_HEAD(mpam_garbage);

static inline void init_garbage(struct mpam_garbage *garbage)
{
	init_llist_node(&garbage->llist);
}

#define add_to_garbage(x)					\
do {								\
	__typeof__(x) _x = (x);					\
	_x->garbage.to_free = _x;				\
	llist_add(&_x->garbage.llist, &mpam_garbage);		\
} while (0)

static void mpam_free_garbage(void)
{
	struct mpam_garbage *iter, *tmp;
	struct llist_node *to_free = llist_del_all(&mpam_garbage);

	if (!to_free)
		return;

	synchronize_srcu(&mpam_srcu);

	llist_for_each_entry_safe(iter, tmp, to_free, llist) {
		if (iter->pdev)
			devm_kfree(&iter->pdev->dev, iter->to_free);
		else
			kfree(iter->to_free);
	}
}

static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
{
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	return readl_relaxed(msc->mapped_hwpage + reg);
}

static inline u32 _mpam_read_partsel_reg(struct mpam_msc *msc, u16 reg)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_partsel_reg(msc, reg) _mpam_read_partsel_reg(msc, MPAMF_##reg)

static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	writel_relaxed(val, msc->mapped_hwpage + reg);
}

static inline void _mpam_write_partsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_partsel_reg(msc, reg, val) _mpam_write_partsel_reg(msc, MPAMCFG_##reg, val)

static inline u32 _mpam_read_monsel_reg(struct mpam_msc *msc, u16 reg)
{
	mpam_mon_sel_lock_held(msc);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_monsel_reg(msc, reg) _mpam_read_monsel_reg(msc, MSMON_##reg)

static inline void _mpam_write_monsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	mpam_mon_sel_lock_held(msc);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_monsel_reg(msc, reg, val) _mpam_write_monsel_reg(msc, MSMON_##reg, val)

static u64 mpam_msc_read_idr(struct mpam_msc *msc)
{
	u64 idr_high = 0, idr_low;

	lockdep_assert_held(&msc->part_sel_lock);

	idr_low = mpam_read_partsel_reg(msc, IDR);
	if (FIELD_GET(MPAMF_IDR_EXT, idr_low))
		idr_high = mpam_read_partsel_reg(msc, IDR + 4);

	return (idr_high << 32) | idr_low;
}

static void __mpam_part_sel_raw(u32 partsel, struct mpam_msc *msc)
{
	lockdep_assert_held(&msc->part_sel_lock);

	mpam_write_partsel_reg(msc, PART_SEL, partsel);
}
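
/*
 * Illustrative sketch, not a complete caller: the MPAMF_* ID and MPAMCFG_*
 * configuration registers are indirected through PART_SEL, so users of the
 * accessors above are expected to hold part_sel_lock across both the select
 * and the accesses, roughly:
 *
 *	mutex_lock(&msc->part_sel_lock);
 *	__mpam_part_sel(ris_idx, partid, msc);
 *	cpor = mpam_read_partsel_reg(msc, CPOR_IDR);
 *	mutex_unlock(&msc->part_sel_lock);
 */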

static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc)
{
	u32 partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) |
		      FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid);

	__mpam_part_sel_raw(partsel, msc);
}

int mpam_register_requestor(u16 partid_max, u8 pmg_max)
{
	guard(spinlock)(&partid_max_lock);
	if (!partid_max_init) {
		mpam_partid_max = partid_max;
		mpam_pmg_max = pmg_max;
		partid_max_init = true;
	} else if (!partid_max_published) {
		mpam_partid_max = min(mpam_partid_max, partid_max);
		mpam_pmg_max = min(mpam_pmg_max, pmg_max);
	} else {
		/* New requestors can't lower the values */
		if (partid_max < mpam_partid_max || pmg_max < mpam_pmg_max)
			return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(mpam_register_requestor);

static struct mpam_class *
mpam_class_alloc(u8 level_idx, enum mpam_class_types type)
{
	struct mpam_class *class;

	lockdep_assert_held(&mpam_list_lock);

	class = kzalloc(sizeof(*class), GFP_KERNEL);
	if (!class)
		return ERR_PTR(-ENOMEM);
	init_garbage(&class->garbage);

	INIT_LIST_HEAD_RCU(&class->components);
	/* Affinity is updated when RIS are added */
	class->level = level_idx;
	class->type = type;
	INIT_LIST_HEAD_RCU(&class->classes_list);

	list_add_rcu(&class->classes_list, &mpam_classes);

	return class;
}

static void mpam_class_destroy(struct mpam_class *class)
{
	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&class->classes_list);
	add_to_garbage(class);
}

static struct mpam_class *
mpam_class_find(u8 level_idx, enum mpam_class_types type)
{
	struct mpam_class *class;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, &mpam_classes, classes_list) {
		if (class->type == type && class->level == level_idx)
			return class;
	}

	return mpam_class_alloc(level_idx, type);
}

static struct mpam_component *
mpam_component_alloc(struct mpam_class *class, int id)
{
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);
	init_garbage(&comp->garbage);

	comp->comp_id = id;
	INIT_LIST_HEAD_RCU(&comp->vmsc);
	/* Affinity is updated when RIS are added */
	INIT_LIST_HEAD_RCU(&comp->class_list);
	comp->class = class;

	list_add_rcu(&comp->class_list, &class->components);

	return comp;
}

static void mpam_component_destroy(struct mpam_component *comp)
{
	struct mpam_class *class = comp->class;

	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&comp->class_list);
	add_to_garbage(comp);

	if (list_empty(&class->components))
		mpam_class_destroy(class);
}

static struct mpam_component *
mpam_component_find(struct mpam_class *class, int id)
{
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(comp, &class->components, class_list) {
		if (comp->comp_id == id)
			return comp;
	}

	return mpam_component_alloc(class, id);
}

static struct mpam_vmsc *
mpam_vmsc_alloc(struct mpam_component *comp, struct mpam_msc *msc)
{
	struct mpam_vmsc *vmsc;

	lockdep_assert_held(&mpam_list_lock);

	vmsc = kzalloc(sizeof(*vmsc), GFP_KERNEL);
	if (!vmsc)
		return ERR_PTR(-ENOMEM);
	init_garbage(&vmsc->garbage);

	INIT_LIST_HEAD_RCU(&vmsc->ris);
	INIT_LIST_HEAD_RCU(&vmsc->comp_list);
	vmsc->comp = comp;
	vmsc->msc = msc;

	list_add_rcu(&vmsc->comp_list, &comp->vmsc);

	return vmsc;
}
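
/*
 * Destruction cascades upwards: destroying the last vMSC of a component
 * destroys the component, and destroying the last component of a class
 * destroys the class. Everything is freed via the garbage list once any
 * SRCU readers have finished.
 */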

static void mpam_vmsc_destroy(struct mpam_vmsc *vmsc)
{
	struct mpam_component *comp = vmsc->comp;

	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&vmsc->comp_list);
	add_to_garbage(vmsc);

	if (list_empty(&comp->vmsc))
		mpam_component_destroy(comp);
}

static struct mpam_vmsc *
mpam_vmsc_find(struct mpam_component *comp, struct mpam_msc *msc)
{
	struct mpam_vmsc *vmsc;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
		if (vmsc->msc->id == msc->id)
			return vmsc;
	}

	return mpam_vmsc_alloc(comp, msc);
}

/*
 * The cacheinfo structures are only populated when CPUs are online.
 * This helper walks the ACPI tables to include offline CPUs too.
 */
int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level,
				   cpumask_t *affinity)
{
	return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
}

/*
 * cpumask_of_node() only knows about online CPUs. This can't tell us whether
 * a class is represented on all possible CPUs.
 */
static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (node_id == cpu_to_node(cpu))
			cpumask_set_cpu(cpu, affinity);
	}
}

static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity,
				 enum mpam_class_types type,
				 struct mpam_class *class,
				 struct mpam_component *comp)
{
	int err;

	switch (type) {
	case MPAM_CLASS_CACHE:
		err = mpam_get_cpumask_from_cache_id(comp->comp_id, class->level,
						     affinity);
		if (err) {
			dev_warn_once(&msc->pdev->dev,
				      "Failed to determine CPU affinity\n");
			return err;
		}

		if (cpumask_empty(affinity))
			dev_warn_once(&msc->pdev->dev, "no CPUs associated with cache node\n");

		break;
	case MPAM_CLASS_MEMORY:
		get_cpumask_from_node_id(comp->comp_id, affinity);
		/* affinity may be empty for CPU-less memory nodes */
		break;
	case MPAM_CLASS_UNKNOWN:
		return 0;
	}

	cpumask_and(affinity, affinity, &msc->accessibility);

	return 0;
}

static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx,
				  enum mpam_class_types type, u8 class_id,
				  int component_id)
{
	int err;
	struct mpam_vmsc *vmsc;
	struct mpam_msc_ris *ris;
	struct mpam_class *class;
	struct mpam_component *comp;
	struct platform_device *pdev = msc->pdev;

	lockdep_assert_held(&mpam_list_lock);

	if (ris_idx > MPAM_MSC_MAX_NUM_RIS)
		return -EINVAL;

	if (test_and_set_bit(ris_idx, &msc->ris_idxs))
		return -EBUSY;

	ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), GFP_KERNEL);
	if (!ris)
		return -ENOMEM;
	init_garbage(&ris->garbage);
	ris->garbage.pdev = pdev;

	class = mpam_class_find(class_id, type);
	if (IS_ERR(class))
		return PTR_ERR(class);

	comp = mpam_component_find(class, component_id);
	if (IS_ERR(comp)) {
		if (list_empty(&class->components))
			mpam_class_destroy(class);
		return PTR_ERR(comp);
	}

	vmsc = mpam_vmsc_find(comp, msc);
	if (IS_ERR(vmsc)) {
		if (list_empty(&comp->vmsc))
			mpam_component_destroy(comp);
		return PTR_ERR(vmsc);
	}

	err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp);
	if (err) {
		if (list_empty(&vmsc->ris))
			mpam_vmsc_destroy(vmsc);
		return err;
	}

	ris->ris_idx = ris_idx;
	INIT_LIST_HEAD_RCU(&ris->msc_list);
	INIT_LIST_HEAD_RCU(&ris->vmsc_list);
	ris->vmsc = vmsc;

	cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity);
	cpumask_or(&class->affinity, &class->affinity, &ris->affinity);
	list_add_rcu(&ris->vmsc_list, &vmsc->ris);
	list_add_rcu(&ris->msc_list, &msc->ris);

	return 0;
}
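
/*
 * For illustration only: mpam_ris_create_locked() above builds the topology
 * lazily, one level at a time, roughly:
 *
 *	class = mpam_class_find(class_id, type);	// find-or-alloc
 *	comp = mpam_component_find(class, component_id);
 *	vmsc = mpam_vmsc_find(comp, msc);
 *	// attach the RIS, or destroy whichever level was left empty
 *
 * so a failure part-way through never leaves an empty class, component or
 * vMSC behind.
 */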

static void mpam_ris_destroy(struct mpam_msc_ris *ris)
{
	struct mpam_vmsc *vmsc = ris->vmsc;
	struct mpam_msc *msc = vmsc->msc;
	struct mpam_component *comp = vmsc->comp;
	struct mpam_class *class = comp->class;

	lockdep_assert_held(&mpam_list_lock);

	/*
	 * It is assumed affinities don't overlap. If they do the class becomes
	 * unusable immediately.
	 */
	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
	clear_bit(ris->ris_idx, &msc->ris_idxs);
	list_del_rcu(&ris->msc_list);
	list_del_rcu(&ris->vmsc_list);
	add_to_garbage(ris);

	if (list_empty(&vmsc->ris))
		mpam_vmsc_destroy(vmsc);
}

int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
		    enum mpam_class_types type, u8 class_id, int component_id)
{
	int err;

	mutex_lock(&mpam_list_lock);
	err = mpam_ris_create_locked(msc, ris_idx, type, class_id,
				     component_id);
	mutex_unlock(&mpam_list_lock);
	if (err)
		mpam_free_garbage();

	return err;
}

static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc,
						   u8 ris_idx)
{
	int err;
	struct mpam_msc_ris *ris;

	lockdep_assert_held(&mpam_list_lock);

	if (!test_bit(ris_idx, &msc->ris_idxs)) {
		err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN,
					     0, 0);
		if (err)
			return ERR_PTR(err);
	}

	list_for_each_entry(ris, &msc->ris, msc_list) {
		if (ris->ris_idx == ris_idx)
			return ris;
	}

	return ERR_PTR(-ENOENT);
}

/*
 * IHI009A.a has this nugget: "If a monitor does not support automatic behaviour
 * of NRDY, software can use this bit for any purpose" - so hardware might not
 * implement this - but it isn't RES0.
 *
 * Try to see which values stick in this bit. If we can write either value,
 * it's probably not implemented by hardware.
 */
static bool _mpam_ris_hw_probe_hw_nrdy(struct mpam_msc_ris *ris, u32 mon_reg)
{
	u32 now;
	u64 mon_sel;
	bool can_set, can_clear;
	struct mpam_msc *msc = ris->vmsc->msc;

	if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc)))
		return false;

	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, 0) |
		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
	_mpam_write_monsel_reg(msc, mon_reg, mon_sel);

	_mpam_write_monsel_reg(msc, mon_reg, MSMON___NRDY);
	now = _mpam_read_monsel_reg(msc, mon_reg);
	can_set = now & MSMON___NRDY;

	_mpam_write_monsel_reg(msc, mon_reg, 0);
	now = _mpam_read_monsel_reg(msc, mon_reg);
	can_clear = !(now & MSMON___NRDY);
	mpam_mon_sel_unlock(msc);

	return (!can_set || !can_clear);
}

#define mpam_ris_hw_probe_hw_nrdy(_ris, _mon_reg)			\
	_mpam_ris_hw_probe_hw_nrdy(_ris, MSMON_##_mon_reg)

static void mpam_ris_hw_probe(struct mpam_msc_ris *ris)
{
	int err;
	struct mpam_msc *msc = ris->vmsc->msc;
	struct device *dev = &msc->pdev->dev;
	struct mpam_props *props = &ris->props;

	lockdep_assert_held(&msc->probe_lock);
	lockdep_assert_held(&msc->part_sel_lock);

	/* Cache Portion partitioning */
	if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) {
		u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR);

		props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features);
		if (props->cpbm_wd)
			mpam_set_feature(mpam_feat_cpor_part, props);
	}

	/* Memory bandwidth partitioning */
	if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) {
		u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR);

		/* portion bitmap resolution */
		props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features);
		if (props->mbw_pbm_bits &&
		    FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features))
			mpam_set_feature(mpam_feat_mbw_part, props);

		props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features);
		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features))
			mpam_set_feature(mpam_feat_mbw_max, props);
	}

	/* Performance Monitoring */
	if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) {
		u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR);

		/*
		 * If the firmware max-nrdy-us property is missing, the
		 * CSU counters can't be used. Should we wait forever?
		 */
		err = device_property_read_u32(&msc->pdev->dev,
					       "arm,not-ready-us",
					       &msc->nrdy_usec);

		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) {
			u32 csumonidr;

			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
			if (props->num_csu_mon) {
				bool hw_managed;

				mpam_set_feature(mpam_feat_msmon_csu, props);

				/* Is NRDY hardware managed? */
				hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, CSU);
				if (hw_managed)
					mpam_set_feature(mpam_feat_msmon_csu_hw_nrdy, props);
			}

			/*
			 * Accept the missing firmware property if NRDY appears
			 * un-implemented.
			 */
			if (err && mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, props))
				dev_err_once(dev, "Counters are not usable because the not-ready timeout was not provided by firmware.\n");
		}
		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
			bool hw_managed;
			u32 mbwumon_idr = mpam_read_partsel_reg(msc, MBWUMON_IDR);

			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumon_idr);
			if (props->num_mbwu_mon)
				mpam_set_feature(mpam_feat_msmon_mbwu, props);

			/* Is NRDY hardware managed? */
			hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, MBWU);
			if (hw_managed)
				mpam_set_feature(mpam_feat_msmon_mbwu_hw_nrdy, props);

			/*
			 * Don't warn about any missing firmware property for
			 * MBWU NRDY - it doesn't make any sense!
			 */
		}
	}
}

static int mpam_msc_hw_probe(struct mpam_msc *msc)
{
	u64 idr;
	u16 partid_max;
	u8 ris_idx, pmg_max;
	struct mpam_msc_ris *ris;
	struct device *dev = &msc->pdev->dev;

	lockdep_assert_held(&msc->probe_lock);

	idr = __mpam_read_reg(msc, MPAMF_AIDR);
	if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) {
		dev_err_once(dev, "MSC does not match MPAM architecture v1.x\n");
		return -EIO;
	}

	/* Grab an IDR value to find out how many RIS there are */
	mutex_lock(&msc->part_sel_lock);
	idr = mpam_msc_read_idr(msc);
	mutex_unlock(&msc->part_sel_lock);

	msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr);

	/* Use these values so partid/pmg always starts with a valid value */
	msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
	msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);

	for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) {
		mutex_lock(&msc->part_sel_lock);
		__mpam_part_sel(ris_idx, 0, msc);
		idr = mpam_msc_read_idr(msc);
		mutex_unlock(&msc->part_sel_lock);

		partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
		pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
		msc->partid_max = min(msc->partid_max, partid_max);
		msc->pmg_max = min(msc->pmg_max, pmg_max);

		mutex_lock(&mpam_list_lock);
		ris = mpam_get_or_create_ris(msc, ris_idx);
		mutex_unlock(&mpam_list_lock);
		if (IS_ERR(ris))
			return PTR_ERR(ris);
		ris->idr = idr;

		mutex_lock(&msc->part_sel_lock);
		__mpam_part_sel(ris_idx, 0, msc);
		mpam_ris_hw_probe(ris);
		mutex_unlock(&msc->part_sel_lock);
	}

	spin_lock(&partid_max_lock);
	mpam_partid_max = min(mpam_partid_max, msc->partid_max);
	mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max);
	spin_unlock(&partid_max_lock);

	msc->probed = true;

	return 0;
}

static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
{
	u32 num_words, msb;
	u32 bm = ~0;
	int i;

	lockdep_assert_held(&msc->part_sel_lock);

	if (wd == 0)
		return;

	/*
	 * Write ~0 to all but the last 32bit-word, which may
	 * have fewer bits...
	 */
	num_words = DIV_ROUND_UP(wd, 32);
	for (i = 0; i < num_words - 1; i++, reg += sizeof(bm))
		__mpam_write_reg(msc, reg, bm);

	/*
	 * ...and then the last (maybe partial) 32bit word. When wd is a
	 * multiple of 32, msb should be 31 to write a full 32bit word.
	 */
	msb = (wd - 1) % 32;
	bm = GENMASK(msb, 0);
	__mpam_write_reg(msc, reg, bm);
}
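
/*
 * For example (width invented for illustration): with wd == 45,
 * DIV_ROUND_UP(45, 32) == 2, so the loop above writes one full word of ~0 and
 * the final write covers GENMASK((45 - 1) % 32, 0) == GENMASK(12, 0), i.e.
 * the remaining 13 bits. When wd is a multiple of 32, the final mask is a
 * full word.
 */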

static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid)
{
	struct mpam_msc *msc = ris->vmsc->msc;
	struct mpam_props *rprops = &ris->props;

	WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));

	mutex_lock(&msc->part_sel_lock);
	__mpam_part_sel(ris->ris_idx, partid, msc);

	if (mpam_has_feature(mpam_feat_cpor_part, rprops))
		mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd);

	if (mpam_has_feature(mpam_feat_mbw_part, rprops))
		mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits);

	if (mpam_has_feature(mpam_feat_mbw_min, rprops))
		mpam_write_partsel_reg(msc, MBW_MIN, 0);

	if (mpam_has_feature(mpam_feat_mbw_max, rprops))
		mpam_write_partsel_reg(msc, MBW_MAX, MPAMCFG_MBW_MAX_MAX);

	mutex_unlock(&msc->part_sel_lock);
}

/*
 * Called via smp_call_on_cpu() to prevent migration, while still being
 * pre-emptible.
 */
static int mpam_reset_ris(void *arg)
{
	u16 partid, partid_max;
	struct mpam_msc_ris *ris = arg;

	WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));

	if (ris->in_reset_state)
		return 0;

	spin_lock(&partid_max_lock);
	partid_max = mpam_partid_max;
	spin_unlock(&partid_max_lock);
	for (partid = 0; partid <= partid_max; partid++)
		mpam_reset_ris_partid(ris, partid);

	return 0;
}

/*
 * Get the preferred CPU for this MSC. If it is accessible from this CPU,
 * this CPU is preferred. This can be preempted/migrated, it will only result
 * in more work.
 */
static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, &msc->accessibility))
		return cpu;

	return cpumask_first_and(&msc->accessibility, cpu_online_mask);
}

static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg)
{
	lockdep_assert_irqs_enabled();
	lockdep_assert_cpus_held();
	WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));

	return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true);
}

static void mpam_reset_msc(struct mpam_msc *msc, bool online)
{
	struct mpam_msc_ris *ris;

	list_for_each_entry_srcu(ris, &msc->ris, msc_list, srcu_read_lock_held(&mpam_srcu)) {
		mpam_touch_msc(msc, &mpam_reset_ris, ris);

		/*
		 * Set in_reset_state when coming online. The reset state
		 * for non-zero partid may be lost while the CPUs are offline.
		 */
		ris->in_reset_state = online;
	}
}
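
/*
 * Note on the reset life-cycle: msc->online_refs counts the online CPUs that
 * can reach the MSC. mpam_cpu_online() below resets the MSC when the first
 * such CPU comes online, and mpam_cpu_offline() takes the same path when the
 * last goes offline, this time leaving in_reset_state clear because the
 * reset state may be lost while the CPUs are offline.
 */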

static int mpam_cpu_online(unsigned int cpu)
{
	struct mpam_msc *msc;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		if (atomic_fetch_inc(&msc->online_refs) == 0)
			mpam_reset_msc(msc, true);
	}

	return 0;
}

/* Before mpam is enabled, try to probe new MSC */
static int mpam_discovery_cpu_online(unsigned int cpu)
{
	int err = 0;
	struct mpam_msc *msc;
	bool new_device_probed = false;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		mutex_lock(&msc->probe_lock);
		if (!msc->probed)
			err = mpam_msc_hw_probe(msc);
		mutex_unlock(&msc->probe_lock);

		if (err)
			break;
		new_device_probed = true;
	}

	if (new_device_probed && !err)
		schedule_work(&mpam_enable_work);
	if (err) {
		mpam_disable_reason = "error during probing";
		schedule_work(&mpam_broken_work);
	}

	return err;
}

static int mpam_cpu_offline(unsigned int cpu)
{
	struct mpam_msc *msc;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		if (atomic_dec_and_test(&msc->online_refs))
			mpam_reset_msc(msc, false);
	}

	return 0;
}

static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online),
					  int (*offline)(unsigned int offline),
					  char *name)
{
	mutex_lock(&mpam_cpuhp_state_lock);
	if (mpam_cpuhp_state) {
		cpuhp_remove_state(mpam_cpuhp_state);
		mpam_cpuhp_state = 0;
	}

	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, name, online,
					     offline);
	if (mpam_cpuhp_state <= 0) {
		pr_err("Failed to register cpuhp callbacks\n");
		mpam_cpuhp_state = 0;
	}
	mutex_unlock(&mpam_cpuhp_state_lock);
}

/*
 * An MSC can control traffic from a set of CPUs, but may only be accessible
 * from a (hopefully wider) set of CPUs. The common reason for this is power
 * management. If all the CPUs in a cluster are in PSCI:CPU_SUSPEND, the
 * corresponding cache may also be powered off. By making accesses from
 * one of those CPUs, we ensure we don't access a cache that's powered off.
 */
static void update_msc_accessibility(struct mpam_msc *msc)
{
	u32 affinity_id;
	int err;

	err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity",
				       &affinity_id);
	if (err)
		cpumask_copy(&msc->accessibility, cpu_possible_mask);
	else
		acpi_pptt_get_cpus_from_container(affinity_id, &msc->accessibility);
}
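
/*
 * A sketch, for illustration only, of how a reader is expected to walk from a
 * class to the RIS that implement it (the class and the loop bodies are
 * assumed to come from the caller):
 *
 *	guard(srcu)(&mpam_srcu);
 *	list_for_each_entry_srcu(comp, &class->components, class_list,
 *				 srcu_read_lock_held(&mpam_srcu)) {
 *		list_for_each_entry_srcu(vmsc, &comp->vmsc, comp_list,
 *					 srcu_read_lock_held(&mpam_srcu)) {
 *			list_for_each_entry_srcu(ris, &vmsc->ris, vmsc_list,
 *						 srcu_read_lock_held(&mpam_srcu)) {
 *				...
 *			}
 *		}
 *	}
 *
 * Writers hold mpam_list_lock instead, and free unlinked nodes via the
 * garbage list once the SRCU grace period has elapsed.
 */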

/*
 * There are two ways of reaching a struct mpam_msc_ris. Via the
 * class->component->vmsc->ris, or via the msc.
 * When destroying the msc, the other side needs unlinking and cleaning up too.
 */
static void mpam_msc_destroy(struct mpam_msc *msc)
{
	struct platform_device *pdev = msc->pdev;
	struct mpam_msc_ris *ris, *tmp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
		mpam_ris_destroy(ris);

	list_del_rcu(&msc->all_msc_list);
	platform_set_drvdata(pdev, NULL);

	add_to_garbage(msc);
}

static void mpam_msc_drv_remove(struct platform_device *pdev)
{
	struct mpam_msc *msc = platform_get_drvdata(pdev);

	mutex_lock(&mpam_list_lock);
	mpam_msc_destroy(msc);
	mutex_unlock(&mpam_list_lock);

	mpam_free_garbage();
}

static struct mpam_msc *do_mpam_msc_drv_probe(struct platform_device *pdev)
{
	int err;
	u32 tmp;
	struct mpam_msc *msc;
	struct resource *msc_res;
	struct device *dev = &pdev->dev;

	lockdep_assert_held(&mpam_list_lock);

	msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return ERR_PTR(-ENOMEM);
	init_garbage(&msc->garbage);
	msc->garbage.pdev = pdev;

	err = devm_mutex_init(dev, &msc->probe_lock);
	if (err)
		return ERR_PTR(err);

	err = devm_mutex_init(dev, &msc->part_sel_lock);
	if (err)
		return ERR_PTR(err);

	mpam_mon_sel_lock_init(msc);
	msc->id = pdev->id;
	msc->pdev = pdev;
	INIT_LIST_HEAD_RCU(&msc->all_msc_list);
	INIT_LIST_HEAD_RCU(&msc->ris);

	update_msc_accessibility(msc);
	if (cpumask_empty(&msc->accessibility)) {
		dev_err_once(dev, "MSC is not accessible from any CPU!\n");
		return ERR_PTR(-EINVAL);
	}

	if (device_property_read_u32(&pdev->dev, "pcc-channel", &tmp))
		msc->iface = MPAM_IFACE_MMIO;
	else
		msc->iface = MPAM_IFACE_PCC;

	if (msc->iface == MPAM_IFACE_MMIO) {
		void __iomem *io;

		io = devm_platform_get_and_ioremap_resource(pdev, 0,
							    &msc_res);
		if (IS_ERR(io)) {
			dev_err_once(dev, "Failed to map MSC base address\n");
			return ERR_CAST(io);
		}
		msc->mapped_hwpage_sz = msc_res->end - msc_res->start;
		msc->mapped_hwpage = io;
	} else {
		return ERR_PTR(-EINVAL);
	}

	list_add_rcu(&msc->all_msc_list, &mpam_all_msc);
	platform_set_drvdata(pdev, msc);

	return msc;
}

static int fw_num_msc;

static int mpam_msc_drv_probe(struct platform_device *pdev)
{
	int err;
	struct mpam_msc *msc = NULL;
	void *plat_data = pdev->dev.platform_data;

	mutex_lock(&mpam_list_lock);
	msc = do_mpam_msc_drv_probe(pdev);
	mutex_unlock(&mpam_list_lock);

	if (IS_ERR(msc))
		return PTR_ERR(msc);

	/* Create RIS entries described by firmware */
	err = acpi_mpam_parse_resources(msc, plat_data);
	if (err) {
		mpam_msc_drv_remove(pdev);
		return err;
	}

	if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc)
		mpam_register_cpuhp_callbacks(mpam_discovery_cpu_online, NULL,
					      "mpam:drv_probe");

	return 0;
}

static struct platform_driver mpam_msc_driver = {
	.driver = {
		.name = "mpam_msc",
	},
	.probe = mpam_msc_drv_probe,
	.remove = mpam_msc_drv_remove,
};
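
/*
 * Hypothetical example of the probe gating above: if firmware describes two
 * MSCs (fw_num_msc == 2), the discovery cpuhp callback is only registered
 * once the second platform device has probed, so mpam_discovery_cpu_online()
 * sees every MSC before mpam_enable_work is scheduled.
 */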

/* Any of these features mean the BWA_WD field is valid. */
static bool mpam_has_bwa_wd_feature(struct mpam_props *props)
{
	if (mpam_has_feature(mpam_feat_mbw_min, props))
		return true;
	if (mpam_has_feature(mpam_feat_mbw_max, props))
		return true;
	return false;
}

#define MISMATCHED_HELPER(parent, child, helper, field, alias)		\
	helper(parent) &&						\
	((helper(child) && (parent)->field != (child)->field) ||	\
	 (!helper(child) && !(alias)))

#define MISMATCHED_FEAT(parent, child, feat, field, alias)		\
	mpam_has_feature((feat), (parent)) &&				\
	((mpam_has_feature((feat), (child)) && (parent)->field != (child)->field) || \
	 (!mpam_has_feature((feat), (child)) && !(alias)))

#define CAN_MERGE_FEAT(parent, child, feat, alias)			\
	(alias) && !mpam_has_feature((feat), (parent)) &&		\
	mpam_has_feature((feat), (child))

/*
 * Combine two props fields.
 * If this is for controls that alias the same resource, it is safe to just
 * copy the values over. If two aliasing controls implement the same scheme
 * a safe value must be picked.
 * For non-aliasing controls, these control different resources, and the
 * resulting safe value must be compatible with both. When merging values in
 * the tree, all the aliasing resources must be handled first.
 * On mismatch, parent is modified.
 */
static void __props_mismatch(struct mpam_props *parent,
			     struct mpam_props *child, bool alias)
{
	if (CAN_MERGE_FEAT(parent, child, mpam_feat_cpor_part, alias)) {
		parent->cpbm_wd = child->cpbm_wd;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_cpor_part,
				   cpbm_wd, alias)) {
		pr_debug("cleared cpor_part\n");
		mpam_clear_feature(mpam_feat_cpor_part, parent);
		parent->cpbm_wd = 0;
	}

	if (CAN_MERGE_FEAT(parent, child, mpam_feat_mbw_part, alias)) {
		parent->mbw_pbm_bits = child->mbw_pbm_bits;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_mbw_part,
				   mbw_pbm_bits, alias)) {
		pr_debug("cleared mbw_part\n");
		mpam_clear_feature(mpam_feat_mbw_part, parent);
		parent->mbw_pbm_bits = 0;
	}

	/* bwa_wd is a count of bits, fewer bits means less precision */
	if (alias && !mpam_has_bwa_wd_feature(parent) &&
	    mpam_has_bwa_wd_feature(child)) {
		parent->bwa_wd = child->bwa_wd;
	} else if (MISMATCHED_HELPER(parent, child, mpam_has_bwa_wd_feature,
				     bwa_wd, alias)) {
		pr_debug("took the min bwa_wd\n");
		parent->bwa_wd = min(parent->bwa_wd, child->bwa_wd);
	}

	/* For num properties, take the minimum */
	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_csu, alias)) {
		parent->num_csu_mon = child->num_csu_mon;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_csu,
				   num_csu_mon, alias)) {
		pr_debug("took the min num_csu_mon\n");
		parent->num_csu_mon = min(parent->num_csu_mon,
					  child->num_csu_mon);
	}

	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_mbwu, alias)) {
		parent->num_mbwu_mon = child->num_mbwu_mon;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_mbwu,
				   num_mbwu_mon, alias)) {
		pr_debug("took the min num_mbwu_mon\n");
		parent->num_mbwu_mon = min(parent->num_mbwu_mon,
					   child->num_mbwu_mon);
	}

	if (alias) {
		/* Merge features for aliased resources */
		bitmap_or(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
	} else {
		/* Clear missing features for non aliasing */
		bitmap_and(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
	}
}
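
/*
 * For example (widths and counts invented for illustration): two aliasing RIS
 * in the same vMSC where only one implements cpor_part donate that control to
 * the vMSC unchanged. At the class level (alias == false), one vMSC with
 * num_csu_mon == 8 and another with num_csu_mon == 4 leave the class with 4,
 * while a feature missing from either vMSC is cleared entirely by the final
 * bitmap_and().
 */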

/*
 * If a vmsc doesn't match class feature/configuration, do the right thing(tm).
 * For 'num' properties we can just take the minimum.
 * For properties where the mismatched unused bits would make a difference, we
 * nobble the class feature, as we can't configure all the resources.
 * e.g. The L3 cache is composed of two resources with portion bitmaps of 13
 * and 17 bits respectively.
 */
static void
__class_props_mismatch(struct mpam_class *class, struct mpam_vmsc *vmsc)
{
	struct mpam_props *cprops = &class->props;
	struct mpam_props *vprops = &vmsc->props;
	struct device *dev = &vmsc->msc->pdev->dev;

	lockdep_assert_held(&mpam_list_lock); /* we modify class */

	dev_dbg(dev, "Merging features for class:0x%lx &= vmsc:0x%lx\n",
		(long)cprops->features, (long)vprops->features);

	/* Take the safe value for any common features */
	__props_mismatch(cprops, vprops, false);
}

static void
__vmsc_props_mismatch(struct mpam_vmsc *vmsc, struct mpam_msc_ris *ris)
{
	struct mpam_props *rprops = &ris->props;
	struct mpam_props *vprops = &vmsc->props;
	struct device *dev = &vmsc->msc->pdev->dev;

	lockdep_assert_held(&mpam_list_lock); /* we modify vmsc */

	dev_dbg(dev, "Merging features for vmsc:0x%lx |= ris:0x%lx\n",
		(long)vprops->features, (long)rprops->features);

	/*
	 * Merge mismatched features - Copy any features that aren't common,
	 * but take the safe value for any common features.
	 */
	__props_mismatch(vprops, rprops, true);
}

/*
 * Copy the first component's first vMSC's properties and features to the
 * class. __class_props_mismatch() will remove conflicts.
 * It is not possible to have a class with no components, or a component with
 * no resources. The vMSC properties have already been built.
 */
static void mpam_enable_init_class_features(struct mpam_class *class)
{
	struct mpam_vmsc *vmsc;
	struct mpam_component *comp;

	comp = list_first_entry(&class->components,
				struct mpam_component, class_list);
	vmsc = list_first_entry(&comp->vmsc,
				struct mpam_vmsc, comp_list);

	class->props = vmsc->props;
}

static void mpam_enable_merge_vmsc_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_msc_ris *ris;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
		list_for_each_entry(ris, &vmsc->ris, vmsc_list) {
			__vmsc_props_mismatch(vmsc, ris);
			class->nrdy_usec = max(class->nrdy_usec,
					       vmsc->msc->nrdy_usec);
		}
	}
}

static void mpam_enable_merge_class_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list)
		__class_props_mismatch(class, vmsc);
}

/*
 * Merge all the common resource features into class.
 * vmsc features are bitwise-or'd together by mpam_enable_merge_vmsc_features()
 * as the first step so that mpam_enable_init_class_features() can initialise
 * the class with a representative set of features.
 * Next, mpam_enable_merge_class_features() bitwise-and's all the vmsc
 * features to form the class features.
 * Other features are the min/max as appropriate.
 *
 * To avoid walking the whole tree twice, the class->nrdy_usec property is
 * updated when working with the vmsc as it is a max(), and doesn't need
 * initialising first.
 */
static void mpam_enable_merge_features(struct list_head *all_classes_list)
{
	struct mpam_class *class;
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, all_classes_list, classes_list) {
		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_vmsc_features(comp);

		mpam_enable_init_class_features(class);

		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_class_features(comp);
	}
}

static void mpam_enable_once(void)
{
	/*
	 * Once the cpuhp callbacks have been changed, mpam_partid_max can no
	 * longer change.
	 */
	spin_lock(&partid_max_lock);
	partid_max_published = true;
	spin_unlock(&partid_max_lock);

	mutex_lock(&mpam_list_lock);
	mpam_enable_merge_features(&mpam_classes);
	mutex_unlock(&mpam_list_lock);

	mpam_register_cpuhp_callbacks(mpam_cpu_online, mpam_cpu_offline,
				      "mpam:online");

	/* Use printk() to avoid the pr_fmt adding the function name. */
	printk(KERN_INFO "MPAM enabled with %u PARTIDs and %u PMGs\n",
	       mpam_partid_max + 1, mpam_pmg_max + 1);
}

void mpam_disable(struct work_struct *ignored)
{
	struct mpam_msc *msc, *tmp;

	mutex_lock(&mpam_cpuhp_state_lock);
	if (mpam_cpuhp_state) {
		cpuhp_remove_state(mpam_cpuhp_state);
		mpam_cpuhp_state = 0;
	}
	mutex_unlock(&mpam_cpuhp_state_lock);

	mutex_lock(&mpam_list_lock);
	list_for_each_entry_safe(msc, tmp, &mpam_all_msc, all_msc_list)
		mpam_msc_destroy(msc);
	mutex_unlock(&mpam_list_lock);
	mpam_free_garbage();

	pr_err_once("MPAM disabled due to %s\n", mpam_disable_reason);
}

/*
 * Enable mpam once all devices have been probed.
 * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
 * Also scheduled when new devices are probed when new CPUs come online.
 */
void mpam_enable(struct work_struct *work)
{
	static atomic_t once;
	struct mpam_msc *msc;
	bool all_devices_probed = true;

	/* Have we probed all the hw devices? */
	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		mutex_lock(&msc->probe_lock);
		if (!msc->probed)
			all_devices_probed = false;
		mutex_unlock(&msc->probe_lock);

		if (!all_devices_probed)
			break;
	}

	if (all_devices_probed && !atomic_fetch_inc(&once))
		mpam_enable_once();
}

static int __init mpam_msc_driver_init(void)
{
	if (!system_supports_mpam())
		return -EOPNOTSUPP;

	init_srcu_struct(&mpam_srcu);

	fw_num_msc = acpi_mpam_count_msc();
	if (fw_num_msc <= 0) {
		pr_err("No MSC devices found in firmware\n");
		return -EINVAL;
	}

	return platform_driver_register(&mpam_msc_driver);
}

/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */
subsys_initcall(mpam_msc_driver_init);
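
/*
 * Summary of the bring-up sequence implemented above, for reference:
 *  1. mpam_msc_driver_init() counts the MSCs described by firmware.
 *  2. Each platform device probe creates a struct mpam_msc and the RIS
 *     entries firmware describes for it.
 *  3. Once the last expected MSC has probed, mpam_discovery_cpu_online()
 *     probes the hardware from a CPU that can reach each MSC.
 *  4. mpam_enable() then merges the RIS/vMSC/component features into each
 *     class, publishes mpam_partid_max/mpam_pmg_max, and switches to the
 *     mpam_cpu_online()/mpam_cpu_offline() callbacks.
 */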