// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Arm Ltd.

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/srcu.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "mpam_internal.h"

/*
 * mpam_list_lock protects the SRCU lists when writing. Once the
 * mpam_enabled key is enabled, these lists are read-only,
 * unless the error interrupt disables the driver.
 */
static DEFINE_MUTEX(mpam_list_lock);
static LIST_HEAD(mpam_all_msc);

struct srcu_struct mpam_srcu;

/*
 * Number of MSCs that have been probed. Once all MSCs have been probed MPAM
 * can be enabled.
 */
static atomic_t mpam_num_msc;

static int mpam_cpuhp_state;
static DEFINE_MUTEX(mpam_cpuhp_state_lock);

/*
 * The smallest common values for any CPU or MSC in the system.
 * Generating traffic outside this range will result in screaming interrupts.
 */
u16 mpam_partid_max;
u8 mpam_pmg_max;
static bool partid_max_init, partid_max_published;
static DEFINE_SPINLOCK(partid_max_lock);

/*
 * mpam is enabled once all devices have been probed from CPU online callbacks,
 * scheduled via this work_struct. If access to an MSC depends on a CPU that
 * was not brought online at boot, this can happen surprisingly late.
 */
static DECLARE_WORK(mpam_enable_work, &mpam_enable);

/*
 * All mpam error interrupts indicate a software bug. On receipt, disable the
 * driver.
 */
static DECLARE_WORK(mpam_broken_work, &mpam_disable);

/* The reason printed when mpam is disabled, to aid debugging */
static char *mpam_disable_reason;

/*
 * An MSC is a physical container for controls and monitors, each identified by
 * their RIS index. These share a base-address, interrupts and some MMIO
 * registers. A vMSC is a virtual container for RIS in an MSC that control or
 * monitor the same thing. Members of a vMSC are all RIS in the same MSC, but
 * not all RIS in an MSC share a vMSC.
 *
 * Components are a group of vMSC that control or monitor the same thing but
 * are from different MSC, so have different base-address, interrupts etc.
 * Classes are the set of components of the same type.
 *
 * The features of a vMSC are the union of the RIS it contains.
 * The features of a Class and Component are the common subset of the vMSC
 * they contain.
 *
 * e.g. The system cache may have bandwidth controls on multiple interfaces,
 * for regulating traffic from devices independently of traffic from CPUs.
 * If these are two RIS in one MSC, they will be treated as controlling
 * different things, and will not share a vMSC/component/class.
 *
 * e.g. The L2 may have one MSC and two RIS, one for cache-controls another
 * for bandwidth. These two RIS are members of the same vMSC.
 *
 * e.g. The set of RIS that make up the L2 are grouped as a component. These
 * are sometimes termed slices. They should be configured the same, as if there
 * were only one.
 *
 * e.g. The SoC probably has more than one L2, each attached to a distinct set
 * of CPUs. All the L2 components are grouped as a class.
 *
 * When creating an MSC, struct mpam_msc is added to the mpam_all_msc list,
 * then linked via struct mpam_msc_ris to a vmsc, component and class.
 * The same MSC may exist under different class->component->vmsc paths, but the
 * RIS index will be unique.
 */
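
/*
 * An illustrative layout for the L2 example above (a sketch, not a
 * description of any particular SoC):
 *
 *   class (MPAM_CLASS_CACHE, level 2)
 *   `-- component	(one per L2 instance)
 *       `-- vmsc	(one per MSC backing that L2)
 *           |-- ris	(cache portion controls)
 *           `-- ris	(bandwidth controls)
 */
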
LIST_HEAD(mpam_classes);

/* List of all objects that can be free()d after synchronize_srcu() */
static LLIST_HEAD(mpam_garbage);

static inline void init_garbage(struct mpam_garbage *garbage)
{
	init_llist_node(&garbage->llist);
}

#define add_to_garbage(x)					\
do {								\
	__typeof__(x) _x = (x);					\
	_x->garbage.to_free = _x;				\
	llist_add(&_x->garbage.llist, &mpam_garbage);		\
} while (0)

static void mpam_free_garbage(void)
{
	struct mpam_garbage *iter, *tmp;
	struct llist_node *to_free = llist_del_all(&mpam_garbage);

	if (!to_free)
		return;

	synchronize_srcu(&mpam_srcu);

	llist_for_each_entry_safe(iter, tmp, to_free, llist) {
		if (iter->pdev)
			devm_kfree(&iter->pdev->dev, iter->to_free);
		else
			kfree(iter->to_free);
	}
}
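
/*
 * Illustrative use of the garbage list, as in mpam_ris_create() and
 * mpam_msc_drv_remove(): objects are unlinked with list_del_rcu() and queued
 * with add_to_garbage() while holding mpam_list_lock, then mpam_free_garbage()
 * is called once the lock has been dropped so that synchronize_srcu() can
 * wait out any readers still walking the lists.
 */
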
static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
{
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	return readl_relaxed(msc->mapped_hwpage + reg);
}

static inline u32 _mpam_read_partsel_reg(struct mpam_msc *msc, u16 reg)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_partsel_reg(msc, reg)	_mpam_read_partsel_reg(msc, MPAMF_##reg)

static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	writel_relaxed(val, msc->mapped_hwpage + reg);
}

static inline void _mpam_write_partsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_partsel_reg(msc, reg, val)	_mpam_write_partsel_reg(msc, MPAMCFG_##reg, val)

static inline u32 _mpam_read_monsel_reg(struct mpam_msc *msc, u16 reg)
{
	mpam_mon_sel_lock_held(msc);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_monsel_reg(msc, reg)	_mpam_read_monsel_reg(msc, MSMON_##reg)

static inline void _mpam_write_monsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	mpam_mon_sel_lock_held(msc);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_monsel_reg(msc, reg, val)	_mpam_write_monsel_reg(msc, MSMON_##reg, val)

static u64 mpam_msc_read_idr(struct mpam_msc *msc)
{
	u64 idr_high = 0, idr_low;

	lockdep_assert_held(&msc->part_sel_lock);

	idr_low = mpam_read_partsel_reg(msc, IDR);
	if (FIELD_GET(MPAMF_IDR_EXT, idr_low))
		idr_high = mpam_read_partsel_reg(msc, IDR + 4);

	return (idr_high << 32) | idr_low;
}

static void __mpam_part_sel_raw(u32 partsel, struct mpam_msc *msc)
{
	lockdep_assert_held(&msc->part_sel_lock);

	mpam_write_partsel_reg(msc, PART_SEL, partsel);
}

static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc)
{
	u32 partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) |
		      FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid);

	__mpam_part_sel_raw(partsel, msc);
}
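
/*
 * A sketch of the usual PART_SEL access pattern, as used by
 * mpam_msc_hw_probe() and mpam_reset_ris_partid():
 *
 *	mutex_lock(&msc->part_sel_lock);
 *	__mpam_part_sel(ris_idx, partid, msc);
 *	... mpam_read_partsel_reg() / mpam_write_partsel_reg() ...
 *	mutex_unlock(&msc->part_sel_lock);
 */
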
int mpam_register_requestor(u16 partid_max, u8 pmg_max)
{
	guard(spinlock)(&partid_max_lock);
	if (!partid_max_init) {
		mpam_partid_max = partid_max;
		mpam_pmg_max = pmg_max;
		partid_max_init = true;
	} else if (!partid_max_published) {
		mpam_partid_max = min(mpam_partid_max, partid_max);
		mpam_pmg_max = min(mpam_pmg_max, pmg_max);
	} else {
		/* New requestors can't lower the values */
		if (partid_max < mpam_partid_max || pmg_max < mpam_pmg_max)
			return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(mpam_register_requestor);

static struct mpam_class *
mpam_class_alloc(u8 level_idx, enum mpam_class_types type)
{
	struct mpam_class *class;

	lockdep_assert_held(&mpam_list_lock);

	class = kzalloc(sizeof(*class), GFP_KERNEL);
	if (!class)
		return ERR_PTR(-ENOMEM);
	init_garbage(&class->garbage);

	INIT_LIST_HEAD_RCU(&class->components);
	/* Affinity is updated when RIS are added */
	class->level = level_idx;
	class->type = type;
	INIT_LIST_HEAD_RCU(&class->classes_list);

	list_add_rcu(&class->classes_list, &mpam_classes);

	return class;
}

static void mpam_class_destroy(struct mpam_class *class)
{
	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&class->classes_list);
	add_to_garbage(class);
}

static struct mpam_class *
mpam_class_find(u8 level_idx, enum mpam_class_types type)
{
	struct mpam_class *class;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, &mpam_classes, classes_list) {
		if (class->type == type && class->level == level_idx)
			return class;
	}

	return mpam_class_alloc(level_idx, type);
}

static struct mpam_component *
mpam_component_alloc(struct mpam_class *class, int id)
{
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);
	init_garbage(&comp->garbage);

	comp->comp_id = id;
	INIT_LIST_HEAD_RCU(&comp->vmsc);
	/* Affinity is updated when RIS are added */
	INIT_LIST_HEAD_RCU(&comp->class_list);
	comp->class = class;

	list_add_rcu(&comp->class_list, &class->components);

	return comp;
}

static void mpam_component_destroy(struct mpam_component *comp)
{
	struct mpam_class *class = comp->class;

	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&comp->class_list);
	add_to_garbage(comp);

	if (list_empty(&class->components))
		mpam_class_destroy(class);
}

static struct mpam_component *
mpam_component_find(struct mpam_class *class, int id)
{
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(comp, &class->components, class_list) {
		if (comp->comp_id == id)
			return comp;
	}

	return mpam_component_alloc(class, id);
}

static struct mpam_vmsc *
mpam_vmsc_alloc(struct mpam_component *comp, struct mpam_msc *msc)
{
	struct mpam_vmsc *vmsc;

	lockdep_assert_held(&mpam_list_lock);

	vmsc = kzalloc(sizeof(*vmsc), GFP_KERNEL);
	if (!vmsc)
		return ERR_PTR(-ENOMEM);
	init_garbage(&vmsc->garbage);

	INIT_LIST_HEAD_RCU(&vmsc->ris);
	INIT_LIST_HEAD_RCU(&vmsc->comp_list);
	vmsc->comp = comp;
	vmsc->msc = msc;

	list_add_rcu(&vmsc->comp_list, &comp->vmsc);

	return vmsc;
}

static void mpam_vmsc_destroy(struct mpam_vmsc *vmsc)
{
	struct mpam_component *comp = vmsc->comp;

	lockdep_assert_held(&mpam_list_lock);

	list_del_rcu(&vmsc->comp_list);
	add_to_garbage(vmsc);

	if (list_empty(&comp->vmsc))
		mpam_component_destroy(comp);
}

static struct mpam_vmsc *
mpam_vmsc_find(struct mpam_component *comp, struct mpam_msc *msc)
{
	struct mpam_vmsc *vmsc;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
		if (vmsc->msc->id == msc->id)
			return vmsc;
	}

	return mpam_vmsc_alloc(comp, msc);
}
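
/*
 * The *_find() helpers above allocate on demand, so looking up a RIS's class,
 * component and vmsc builds the tree as a side effect. If a later step fails,
 * callers such as mpam_ris_create_locked() destroy any parent that was left
 * empty, and the mpam_*_destroy() helpers cascade that back up the tree.
 */
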
/*
 * The cacheinfo structures are only populated when CPUs are online.
 * This helper walks the acpi tables to include offline CPUs too.
 */
int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level,
				   cpumask_t *affinity)
{
	return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
}

/*
 * cpumask_of_node() only knows about online CPUs. This can't tell us whether
 * a class is represented on all possible CPUs.
 */
static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (node_id == cpu_to_node(cpu))
			cpumask_set_cpu(cpu, affinity);
	}
}

static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity,
				 enum mpam_class_types type,
				 struct mpam_class *class,
				 struct mpam_component *comp)
{
	int err;

	switch (type) {
	case MPAM_CLASS_CACHE:
		err = mpam_get_cpumask_from_cache_id(comp->comp_id, class->level,
						     affinity);
		if (err) {
			dev_warn_once(&msc->pdev->dev,
				      "Failed to determine CPU affinity\n");
			return err;
		}

		if (cpumask_empty(affinity))
			dev_warn_once(&msc->pdev->dev, "no CPUs associated with cache node\n");

		break;
	case MPAM_CLASS_MEMORY:
		get_cpumask_from_node_id(comp->comp_id, affinity);
		/* affinity may be empty for CPU-less memory nodes */
		break;
	case MPAM_CLASS_UNKNOWN:
		return 0;
	}

	cpumask_and(affinity, affinity, &msc->accessibility);

	return 0;
}

static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx,
				  enum mpam_class_types type, u8 class_id,
				  int component_id)
{
	int err;
	struct mpam_vmsc *vmsc;
	struct mpam_msc_ris *ris;
	struct mpam_class *class;
	struct mpam_component *comp;
	struct platform_device *pdev = msc->pdev;

	lockdep_assert_held(&mpam_list_lock);

	if (ris_idx > MPAM_MSC_MAX_NUM_RIS)
		return -EINVAL;

	if (test_and_set_bit(ris_idx, &msc->ris_idxs))
		return -EBUSY;

	ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), GFP_KERNEL);
	if (!ris)
		return -ENOMEM;
	init_garbage(&ris->garbage);
	ris->garbage.pdev = pdev;

	class = mpam_class_find(class_id, type);
	if (IS_ERR(class))
		return PTR_ERR(class);

	comp = mpam_component_find(class, component_id);
	if (IS_ERR(comp)) {
		if (list_empty(&class->components))
			mpam_class_destroy(class);
		return PTR_ERR(comp);
	}

	vmsc = mpam_vmsc_find(comp, msc);
	if (IS_ERR(vmsc)) {
		if (list_empty(&comp->vmsc))
			mpam_component_destroy(comp);
		return PTR_ERR(vmsc);
	}

	err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp);
	if (err) {
		if (list_empty(&vmsc->ris))
			mpam_vmsc_destroy(vmsc);
		return err;
	}

	ris->ris_idx = ris_idx;
	INIT_LIST_HEAD_RCU(&ris->msc_list);
	INIT_LIST_HEAD_RCU(&ris->vmsc_list);
	ris->vmsc = vmsc;

	cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity);
	cpumask_or(&class->affinity, &class->affinity, &ris->affinity);
	list_add_rcu(&ris->vmsc_list, &vmsc->ris);
	list_add_rcu(&ris->msc_list, &msc->ris);

	return 0;
}

static void mpam_ris_destroy(struct mpam_msc_ris *ris)
{
	struct mpam_vmsc *vmsc = ris->vmsc;
	struct mpam_msc *msc = vmsc->msc;
	struct mpam_component *comp = vmsc->comp;
	struct mpam_class *class = comp->class;

	lockdep_assert_held(&mpam_list_lock);

	/*
	 * It is assumed affinities don't overlap. If they do the class becomes
	 * unusable immediately.
	 */
	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
	clear_bit(ris->ris_idx, &msc->ris_idxs);
	list_del_rcu(&ris->msc_list);
	list_del_rcu(&ris->vmsc_list);
	add_to_garbage(ris);

	if (list_empty(&vmsc->ris))
		mpam_vmsc_destroy(vmsc);
}
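
/*
 * Called from the firmware parsing code (e.g. acpi_mpam_parse_resources())
 * for each RIS that firmware describes behind this MSC. Any partially built
 * tree is freed via the garbage list if creation fails.
 */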
int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
		    enum mpam_class_types type, u8 class_id, int component_id)
{
	int err;

	mutex_lock(&mpam_list_lock);
	err = mpam_ris_create_locked(msc, ris_idx, type, class_id,
				     component_id);
	mutex_unlock(&mpam_list_lock);
	if (err)
		mpam_free_garbage();

	return err;
}

static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc,
						   u8 ris_idx)
{
	int err;
	struct mpam_msc_ris *ris;

	lockdep_assert_held(&mpam_list_lock);

	if (!test_bit(ris_idx, &msc->ris_idxs)) {
		err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN,
					     0, 0);
		if (err)
			return ERR_PTR(err);
	}

	list_for_each_entry(ris, &msc->ris, msc_list) {
		if (ris->ris_idx == ris_idx)
			return ris;
	}

	return ERR_PTR(-ENOENT);
}

/*
 * IHI009A.a has this nugget: "If a monitor does not support automatic behaviour
 * of NRDY, software can use this bit for any purpose" - so hardware might not
 * implement this - but it isn't RES0.
 *
 * Try and see what values stick in this bit. If we can write either value,
 * it's probably not implemented by hardware.
 */
static bool _mpam_ris_hw_probe_hw_nrdy(struct mpam_msc_ris *ris, u32 mon_reg)
{
	u32 now;
	u64 mon_sel;
	bool can_set, can_clear;
	struct mpam_msc *msc = ris->vmsc->msc;

	if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc)))
		return false;

	/* Select monitor zero on this RIS before poking the NRDY bit */
	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, 0) |
		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
	mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);

	_mpam_write_monsel_reg(msc, mon_reg, MSMON___NRDY);
	now = _mpam_read_monsel_reg(msc, mon_reg);
	can_set = now & MSMON___NRDY;

	_mpam_write_monsel_reg(msc, mon_reg, 0);
	now = _mpam_read_monsel_reg(msc, mon_reg);
	can_clear = !(now & MSMON___NRDY);
	mpam_mon_sel_unlock(msc);

	return (!can_set || !can_clear);
}

#define mpam_ris_hw_probe_hw_nrdy(_ris, _mon_reg)			\
	_mpam_ris_hw_probe_hw_nrdy(_ris, MSMON_##_mon_reg)
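
/*
 * Probe the ID registers of this RIS and fill in ris->props.
 * Called from mpam_msc_hw_probe() with PART_SEL already pointing at this RIS.
 */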
static void mpam_ris_hw_probe(struct mpam_msc_ris *ris)
{
	int err;
	struct mpam_msc *msc = ris->vmsc->msc;
	struct device *dev = &msc->pdev->dev;
	struct mpam_props *props = &ris->props;

	lockdep_assert_held(&msc->probe_lock);
	lockdep_assert_held(&msc->part_sel_lock);

	/* Cache Portion partitioning */
	if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) {
		u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR);

		props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features);
		if (props->cpbm_wd)
			mpam_set_feature(mpam_feat_cpor_part, props);
	}

	/* Memory bandwidth partitioning */
	if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) {
		u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR);

		/* portion bitmap resolution */
		props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features);
		if (props->mbw_pbm_bits &&
		    FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features))
			mpam_set_feature(mpam_feat_mbw_part, props);

		props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features);
		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features))
			mpam_set_feature(mpam_feat_mbw_max, props);
	}

	/* Performance Monitoring */
	if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) {
		u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR);

		/*
		 * If the firmware max-nrdy-us property is missing, the
		 * CSU counters can't be used. Should we wait forever?
		 */
		err = device_property_read_u32(&msc->pdev->dev,
					       "arm,not-ready-us",
					       &msc->nrdy_usec);

		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) {
			u32 csumonidr;

			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
			if (props->num_csu_mon) {
				bool hw_managed;

				mpam_set_feature(mpam_feat_msmon_csu, props);

				/* Is NRDY hardware managed? */
				hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, CSU);
				if (hw_managed)
					mpam_set_feature(mpam_feat_msmon_csu_hw_nrdy, props);
			}

			/*
			 * Accept the missing firmware property if NRDY appears
			 * un-implemented.
			 */
			if (err && mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, props))
				dev_err_once(dev, "Counters are not usable because not-ready timeout was not provided by firmware.\n");
		}
		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
			bool hw_managed;
			u32 mbwumon_idr = mpam_read_partsel_reg(msc, MBWUMON_IDR);

			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumon_idr);
			if (props->num_mbwu_mon)
				mpam_set_feature(mpam_feat_msmon_mbwu, props);

			/* Is NRDY hardware managed? */
			hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, MBWU);
			if (hw_managed)
				mpam_set_feature(mpam_feat_msmon_mbwu_hw_nrdy, props);

			/*
			 * Don't warn about any missing firmware property for
			 * MBWU NRDY - it doesn't make any sense!
			 */
		}
	}
}

static int mpam_msc_hw_probe(struct mpam_msc *msc)
{
	u64 idr;
	u16 partid_max;
	u8 ris_idx, pmg_max;
	struct mpam_msc_ris *ris;
	struct device *dev = &msc->pdev->dev;

	lockdep_assert_held(&msc->probe_lock);

	idr = __mpam_read_reg(msc, MPAMF_AIDR);
	if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) {
		dev_err_once(dev, "MSC does not match MPAM architecture v1.x\n");
		return -EIO;
	}

	/* Grab an IDR value to find out how many RIS there are */
	mutex_lock(&msc->part_sel_lock);
	idr = mpam_msc_read_idr(msc);
	mutex_unlock(&msc->part_sel_lock);

	msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr);

	/* Use these values so partid/pmg always starts with a valid value */
	msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
	msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);

	for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) {
		mutex_lock(&msc->part_sel_lock);
		__mpam_part_sel(ris_idx, 0, msc);
		idr = mpam_msc_read_idr(msc);
		mutex_unlock(&msc->part_sel_lock);

		partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
		pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
		msc->partid_max = min(msc->partid_max, partid_max);
		msc->pmg_max = min(msc->pmg_max, pmg_max);

		mutex_lock(&mpam_list_lock);
		ris = mpam_get_or_create_ris(msc, ris_idx);
		mutex_unlock(&mpam_list_lock);
		if (IS_ERR(ris))
			return PTR_ERR(ris);
		ris->idr = idr;

		mutex_lock(&msc->part_sel_lock);
		__mpam_part_sel(ris_idx, 0, msc);
		mpam_ris_hw_probe(ris);
		mutex_unlock(&msc->part_sel_lock);
	}

	spin_lock(&partid_max_lock);
	mpam_partid_max = min(mpam_partid_max, msc->partid_max);
	mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max);
	spin_unlock(&partid_max_lock);

	msc->probed = true;

	return 0;
}
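
/*
 * Worked example for mpam_reset_msc_bitmap() below: with wd = 40, one full
 * word of ~0 is written, then msb = (40 - 1) % 32 = 7 and the final word is
 * GENMASK(7, 0). With wd = 32, no leading words are written and the single
 * word is GENMASK(31, 0).
 */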
static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
{
	u32 num_words, msb;
	u32 bm = ~0;
	int i;

	lockdep_assert_held(&msc->part_sel_lock);

	if (wd == 0)
		return;

	/*
	 * Write all ~0 to all but the last 32bit-word, which may
	 * have fewer bits...
	 */
	num_words = DIV_ROUND_UP(wd, 32);
	for (i = 0; i < num_words - 1; i++, reg += sizeof(bm))
		__mpam_write_reg(msc, reg, bm);

	/*
	 * ....and then the last (maybe) partial 32bit word. When wd is a
	 * multiple of 32, msb should be 31 to write a full 32bit word.
	 */
	msb = (wd - 1) % 32;
	bm = GENMASK(msb, 0);
	__mpam_write_reg(msc, reg, bm);
}

static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid)
{
	struct mpam_msc *msc = ris->vmsc->msc;
	struct mpam_props *rprops = &ris->props;

	WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));

	mutex_lock(&msc->part_sel_lock);
	__mpam_part_sel(ris->ris_idx, partid, msc);

	if (mpam_has_feature(mpam_feat_cpor_part, rprops))
		mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd);

	if (mpam_has_feature(mpam_feat_mbw_part, rprops))
		mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits);

	if (mpam_has_feature(mpam_feat_mbw_min, rprops))
		mpam_write_partsel_reg(msc, MBW_MIN, 0);

	if (mpam_has_feature(mpam_feat_mbw_max, rprops))
		mpam_write_partsel_reg(msc, MBW_MAX, MPAMCFG_MBW_MAX_MAX);

	mutex_unlock(&msc->part_sel_lock);
}

static void mpam_reset_ris(struct mpam_msc_ris *ris)
{
	u16 partid, partid_max;

	WARN_ON_ONCE(!srcu_read_lock_held(&mpam_srcu));

	if (ris->in_reset_state)
		return;

	spin_lock(&partid_max_lock);
	partid_max = mpam_partid_max;
	spin_unlock(&partid_max_lock);
	for (partid = 0; partid <= partid_max; partid++)
		mpam_reset_ris_partid(ris, partid);
}

static void mpam_reset_msc(struct mpam_msc *msc, bool online)
{
	struct mpam_msc_ris *ris;

	list_for_each_entry_srcu(ris, &msc->ris, msc_list, srcu_read_lock_held(&mpam_srcu)) {
		mpam_reset_ris(ris);

		/*
		 * Set in_reset_state when coming online. The reset state
		 * for non-zero partid may be lost while the CPUs are offline.
		 */
		ris->in_reset_state = online;
	}
}

static int mpam_cpu_online(unsigned int cpu)
{
	struct mpam_msc *msc;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		if (atomic_fetch_inc(&msc->online_refs) == 0)
			mpam_reset_msc(msc, true);
	}

	return 0;
}

/* Before mpam is enabled, try to probe new MSC */
static int mpam_discovery_cpu_online(unsigned int cpu)
{
	int err = 0;
	struct mpam_msc *msc;
	bool new_device_probed = false;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		mutex_lock(&msc->probe_lock);
		if (!msc->probed)
			err = mpam_msc_hw_probe(msc);
		mutex_unlock(&msc->probe_lock);

		if (err)
			break;
		new_device_probed = true;
	}

	if (new_device_probed && !err)
		schedule_work(&mpam_enable_work);
	if (err) {
		mpam_disable_reason = "error during probing";
		schedule_work(&mpam_broken_work);
	}

	return err;
}

static int mpam_cpu_offline(unsigned int cpu)
{
	struct mpam_msc *msc;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		if (!cpumask_test_cpu(cpu, &msc->accessibility))
			continue;

		if (atomic_dec_and_test(&msc->online_refs))
			mpam_reset_msc(msc, false);
	}

	return 0;
}
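
/*
 * Register (or re-register) the cpuhp callbacks. Discovery starts out with
 * mpam_discovery_cpu_online(); once every MSC has been probed,
 * mpam_enable_once() calls this again to swap in mpam_cpu_online() and
 * mpam_cpu_offline(), removing the previous state first.
 */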
static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online),
					  int (*offline)(unsigned int offline),
					  char *name)
{
	mutex_lock(&mpam_cpuhp_state_lock);
	if (mpam_cpuhp_state) {
		cpuhp_remove_state(mpam_cpuhp_state);
		mpam_cpuhp_state = 0;
	}

	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, name, online,
					     offline);
	if (mpam_cpuhp_state <= 0) {
		pr_err("Failed to register cpuhp callbacks\n");
		mpam_cpuhp_state = 0;
	}
	mutex_unlock(&mpam_cpuhp_state_lock);
}

/*
 * An MSC can control traffic from a set of CPUs, but may only be accessible
 * from a (hopefully wider) set of CPUs. The common reason for this is power
 * management. If all the CPUs in a cluster are in PSCI:CPU_SUSPEND, the
 * corresponding cache may also be powered off. By making accesses from
 * one of those CPUs, we ensure we don't access a cache that's powered off.
 */
static void update_msc_accessibility(struct mpam_msc *msc)
{
	u32 affinity_id;
	int err;

	err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity",
				       &affinity_id);
	if (err)
		cpumask_copy(&msc->accessibility, cpu_possible_mask);
	else
		acpi_pptt_get_cpus_from_container(affinity_id, &msc->accessibility);
}

/*
 * There are two ways of reaching a struct mpam_msc_ris. Via the
 * class->component->vmsc->ris, or via the msc.
 * When destroying the msc, the other side needs unlinking and cleaning up too.
 */
static void mpam_msc_destroy(struct mpam_msc *msc)
{
	struct platform_device *pdev = msc->pdev;
	struct mpam_msc_ris *ris, *tmp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
		mpam_ris_destroy(ris);

	list_del_rcu(&msc->all_msc_list);
	platform_set_drvdata(pdev, NULL);

	add_to_garbage(msc);
}

static void mpam_msc_drv_remove(struct platform_device *pdev)
{
	struct mpam_msc *msc = platform_get_drvdata(pdev);

	mutex_lock(&mpam_list_lock);
	mpam_msc_destroy(msc);
	mutex_unlock(&mpam_list_lock);

	mpam_free_garbage();
}

static struct mpam_msc *do_mpam_msc_drv_probe(struct platform_device *pdev)
{
	int err;
	u32 tmp;
	struct mpam_msc *msc;
	struct resource *msc_res;
	struct device *dev = &pdev->dev;

	lockdep_assert_held(&mpam_list_lock);

	msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return ERR_PTR(-ENOMEM);
	init_garbage(&msc->garbage);
	msc->garbage.pdev = pdev;

	err = devm_mutex_init(dev, &msc->probe_lock);
	if (err)
		return ERR_PTR(err);

	err = devm_mutex_init(dev, &msc->part_sel_lock);
	if (err)
		return ERR_PTR(err);

	mpam_mon_sel_lock_init(msc);
	msc->id = pdev->id;
	msc->pdev = pdev;
	INIT_LIST_HEAD_RCU(&msc->all_msc_list);
	INIT_LIST_HEAD_RCU(&msc->ris);

	update_msc_accessibility(msc);
	if (cpumask_empty(&msc->accessibility)) {
		dev_err_once(dev, "MSC is not accessible from any CPU!\n");
		return ERR_PTR(-EINVAL);
	}

	if (device_property_read_u32(&pdev->dev, "pcc-channel", &tmp))
		msc->iface = MPAM_IFACE_MMIO;
	else
		msc->iface = MPAM_IFACE_PCC;

	if (msc->iface == MPAM_IFACE_MMIO) {
		void __iomem *io;

		io = devm_platform_get_and_ioremap_resource(pdev, 0,
							    &msc_res);
		if (IS_ERR(io)) {
			dev_err_once(dev, "Failed to map MSC base address\n");
			return ERR_CAST(io);
		}
		msc->mapped_hwpage_sz = resource_size(msc_res);
		msc->mapped_hwpage = io;
	} else {
		return ERR_PTR(-EINVAL);
	}

	list_add_rcu(&msc->all_msc_list, &mpam_all_msc);
	platform_set_drvdata(pdev, msc);

	return msc;
}
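
/* Number of MSC described by firmware, set from acpi_mpam_count_msc() at init */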
static int fw_num_msc;

static int mpam_msc_drv_probe(struct platform_device *pdev)
{
	int err;
	struct mpam_msc *msc = NULL;
	void *plat_data = pdev->dev.platform_data;

	mutex_lock(&mpam_list_lock);
	msc = do_mpam_msc_drv_probe(pdev);
	mutex_unlock(&mpam_list_lock);

	if (IS_ERR(msc))
		return PTR_ERR(msc);

	/* Create RIS entries described by firmware */
	err = acpi_mpam_parse_resources(msc, plat_data);
	if (err) {
		mpam_msc_drv_remove(pdev);
		return err;
	}

	if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc)
		mpam_register_cpuhp_callbacks(mpam_discovery_cpu_online, NULL,
					      "mpam:drv_probe");

	return 0;
}

static struct platform_driver mpam_msc_driver = {
	.driver = {
		.name = "mpam_msc",
	},
	.probe = mpam_msc_drv_probe,
	.remove = mpam_msc_drv_remove,
};

/* Any of these features mean the BWA_WD field is valid. */
static bool mpam_has_bwa_wd_feature(struct mpam_props *props)
{
	if (mpam_has_feature(mpam_feat_mbw_min, props))
		return true;
	if (mpam_has_feature(mpam_feat_mbw_max, props))
		return true;
	return false;
}

#define MISMATCHED_HELPER(parent, child, helper, field, alias)		\
	helper(parent) &&						\
	((helper(child) && (parent)->field != (child)->field) ||	\
	 (!helper(child) && !(alias)))

#define MISMATCHED_FEAT(parent, child, feat, field, alias)		\
	mpam_has_feature((feat), (parent)) &&				\
	((mpam_has_feature((feat), (child)) && (parent)->field != (child)->field) || \
	 (!mpam_has_feature((feat), (child)) && !(alias)))

#define CAN_MERGE_FEAT(parent, child, feat, alias)			\
	(alias) && !mpam_has_feature((feat), (parent)) &&		\
	mpam_has_feature((feat), (child))
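
/*
 * Example reading of the helpers above: when merging a RIS into its vmsc
 * (alias == true), CAN_MERGE_FEAT() is true if only the child has the
 * feature, so the parent can simply copy the child's value.
 * MISMATCHED_FEAT() is true when both sides have the feature but disagree on
 * the field, or when only the parent has it and the resources don't alias.
 */
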
/*
 * Combine two props fields.
 * If this is for controls that alias the same resource, it is safe to just
 * copy the values over. If two aliasing controls implement the same scheme
 * a safe value must be picked.
 * For non-aliasing controls, these control different resources, and the
 * resulting safe value must be compatible with both. When merging values in
 * the tree, all the aliasing resources must be handled first.
 * On mismatch, parent is modified.
 */
static void __props_mismatch(struct mpam_props *parent,
			     struct mpam_props *child, bool alias)
{
	if (CAN_MERGE_FEAT(parent, child, mpam_feat_cpor_part, alias)) {
		parent->cpbm_wd = child->cpbm_wd;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_cpor_part,
				   cpbm_wd, alias)) {
		pr_debug("cleared cpor_part\n");
		mpam_clear_feature(mpam_feat_cpor_part, parent);
		parent->cpbm_wd = 0;
	}

	if (CAN_MERGE_FEAT(parent, child, mpam_feat_mbw_part, alias)) {
		parent->mbw_pbm_bits = child->mbw_pbm_bits;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_mbw_part,
				   mbw_pbm_bits, alias)) {
		pr_debug("cleared mbw_part\n");
		mpam_clear_feature(mpam_feat_mbw_part, parent);
		parent->mbw_pbm_bits = 0;
	}

	/* bwa_wd is a count of bits, fewer bits means less precision */
	if (alias && !mpam_has_bwa_wd_feature(parent) &&
	    mpam_has_bwa_wd_feature(child)) {
		parent->bwa_wd = child->bwa_wd;
	} else if (MISMATCHED_HELPER(parent, child, mpam_has_bwa_wd_feature,
				     bwa_wd, alias)) {
		pr_debug("took the min bwa_wd\n");
		parent->bwa_wd = min(parent->bwa_wd, child->bwa_wd);
	}

	/* For num properties, take the minimum */
	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_csu, alias)) {
		parent->num_csu_mon = child->num_csu_mon;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_csu,
				   num_csu_mon, alias)) {
		pr_debug("took the min num_csu_mon\n");
		parent->num_csu_mon = min(parent->num_csu_mon,
					  child->num_csu_mon);
	}

	if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_mbwu, alias)) {
		parent->num_mbwu_mon = child->num_mbwu_mon;
	} else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_mbwu,
				   num_mbwu_mon, alias)) {
		pr_debug("took the min num_mbwu_mon\n");
		parent->num_mbwu_mon = min(parent->num_mbwu_mon,
					   child->num_mbwu_mon);
	}

	if (alias) {
		/* Merge features for aliased resources */
		bitmap_or(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
	} else {
		/* Clear missing features for non aliasing */
		bitmap_and(parent->features, parent->features, child->features, MPAM_FEATURE_LAST);
	}
}

/*
 * If a vmsc doesn't match class feature/configuration, do the right thing(tm).
 * For 'num' properties we can just take the minimum.
 * For properties where the mismatched unused bits would make a difference, we
 * nobble the class feature, as we can't configure all the resources.
 * e.g. The L3 cache is composed of two resources with 13 and 17 portion
 * bitmaps respectively.
 */
static void
__class_props_mismatch(struct mpam_class *class, struct mpam_vmsc *vmsc)
{
	struct mpam_props *cprops = &class->props;
	struct mpam_props *vprops = &vmsc->props;
	struct device *dev = &vmsc->msc->pdev->dev;

	lockdep_assert_held(&mpam_list_lock);	/* we modify class */

	dev_dbg(dev, "Merging features for class:0x%lx &= vmsc:0x%lx\n",
		(long)cprops->features, (long)vprops->features);

	/* Take the safe value for any common features */
	__props_mismatch(cprops, vprops, false);
}

static void
__vmsc_props_mismatch(struct mpam_vmsc *vmsc, struct mpam_msc_ris *ris)
{
	struct mpam_props *rprops = &ris->props;
	struct mpam_props *vprops = &vmsc->props;
	struct device *dev = &vmsc->msc->pdev->dev;

	lockdep_assert_held(&mpam_list_lock);	/* we modify vmsc */

	dev_dbg(dev, "Merging features for vmsc:0x%lx |= ris:0x%lx\n",
		(long)vprops->features, (long)rprops->features);

	/*
	 * Merge mismatched features - Copy any features that aren't common,
	 * but take the safe value for any common features.
	 */
	__props_mismatch(vprops, rprops, true);
}

/*
 * Copy the first component's first vMSC's properties and features to the
 * class. __class_props_mismatch() will remove conflicts.
 * It is not possible to have a class with no components, or a component with
 * no resources. The vMSC properties have already been built.
 */
static void mpam_enable_init_class_features(struct mpam_class *class)
{
	struct mpam_vmsc *vmsc;
	struct mpam_component *comp;

	comp = list_first_entry(&class->components,
				struct mpam_component, class_list);
	vmsc = list_first_entry(&comp->vmsc,
				struct mpam_vmsc, comp_list);

	class->props = vmsc->props;
}

static void mpam_enable_merge_vmsc_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_msc_ris *ris;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
		list_for_each_entry(ris, &vmsc->ris, vmsc_list) {
			__vmsc_props_mismatch(vmsc, ris);
			class->nrdy_usec = max(class->nrdy_usec,
					       vmsc->msc->nrdy_usec);
		}
	}
}

static void mpam_enable_merge_class_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list)
		__class_props_mismatch(class, vmsc);
}

/*
 * Merge all the common resource features into class.
 * vmsc features are bitwise-or'd together by mpam_enable_merge_vmsc_features()
 * as the first step so that mpam_enable_init_class_features() can initialise
 * the class with a representative set of features.
 * Next, mpam_enable_merge_class_features() bitwise-ands all the vmsc
 * features to form the class features.
 * Other features are the min/max as appropriate.
 *
 * To avoid walking the whole tree twice, the class->nrdy_usec property is
 * updated when working with the vmsc as it is a max(), and doesn't need
 * initialising first.
 */
static void mpam_enable_merge_features(struct list_head *all_classes_list)
{
	struct mpam_class *class;
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, all_classes_list, classes_list) {
		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_vmsc_features(comp);

		mpam_enable_init_class_features(class);

		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_class_features(comp);
	}
}

static void mpam_enable_once(void)
{
	/*
	 * Once the cpuhp callbacks have been changed, mpam_partid_max can no
	 * longer change.
	 */
	spin_lock(&partid_max_lock);
	partid_max_published = true;
	spin_unlock(&partid_max_lock);

	mutex_lock(&mpam_list_lock);
	mpam_enable_merge_features(&mpam_classes);
	mutex_unlock(&mpam_list_lock);

	mpam_register_cpuhp_callbacks(mpam_cpu_online, mpam_cpu_offline,
				      "mpam:online");

	/* Use printk() to avoid the pr_fmt adding the function name. */
	printk(KERN_INFO "MPAM enabled with %u PARTIDs and %u PMGs\n",
	       mpam_partid_max + 1, mpam_pmg_max + 1);
}

void mpam_disable(struct work_struct *ignored)
{
	struct mpam_msc *msc, *tmp;

	mutex_lock(&mpam_cpuhp_state_lock);
	if (mpam_cpuhp_state) {
		cpuhp_remove_state(mpam_cpuhp_state);
		mpam_cpuhp_state = 0;
	}
	mutex_unlock(&mpam_cpuhp_state_lock);

	mutex_lock(&mpam_list_lock);
	list_for_each_entry_safe(msc, tmp, &mpam_all_msc, all_msc_list)
		mpam_msc_destroy(msc);
	mutex_unlock(&mpam_list_lock);
	mpam_free_garbage();

	pr_err_once("MPAM disabled due to %s\n", mpam_disable_reason);
}

/*
 * Enable mpam once all devices have been probed.
 * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
 * Also scheduled when new devices are probed when new CPUs come online.
 */
void mpam_enable(struct work_struct *work)
{
	static atomic_t once;
	struct mpam_msc *msc;
	bool all_devices_probed = true;

	/* Have we probed all the hw devices? */
	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		mutex_lock(&msc->probe_lock);
		if (!msc->probed)
			all_devices_probed = false;
		mutex_unlock(&msc->probe_lock);

		if (!all_devices_probed)
			break;
	}

	if (all_devices_probed && !atomic_fetch_inc(&once))
		mpam_enable_once();
}

static int __init mpam_msc_driver_init(void)
{
	if (!system_supports_mpam())
		return -EOPNOTSUPP;

	init_srcu_struct(&mpam_srcu);

	fw_num_msc = acpi_mpam_count_msc();
	if (fw_num_msc <= 0) {
		pr_err("No MSC devices found in firmware\n");
		return -EINVAL;
	}

	return platform_driver_register(&mpam_msc_driver);
}

/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */
subsys_initcall(mpam_msc_driver_init);