// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Arm Ltd.

#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/srcu.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "mpam_internal.h"

DEFINE_STATIC_KEY_FALSE(mpam_enabled); /* This moves to arch code */

/*
 * mpam_list_lock protects the SRCU lists when writing. Once the
 * mpam_enabled key is enabled these lists are read-only,
 * unless the error interrupt disables the driver.
 */
static DEFINE_MUTEX(mpam_list_lock);
static LIST_HEAD(mpam_all_msc);

struct srcu_struct mpam_srcu;

/*
 * Number of MSCs that have been probed. Once all MSCs have been probed MPAM
 * can be enabled.
 */
static atomic_t mpam_num_msc;

static int mpam_cpuhp_state;
static DEFINE_MUTEX(mpam_cpuhp_state_lock);

/*
 * The smallest common values for any CPU or MSC in the system.
 * Generating traffic outside this range will result in screaming interrupts.
 */
u16 mpam_partid_max;
u8 mpam_pmg_max;
static bool partid_max_init, partid_max_published;
static DEFINE_SPINLOCK(partid_max_lock);

/*
 * mpam is enabled once all devices have been probed from CPU online callbacks,
 * scheduled via this work_struct. If access to an MSC depends on a CPU that
 * was not brought online at boot, this can happen surprisingly late.
 */
static DECLARE_WORK(mpam_enable_work, &mpam_enable);

/*
 * All mpam error interrupts indicate a software bug. On receipt, disable the
 * driver.
 */
static DECLARE_WORK(mpam_broken_work, &mpam_disable);

/* The reason mpam was disabled, printed to aid debugging */
static char *mpam_disable_reason;

/*
 * An MSC is a physical container for controls and monitors, each identified by
 * their RIS index. These share a base-address, interrupts and some MMIO
 * registers. A vMSC is a virtual container for RIS in an MSC that control or
 * monitor the same thing. Members of a vMSC are all RIS in the same MSC, but
 * not all RIS in an MSC share a vMSC.
 *
 * Components are a group of vMSC that control or monitor the same thing but
 * are from different MSC, so have different base-addresses, interrupts etc.
 * Classes are the set of components of the same type.
 *
 * The features of a vMSC are the union of the features of the RIS it contains.
 * The features of a Class and Component are the common subset of the vMSC
 * they contain.
 *
 * e.g. The system cache may have bandwidth controls on multiple interfaces,
 * for regulating traffic from devices independently of traffic from CPUs.
 * If these are two RIS in one MSC, they will be treated as controlling
 * different things, and will not share a vMSC/component/class.
 *
 * e.g. The L2 may have one MSC and two RIS, one for cache-controls, another
 * for bandwidth. These two RIS are members of the same vMSC.
 *
 * e.g. The set of RIS that make up the L2 are grouped as a component. These
 * are sometimes termed slices. They should be configured the same, as if there
 * were only one.
 *
 * e.g. The SoC probably has more than one L2, each attached to a distinct set
 * of CPUs. All the L2 components are grouped as a class.
 *
 * When creating an MSC, struct mpam_msc is added to the mpam_all_msc list,
 * then linked via struct mpam_msc_ris to a vmsc, component and class.
 * The same MSC may exist under different class->component->vmsc paths, but the
 * RIS index will be unique.
 */
LIST_HEAD(mpam_classes);

/* List of all objects that can be free()d after synchronize_srcu() */
static LLIST_HEAD(mpam_garbage);

static inline void init_garbage(struct mpam_garbage *garbage)
{
	init_llist_node(&garbage->llist);
}

#define add_to_garbage(x)				\
do {							\
	__typeof__(x) _x = (x);				\
	_x->garbage.to_free = _x;			\
	llist_add(&_x->garbage.llist, &mpam_garbage);	\
} while (0)

static void mpam_free_garbage(void)
{
	struct mpam_garbage *iter, *tmp;
	struct llist_node *to_free = llist_del_all(&mpam_garbage);

	if (!to_free)
		return;

	synchronize_srcu(&mpam_srcu);

	llist_for_each_entry_safe(iter, tmp, to_free, llist) {
		if (iter->pdev)
			devm_kfree(&iter->pdev->dev, iter->to_free);
		else
			kfree(iter->to_free);
	}
}

/*
 * Once mpam is enabled, new requestors cannot further reduce the available
 * partid. Assert that the size is fixed, and new requestors will be turned
 * away.
 */
static void mpam_assert_partid_sizes_fixed(void)
{
	WARN_ON_ONCE(!partid_max_published);
}

static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
{
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	return readl_relaxed(msc->mapped_hwpage + reg);
}

static inline u32 _mpam_read_partsel_reg(struct mpam_msc *msc, u16 reg)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_partsel_reg(msc, reg) _mpam_read_partsel_reg(msc, MPAMF_##reg)

static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));

	writel_relaxed(val, msc->mapped_hwpage + reg);
}

static inline void _mpam_write_partsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	lockdep_assert_held_once(&msc->part_sel_lock);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_partsel_reg(msc, reg, val) _mpam_write_partsel_reg(msc, MPAMCFG_##reg, val)

static inline u32 _mpam_read_monsel_reg(struct mpam_msc *msc, u16 reg)
{
	mpam_mon_sel_lock_held(msc);
	return __mpam_read_reg(msc, reg);
}

#define mpam_read_monsel_reg(msc, reg) _mpam_read_monsel_reg(msc, MSMON_##reg)

static inline void _mpam_write_monsel_reg(struct mpam_msc *msc, u16 reg, u32 val)
{
	mpam_mon_sel_lock_held(msc);
	__mpam_write_reg(msc, reg, val);
}

#define mpam_write_monsel_reg(msc, reg, val) _mpam_write_monsel_reg(msc, MSMON_##reg, val)

static u64 mpam_msc_read_idr(struct mpam_msc *msc)
{
	u64 idr_high = 0, idr_low;

	lockdep_assert_held(&msc->part_sel_lock);

	idr_low = mpam_read_partsel_reg(msc, IDR);
	if (FIELD_GET(MPAMF_IDR_EXT,
idr_low)) 213 idr_high = mpam_read_partsel_reg(msc, IDR + 4); 214 215 return (idr_high << 32) | idr_low; 216 } 217 218 static void mpam_msc_clear_esr(struct mpam_msc *msc) 219 { 220 u64 esr_low = __mpam_read_reg(msc, MPAMF_ESR); 221 222 if (!esr_low) 223 return; 224 225 /* 226 * Clearing the high/low bits of MPAMF_ESR can not be atomic. 227 * Clear the top half first, so that the pending error bits in the 228 * lower half prevent hardware from updating either half of the 229 * register. 230 */ 231 if (msc->has_extd_esr) 232 __mpam_write_reg(msc, MPAMF_ESR + 4, 0); 233 __mpam_write_reg(msc, MPAMF_ESR, 0); 234 } 235 236 static u64 mpam_msc_read_esr(struct mpam_msc *msc) 237 { 238 u64 esr_high = 0, esr_low; 239 240 esr_low = __mpam_read_reg(msc, MPAMF_ESR); 241 if (msc->has_extd_esr) 242 esr_high = __mpam_read_reg(msc, MPAMF_ESR + 4); 243 244 return (esr_high << 32) | esr_low; 245 } 246 247 static void __mpam_part_sel_raw(u32 partsel, struct mpam_msc *msc) 248 { 249 lockdep_assert_held(&msc->part_sel_lock); 250 251 mpam_write_partsel_reg(msc, PART_SEL, partsel); 252 } 253 254 static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) 255 { 256 u32 partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) | 257 FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid); 258 259 __mpam_part_sel_raw(partsel, msc); 260 } 261 262 static void __mpam_intpart_sel(u8 ris_idx, u16 intpartid, struct mpam_msc *msc) 263 { 264 u32 partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) | 265 FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, intpartid) | 266 MPAMCFG_PART_SEL_INTERNAL; 267 268 __mpam_part_sel_raw(partsel, msc); 269 } 270 271 int mpam_register_requestor(u16 partid_max, u8 pmg_max) 272 { 273 guard(spinlock)(&partid_max_lock); 274 if (!partid_max_init) { 275 mpam_partid_max = partid_max; 276 mpam_pmg_max = pmg_max; 277 partid_max_init = true; 278 } else if (!partid_max_published) { 279 mpam_partid_max = min(mpam_partid_max, partid_max); 280 mpam_pmg_max = min(mpam_pmg_max, pmg_max); 281 } else { 282 /* New requestors can't lower the values */ 283 if (partid_max < mpam_partid_max || pmg_max < mpam_pmg_max) 284 return -EBUSY; 285 } 286 287 return 0; 288 } 289 EXPORT_SYMBOL(mpam_register_requestor); 290 291 static struct mpam_class * 292 mpam_class_alloc(u8 level_idx, enum mpam_class_types type) 293 { 294 struct mpam_class *class; 295 296 lockdep_assert_held(&mpam_list_lock); 297 298 class = kzalloc(sizeof(*class), GFP_KERNEL); 299 if (!class) 300 return ERR_PTR(-ENOMEM); 301 init_garbage(&class->garbage); 302 303 INIT_LIST_HEAD_RCU(&class->components); 304 /* Affinity is updated when ris are added */ 305 class->level = level_idx; 306 class->type = type; 307 INIT_LIST_HEAD_RCU(&class->classes_list); 308 ida_init(&class->ida_csu_mon); 309 ida_init(&class->ida_mbwu_mon); 310 311 list_add_rcu(&class->classes_list, &mpam_classes); 312 313 return class; 314 } 315 316 static void mpam_class_destroy(struct mpam_class *class) 317 { 318 lockdep_assert_held(&mpam_list_lock); 319 320 list_del_rcu(&class->classes_list); 321 add_to_garbage(class); 322 } 323 324 static struct mpam_class * 325 mpam_class_find(u8 level_idx, enum mpam_class_types type) 326 { 327 struct mpam_class *class; 328 329 lockdep_assert_held(&mpam_list_lock); 330 331 list_for_each_entry(class, &mpam_classes, classes_list) { 332 if (class->type == type && class->level == level_idx) 333 return class; 334 } 335 336 return mpam_class_alloc(level_idx, type); 337 } 338 339 static struct mpam_component * 340 mpam_component_alloc(struct mpam_class *class, int id) 
341 { 342 struct mpam_component *comp; 343 344 lockdep_assert_held(&mpam_list_lock); 345 346 comp = kzalloc(sizeof(*comp), GFP_KERNEL); 347 if (!comp) 348 return ERR_PTR(-ENOMEM); 349 init_garbage(&comp->garbage); 350 351 comp->comp_id = id; 352 INIT_LIST_HEAD_RCU(&comp->vmsc); 353 /* Affinity is updated when RIS are added */ 354 INIT_LIST_HEAD_RCU(&comp->class_list); 355 comp->class = class; 356 357 list_add_rcu(&comp->class_list, &class->components); 358 359 return comp; 360 } 361 362 static void __destroy_component_cfg(struct mpam_component *comp); 363 364 static void mpam_component_destroy(struct mpam_component *comp) 365 { 366 struct mpam_class *class = comp->class; 367 368 lockdep_assert_held(&mpam_list_lock); 369 370 __destroy_component_cfg(comp); 371 372 list_del_rcu(&comp->class_list); 373 add_to_garbage(comp); 374 375 if (list_empty(&class->components)) 376 mpam_class_destroy(class); 377 } 378 379 static struct mpam_component * 380 mpam_component_find(struct mpam_class *class, int id) 381 { 382 struct mpam_component *comp; 383 384 lockdep_assert_held(&mpam_list_lock); 385 386 list_for_each_entry(comp, &class->components, class_list) { 387 if (comp->comp_id == id) 388 return comp; 389 } 390 391 return mpam_component_alloc(class, id); 392 } 393 394 static struct mpam_vmsc * 395 mpam_vmsc_alloc(struct mpam_component *comp, struct mpam_msc *msc) 396 { 397 struct mpam_vmsc *vmsc; 398 399 lockdep_assert_held(&mpam_list_lock); 400 401 vmsc = kzalloc(sizeof(*vmsc), GFP_KERNEL); 402 if (!vmsc) 403 return ERR_PTR(-ENOMEM); 404 init_garbage(&vmsc->garbage); 405 406 INIT_LIST_HEAD_RCU(&vmsc->ris); 407 INIT_LIST_HEAD_RCU(&vmsc->comp_list); 408 vmsc->comp = comp; 409 vmsc->msc = msc; 410 411 list_add_rcu(&vmsc->comp_list, &comp->vmsc); 412 413 return vmsc; 414 } 415 416 static void mpam_vmsc_destroy(struct mpam_vmsc *vmsc) 417 { 418 struct mpam_component *comp = vmsc->comp; 419 420 lockdep_assert_held(&mpam_list_lock); 421 422 list_del_rcu(&vmsc->comp_list); 423 add_to_garbage(vmsc); 424 425 if (list_empty(&comp->vmsc)) 426 mpam_component_destroy(comp); 427 } 428 429 static struct mpam_vmsc * 430 mpam_vmsc_find(struct mpam_component *comp, struct mpam_msc *msc) 431 { 432 struct mpam_vmsc *vmsc; 433 434 lockdep_assert_held(&mpam_list_lock); 435 436 list_for_each_entry(vmsc, &comp->vmsc, comp_list) { 437 if (vmsc->msc->id == msc->id) 438 return vmsc; 439 } 440 441 return mpam_vmsc_alloc(comp, msc); 442 } 443 444 /* 445 * The cacheinfo structures are only populated when CPUs are online. 446 * This helper walks the acpi tables to include offline CPUs too. 447 */ 448 int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level, 449 cpumask_t *affinity) 450 { 451 return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity); 452 } 453 454 /* 455 * cpumask_of_node() only knows about online CPUs. This can't tell us whether 456 * a class is represented on all possible CPUs. 
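 * Instead, walk each possible CPU and compare its cpu_to_node() with the
 * requested node id.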
457 */ 458 static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity) 459 { 460 int cpu; 461 462 for_each_possible_cpu(cpu) { 463 if (node_id == cpu_to_node(cpu)) 464 cpumask_set_cpu(cpu, affinity); 465 } 466 } 467 468 static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity, 469 enum mpam_class_types type, 470 struct mpam_class *class, 471 struct mpam_component *comp) 472 { 473 int err; 474 475 switch (type) { 476 case MPAM_CLASS_CACHE: 477 err = mpam_get_cpumask_from_cache_id(comp->comp_id, class->level, 478 affinity); 479 if (err) { 480 dev_warn_once(&msc->pdev->dev, 481 "Failed to determine CPU affinity\n"); 482 return err; 483 } 484 485 if (cpumask_empty(affinity)) 486 dev_warn_once(&msc->pdev->dev, "no CPUs associated with cache node\n"); 487 488 break; 489 case MPAM_CLASS_MEMORY: 490 get_cpumask_from_node_id(comp->comp_id, affinity); 491 /* affinity may be empty for CPU-less memory nodes */ 492 break; 493 case MPAM_CLASS_UNKNOWN: 494 return 0; 495 } 496 497 cpumask_and(affinity, affinity, &msc->accessibility); 498 499 return 0; 500 } 501 502 static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, 503 enum mpam_class_types type, u8 class_id, 504 int component_id) 505 { 506 int err; 507 struct mpam_vmsc *vmsc; 508 struct mpam_msc_ris *ris; 509 struct mpam_class *class; 510 struct mpam_component *comp; 511 struct platform_device *pdev = msc->pdev; 512 513 lockdep_assert_held(&mpam_list_lock); 514 515 if (ris_idx > MPAM_MSC_MAX_NUM_RIS) 516 return -EINVAL; 517 518 if (test_and_set_bit(ris_idx, &msc->ris_idxs)) 519 return -EBUSY; 520 521 ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), GFP_KERNEL); 522 if (!ris) 523 return -ENOMEM; 524 init_garbage(&ris->garbage); 525 ris->garbage.pdev = pdev; 526 527 class = mpam_class_find(class_id, type); 528 if (IS_ERR(class)) 529 return PTR_ERR(class); 530 531 comp = mpam_component_find(class, component_id); 532 if (IS_ERR(comp)) { 533 if (list_empty(&class->components)) 534 mpam_class_destroy(class); 535 return PTR_ERR(comp); 536 } 537 538 vmsc = mpam_vmsc_find(comp, msc); 539 if (IS_ERR(vmsc)) { 540 if (list_empty(&comp->vmsc)) 541 mpam_component_destroy(comp); 542 return PTR_ERR(vmsc); 543 } 544 545 err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp); 546 if (err) { 547 if (list_empty(&vmsc->ris)) 548 mpam_vmsc_destroy(vmsc); 549 return err; 550 } 551 552 ris->ris_idx = ris_idx; 553 INIT_LIST_HEAD_RCU(&ris->msc_list); 554 INIT_LIST_HEAD_RCU(&ris->vmsc_list); 555 ris->vmsc = vmsc; 556 557 cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); 558 cpumask_or(&class->affinity, &class->affinity, &ris->affinity); 559 list_add_rcu(&ris->vmsc_list, &vmsc->ris); 560 list_add_rcu(&ris->msc_list, &msc->ris); 561 562 return 0; 563 } 564 565 static void mpam_ris_destroy(struct mpam_msc_ris *ris) 566 { 567 struct mpam_vmsc *vmsc = ris->vmsc; 568 struct mpam_msc *msc = vmsc->msc; 569 struct mpam_component *comp = vmsc->comp; 570 struct mpam_class *class = comp->class; 571 572 lockdep_assert_held(&mpam_list_lock); 573 574 /* 575 * It is assumed affinities don't overlap. If they do the class becomes 576 * unusable immediately. 
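	 * (The cpumask_andnot() calls below would also remove CPUs that
	 * another RIS still covers.)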
577 */ 578 cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity); 579 cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity); 580 clear_bit(ris->ris_idx, &msc->ris_idxs); 581 list_del_rcu(&ris->msc_list); 582 list_del_rcu(&ris->vmsc_list); 583 add_to_garbage(ris); 584 585 if (list_empty(&vmsc->ris)) 586 mpam_vmsc_destroy(vmsc); 587 } 588 589 int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, 590 enum mpam_class_types type, u8 class_id, int component_id) 591 { 592 int err; 593 594 mutex_lock(&mpam_list_lock); 595 err = mpam_ris_create_locked(msc, ris_idx, type, class_id, 596 component_id); 597 mutex_unlock(&mpam_list_lock); 598 if (err) 599 mpam_free_garbage(); 600 601 return err; 602 } 603 604 static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, 605 u8 ris_idx) 606 { 607 int err; 608 struct mpam_msc_ris *ris; 609 610 lockdep_assert_held(&mpam_list_lock); 611 612 if (!test_bit(ris_idx, &msc->ris_idxs)) { 613 err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN, 614 0, 0); 615 if (err) 616 return ERR_PTR(err); 617 } 618 619 list_for_each_entry(ris, &msc->ris, msc_list) { 620 if (ris->ris_idx == ris_idx) 621 return ris; 622 } 623 624 return ERR_PTR(-ENOENT); 625 } 626 627 /* 628 * IHI009A.a has this nugget: "If a monitor does not support automatic behaviour 629 * of NRDY, software can use this bit for any purpose" - so hardware might not 630 * implement this - but it isn't RES0. 631 * 632 * Try and see what values stick in this bit. If we can write either value, 633 * its probably not implemented by hardware. 634 */ 635 static bool _mpam_ris_hw_probe_hw_nrdy(struct mpam_msc_ris *ris, u32 mon_reg) 636 { 637 u32 now; 638 u64 mon_sel; 639 bool can_set, can_clear; 640 struct mpam_msc *msc = ris->vmsc->msc; 641 642 if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc))) 643 return false; 644 645 mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, 0) | 646 FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); 647 _mpam_write_monsel_reg(msc, mon_reg, mon_sel); 648 649 _mpam_write_monsel_reg(msc, mon_reg, MSMON___NRDY); 650 now = _mpam_read_monsel_reg(msc, mon_reg); 651 can_set = now & MSMON___NRDY; 652 653 _mpam_write_monsel_reg(msc, mon_reg, 0); 654 now = _mpam_read_monsel_reg(msc, mon_reg); 655 can_clear = !(now & MSMON___NRDY); 656 mpam_mon_sel_unlock(msc); 657 658 return (!can_set || !can_clear); 659 } 660 661 #define mpam_ris_hw_probe_hw_nrdy(_ris, _mon_reg) \ 662 _mpam_ris_hw_probe_hw_nrdy(_ris, MSMON_##_mon_reg) 663 664 static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) 665 { 666 int err; 667 struct mpam_msc *msc = ris->vmsc->msc; 668 struct device *dev = &msc->pdev->dev; 669 struct mpam_props *props = &ris->props; 670 struct mpam_class *class = ris->vmsc->comp->class; 671 672 lockdep_assert_held(&msc->probe_lock); 673 lockdep_assert_held(&msc->part_sel_lock); 674 675 /* Cache Capacity Partitioning */ 676 if (FIELD_GET(MPAMF_IDR_HAS_CCAP_PART, ris->idr)) { 677 u32 ccap_features = mpam_read_partsel_reg(msc, CCAP_IDR); 678 679 props->cmax_wd = FIELD_GET(MPAMF_CCAP_IDR_CMAX_WD, ccap_features); 680 if (props->cmax_wd && 681 FIELD_GET(MPAMF_CCAP_IDR_HAS_CMAX_SOFTLIM, ccap_features)) 682 mpam_set_feature(mpam_feat_cmax_softlim, props); 683 684 if (props->cmax_wd && 685 !FIELD_GET(MPAMF_CCAP_IDR_NO_CMAX, ccap_features)) 686 mpam_set_feature(mpam_feat_cmax_cmax, props); 687 688 if (props->cmax_wd && 689 FIELD_GET(MPAMF_CCAP_IDR_HAS_CMIN, ccap_features)) 690 mpam_set_feature(mpam_feat_cmax_cmin, props); 691 692 props->cassoc_wd = 
FIELD_GET(MPAMF_CCAP_IDR_CASSOC_WD, ccap_features); 693 if (props->cassoc_wd && 694 FIELD_GET(MPAMF_CCAP_IDR_HAS_CASSOC, ccap_features)) 695 mpam_set_feature(mpam_feat_cmax_cassoc, props); 696 } 697 698 /* Cache Portion partitioning */ 699 if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { 700 u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); 701 702 props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features); 703 if (props->cpbm_wd) 704 mpam_set_feature(mpam_feat_cpor_part, props); 705 } 706 707 /* Memory bandwidth partitioning */ 708 if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) { 709 u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR); 710 711 /* portion bitmap resolution */ 712 props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features); 713 if (props->mbw_pbm_bits && 714 FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features)) 715 mpam_set_feature(mpam_feat_mbw_part, props); 716 717 props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); 718 if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) 719 mpam_set_feature(mpam_feat_mbw_max, props); 720 721 if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MIN, mbw_features)) 722 mpam_set_feature(mpam_feat_mbw_min, props); 723 724 if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_PROP, mbw_features)) 725 mpam_set_feature(mpam_feat_mbw_prop, props); 726 } 727 728 /* Priority partitioning */ 729 if (FIELD_GET(MPAMF_IDR_HAS_PRI_PART, ris->idr)) { 730 u32 pri_features = mpam_read_partsel_reg(msc, PRI_IDR); 731 732 props->intpri_wd = FIELD_GET(MPAMF_PRI_IDR_INTPRI_WD, pri_features); 733 if (props->intpri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_INTPRI, pri_features)) { 734 mpam_set_feature(mpam_feat_intpri_part, props); 735 if (FIELD_GET(MPAMF_PRI_IDR_INTPRI_0_IS_LOW, pri_features)) 736 mpam_set_feature(mpam_feat_intpri_part_0_low, props); 737 } 738 739 props->dspri_wd = FIELD_GET(MPAMF_PRI_IDR_DSPRI_WD, pri_features); 740 if (props->dspri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_DSPRI, pri_features)) { 741 mpam_set_feature(mpam_feat_dspri_part, props); 742 if (FIELD_GET(MPAMF_PRI_IDR_DSPRI_0_IS_LOW, pri_features)) 743 mpam_set_feature(mpam_feat_dspri_part_0_low, props); 744 } 745 } 746 747 /* Performance Monitoring */ 748 if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) { 749 u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR); 750 751 /* 752 * If the firmware max-nrdy-us property is missing, the 753 * CSU counters can't be used. Should we wait forever? 754 */ 755 err = device_property_read_u32(&msc->pdev->dev, 756 "arm,not-ready-us", 757 &msc->nrdy_usec); 758 759 if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) { 760 u32 csumonidr; 761 762 csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR); 763 props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr); 764 if (props->num_csu_mon) { 765 bool hw_managed; 766 767 mpam_set_feature(mpam_feat_msmon_csu, props); 768 769 if (FIELD_GET(MPAMF_CSUMON_IDR_HAS_XCL, csumonidr)) 770 mpam_set_feature(mpam_feat_msmon_csu_xcl, props); 771 772 /* Is NRDY hardware managed? */ 773 hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, CSU); 774 if (hw_managed) 775 mpam_set_feature(mpam_feat_msmon_csu_hw_nrdy, props); 776 } 777 778 /* 779 * Accept the missing firmware property if NRDY appears 780 * un-implemented. 
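			 * Otherwise print an error: without the
			 * 'arm,not-ready-us' timeout the CSU counters
			 * can't be used.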
781 */ 782 if (err && mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, props)) 783 dev_err_once(dev, "Counters are not usable because not-ready timeout was not provided by firmware."); 784 } 785 if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) { 786 bool hw_managed; 787 u32 mbwumon_idr = mpam_read_partsel_reg(msc, MBWUMON_IDR); 788 789 props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumon_idr); 790 if (props->num_mbwu_mon) 791 mpam_set_feature(mpam_feat_msmon_mbwu, props); 792 793 if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumon_idr)) 794 mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); 795 796 /* Is NRDY hardware managed? */ 797 hw_managed = mpam_ris_hw_probe_hw_nrdy(ris, MBWU); 798 if (hw_managed) 799 mpam_set_feature(mpam_feat_msmon_mbwu_hw_nrdy, props); 800 801 /* 802 * Don't warn about any missing firmware property for 803 * MBWU NRDY - it doesn't make any sense! 804 */ 805 } 806 } 807 808 /* 809 * RIS with PARTID narrowing don't have enough storage for one 810 * configuration per PARTID. If these are in a class we could use, 811 * reduce the supported partid_max to match the number of intpartid. 812 * If the class is unknown, just ignore it. 813 */ 814 if (FIELD_GET(MPAMF_IDR_HAS_PARTID_NRW, ris->idr) && 815 class->type != MPAM_CLASS_UNKNOWN) { 816 u32 nrwidr = mpam_read_partsel_reg(msc, PARTID_NRW_IDR); 817 u16 partid_max = FIELD_GET(MPAMF_PARTID_NRW_IDR_INTPARTID_MAX, nrwidr); 818 819 mpam_set_feature(mpam_feat_partid_nrw, props); 820 msc->partid_max = min(msc->partid_max, partid_max); 821 } 822 } 823 824 static int mpam_msc_hw_probe(struct mpam_msc *msc) 825 { 826 u64 idr; 827 u16 partid_max; 828 u8 ris_idx, pmg_max; 829 struct mpam_msc_ris *ris; 830 struct device *dev = &msc->pdev->dev; 831 832 lockdep_assert_held(&msc->probe_lock); 833 834 idr = __mpam_read_reg(msc, MPAMF_AIDR); 835 if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { 836 dev_err_once(dev, "MSC does not match MPAM architecture v1.x\n"); 837 return -EIO; 838 } 839 840 /* Grab an IDR value to find out how many RIS there are */ 841 mutex_lock(&msc->part_sel_lock); 842 idr = mpam_msc_read_idr(msc); 843 mutex_unlock(&msc->part_sel_lock); 844 845 msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr); 846 847 /* Use these values so partid/pmg always starts with a valid value */ 848 msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); 849 msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); 850 851 for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) { 852 mutex_lock(&msc->part_sel_lock); 853 __mpam_part_sel(ris_idx, 0, msc); 854 idr = mpam_msc_read_idr(msc); 855 mutex_unlock(&msc->part_sel_lock); 856 857 partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); 858 pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); 859 msc->partid_max = min(msc->partid_max, partid_max); 860 msc->pmg_max = min(msc->pmg_max, pmg_max); 861 msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXTD_ESR, idr); 862 863 mutex_lock(&mpam_list_lock); 864 ris = mpam_get_or_create_ris(msc, ris_idx); 865 mutex_unlock(&mpam_list_lock); 866 if (IS_ERR(ris)) 867 return PTR_ERR(ris); 868 ris->idr = idr; 869 870 mutex_lock(&msc->part_sel_lock); 871 __mpam_part_sel(ris_idx, 0, msc); 872 mpam_ris_hw_probe(ris); 873 mutex_unlock(&msc->part_sel_lock); 874 } 875 876 /* Clear any stale errors */ 877 mpam_msc_clear_esr(msc); 878 879 spin_lock(&partid_max_lock); 880 mpam_partid_max = min(mpam_partid_max, msc->partid_max); 881 mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max); 882 spin_unlock(&partid_max_lock); 883 884 msc->probed = true; 885 886 return 
0; 887 } 888 889 struct mon_read { 890 struct mpam_msc_ris *ris; 891 struct mon_cfg *ctx; 892 enum mpam_device_features type; 893 u64 *val; 894 int err; 895 }; 896 897 static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, 898 u32 *flt_val) 899 { 900 struct mon_cfg *ctx = m->ctx; 901 902 /* 903 * For CSU counters its implementation-defined what happens when not 904 * filtering by partid. 905 */ 906 *ctl_val = MSMON_CFG_x_CTL_MATCH_PARTID; 907 908 *flt_val = FIELD_PREP(MSMON_CFG_x_FLT_PARTID, ctx->partid); 909 910 if (m->ctx->match_pmg) { 911 *ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG; 912 *flt_val |= FIELD_PREP(MSMON_CFG_x_FLT_PMG, ctx->pmg); 913 } 914 915 switch (m->type) { 916 case mpam_feat_msmon_csu: 917 *ctl_val |= MSMON_CFG_CSU_CTL_TYPE_CSU; 918 919 if (mpam_has_feature(mpam_feat_msmon_csu_xcl, &m->ris->props)) 920 *flt_val |= FIELD_PREP(MSMON_CFG_CSU_FLT_XCL, ctx->csu_exclude_clean); 921 922 break; 923 case mpam_feat_msmon_mbwu: 924 *ctl_val |= MSMON_CFG_MBWU_CTL_TYPE_MBWU; 925 926 if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props)) 927 *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts); 928 929 break; 930 default: 931 pr_warn("Unexpected monitor type %d\n", m->type); 932 } 933 } 934 935 static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, 936 u32 *flt_val) 937 { 938 struct mpam_msc *msc = m->ris->vmsc->msc; 939 940 switch (m->type) { 941 case mpam_feat_msmon_csu: 942 *ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL); 943 *flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT); 944 break; 945 case mpam_feat_msmon_mbwu: 946 *ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); 947 *flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); 948 break; 949 default: 950 pr_warn("Unexpected monitor type %d\n", m->type); 951 } 952 } 953 954 /* Remove values set by the hardware to prevent apparent mismatches. */ 955 static inline void clean_msmon_ctl_val(u32 *cur_ctl) 956 { 957 *cur_ctl &= ~MSMON_CFG_x_CTL_OFLOW_STATUS; 958 } 959 960 static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, 961 u32 flt_val) 962 { 963 struct mpam_msc *msc = m->ris->vmsc->msc; 964 965 /* 966 * Write the ctl_val with the enable bit cleared, reset the counter, 967 * then enable counter. 
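	 * For CSU the counter is zeroed before EN is set; for MBWU it is
	 * zeroed afterwards, as software must clear NRDY.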
	 */
	switch (m->type) {
	case mpam_feat_msmon_csu:
		mpam_write_monsel_reg(msc, CFG_CSU_FLT, flt_val);
		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val);
		mpam_write_monsel_reg(msc, CSU, 0);
		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val | MSMON_CFG_x_CTL_EN);
		break;
	case mpam_feat_msmon_mbwu:
		mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val);
		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val);
		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val | MSMON_CFG_x_CTL_EN);
		/* Counting monitors require NRDY to be reset by software */
		mpam_write_monsel_reg(msc, MBWU, 0);
		break;
	default:
		pr_warn("Unexpected monitor type %d\n", m->type);
	}
}

static void __ris_msmon_read(void *arg)
{
	u64 now;
	bool nrdy = false;
	bool config_mismatch;
	struct mon_read *m = arg;
	struct mon_cfg *ctx = m->ctx;
	struct mpam_msc_ris *ris = m->ris;
	struct mpam_props *rprops = &ris->props;
	struct mpam_msc *msc = m->ris->vmsc->msc;
	u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt;

	if (!mpam_mon_sel_lock(msc)) {
		m->err = -EIO;
		return;
	}
	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, ctx->mon) |
		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
	mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);

	/*
	 * Read the existing configuration to avoid re-writing the same values.
	 * This saves waiting for 'nrdy' on subsequent reads.
	 */
	read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt);
	clean_msmon_ctl_val(&cur_ctl);
	gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val);
	config_mismatch = cur_flt != flt_val ||
			  cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN);

	if (config_mismatch)
		write_msmon_ctl_flt_vals(m, ctl_val, flt_val);

	switch (m->type) {
	case mpam_feat_msmon_csu:
		now = mpam_read_monsel_reg(msc, CSU);
		if (mpam_has_feature(mpam_feat_msmon_csu_hw_nrdy, rprops))
			nrdy = now & MSMON___NRDY;
		break;
	case mpam_feat_msmon_mbwu:
		now = mpam_read_monsel_reg(msc, MBWU);
		if (mpam_has_feature(mpam_feat_msmon_mbwu_hw_nrdy, rprops))
			nrdy = now & MSMON___NRDY;
		break;
	default:
		m->err = -EINVAL;
	}
	mpam_mon_sel_unlock(msc);

	if (nrdy) {
		m->err = -EBUSY;
		return;
	}

	now = FIELD_GET(MSMON___VALUE, now);
	*m->val += now;
}

static int _msmon_read(struct mpam_component *comp, struct mon_read *arg)
{
	int err, any_err = 0;
	struct mpam_vmsc *vmsc;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(vmsc, &comp->vmsc, comp_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		struct mpam_msc *msc = vmsc->msc;
		struct mpam_msc_ris *ris;

		list_for_each_entry_srcu(ris, &vmsc->ris, vmsc_list,
					 srcu_read_lock_held(&mpam_srcu)) {
			arg->ris = ris;

			err = smp_call_function_any(&msc->accessibility,
						    __ris_msmon_read, arg,
						    true);
			if (!err && arg->err)
				err = arg->err;

			/*
			 * Save one error to be returned to the caller, but
			 * keep reading counters so that they get reprogrammed.
			 * On platforms with NRDY this lets us wait once.
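			 * mpam_msmon_read() waits out the not-ready period
			 * and retries once.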
1071 */ 1072 if (err) 1073 any_err = err; 1074 } 1075 } 1076 1077 return any_err; 1078 } 1079 1080 int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, 1081 enum mpam_device_features type, u64 *val) 1082 { 1083 int err; 1084 struct mon_read arg; 1085 u64 wait_jiffies = 0; 1086 struct mpam_props *cprops = &comp->class->props; 1087 1088 might_sleep(); 1089 1090 if (!mpam_is_enabled()) 1091 return -EIO; 1092 1093 if (!mpam_has_feature(type, cprops)) 1094 return -EOPNOTSUPP; 1095 1096 arg = (struct mon_read) { 1097 .ctx = ctx, 1098 .type = type, 1099 .val = val, 1100 }; 1101 *val = 0; 1102 1103 err = _msmon_read(comp, &arg); 1104 if (err == -EBUSY && comp->class->nrdy_usec) 1105 wait_jiffies = usecs_to_jiffies(comp->class->nrdy_usec); 1106 1107 while (wait_jiffies) 1108 wait_jiffies = schedule_timeout_uninterruptible(wait_jiffies); 1109 1110 if (err == -EBUSY) { 1111 arg = (struct mon_read) { 1112 .ctx = ctx, 1113 .type = type, 1114 .val = val, 1115 }; 1116 *val = 0; 1117 1118 err = _msmon_read(comp, &arg); 1119 } 1120 1121 return err; 1122 } 1123 1124 static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) 1125 { 1126 u32 num_words, msb; 1127 u32 bm = ~0; 1128 int i; 1129 1130 lockdep_assert_held(&msc->part_sel_lock); 1131 1132 if (wd == 0) 1133 return; 1134 1135 /* 1136 * Write all ~0 to all but the last 32bit-word, which may 1137 * have fewer bits... 1138 */ 1139 num_words = DIV_ROUND_UP(wd, 32); 1140 for (i = 0; i < num_words - 1; i++, reg += sizeof(bm)) 1141 __mpam_write_reg(msc, reg, bm); 1142 1143 /* 1144 * ....and then the last (maybe) partial 32bit word. When wd is a 1145 * multiple of 32, msb should be 31 to write a full 32bit word. 1146 */ 1147 msb = (wd - 1) % 32; 1148 bm = GENMASK(msb, 0); 1149 __mpam_write_reg(msc, reg, bm); 1150 } 1151 1152 /* Called via IPI. Call while holding an SRCU reference */ 1153 static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, 1154 struct mpam_config *cfg) 1155 { 1156 u32 pri_val = 0; 1157 u16 cmax = MPAMCFG_CMAX_CMAX; 1158 struct mpam_msc *msc = ris->vmsc->msc; 1159 struct mpam_props *rprops = &ris->props; 1160 u16 dspri = GENMASK(rprops->dspri_wd, 0); 1161 u16 intpri = GENMASK(rprops->intpri_wd, 0); 1162 1163 mutex_lock(&msc->part_sel_lock); 1164 __mpam_part_sel(ris->ris_idx, partid, msc); 1165 1166 if (mpam_has_feature(mpam_feat_partid_nrw, rprops)) { 1167 /* Update the intpartid mapping */ 1168 mpam_write_partsel_reg(msc, INTPARTID, 1169 MPAMCFG_INTPARTID_INTERNAL | partid); 1170 1171 /* 1172 * Then switch to the 'internal' partid to update the 1173 * configuration. 
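		 * (PART_SEL_INTERNAL is set, so the writes below land on the
		 * intpartid's storage.)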
1174 */ 1175 __mpam_intpart_sel(ris->ris_idx, partid, msc); 1176 } 1177 1178 if (mpam_has_feature(mpam_feat_cpor_part, rprops) && 1179 mpam_has_feature(mpam_feat_cpor_part, cfg)) { 1180 if (cfg->reset_cpbm) 1181 mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd); 1182 else 1183 mpam_write_partsel_reg(msc, CPBM, cfg->cpbm); 1184 } 1185 1186 if (mpam_has_feature(mpam_feat_mbw_part, rprops) && 1187 mpam_has_feature(mpam_feat_mbw_part, cfg)) { 1188 if (cfg->reset_mbw_pbm) 1189 mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits); 1190 else 1191 mpam_write_partsel_reg(msc, MBW_PBM, cfg->mbw_pbm); 1192 } 1193 1194 if (mpam_has_feature(mpam_feat_mbw_min, rprops) && 1195 mpam_has_feature(mpam_feat_mbw_min, cfg)) 1196 mpam_write_partsel_reg(msc, MBW_MIN, 0); 1197 1198 if (mpam_has_feature(mpam_feat_mbw_max, rprops) && 1199 mpam_has_feature(mpam_feat_mbw_max, cfg)) { 1200 if (cfg->reset_mbw_max) 1201 mpam_write_partsel_reg(msc, MBW_MAX, MPAMCFG_MBW_MAX_MAX); 1202 else 1203 mpam_write_partsel_reg(msc, MBW_MAX, cfg->mbw_max); 1204 } 1205 1206 if (mpam_has_feature(mpam_feat_mbw_prop, rprops) && 1207 mpam_has_feature(mpam_feat_mbw_prop, cfg)) 1208 mpam_write_partsel_reg(msc, MBW_PROP, 0); 1209 1210 if (mpam_has_feature(mpam_feat_cmax_cmax, rprops)) 1211 mpam_write_partsel_reg(msc, CMAX, cmax); 1212 1213 if (mpam_has_feature(mpam_feat_cmax_cmin, rprops)) 1214 mpam_write_partsel_reg(msc, CMIN, 0); 1215 1216 if (mpam_has_feature(mpam_feat_cmax_cassoc, rprops)) 1217 mpam_write_partsel_reg(msc, CASSOC, MPAMCFG_CASSOC_CASSOC); 1218 1219 if (mpam_has_feature(mpam_feat_intpri_part, rprops) || 1220 mpam_has_feature(mpam_feat_dspri_part, rprops)) { 1221 /* aces high? */ 1222 if (!mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) 1223 intpri = 0; 1224 if (!mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) 1225 dspri = 0; 1226 1227 if (mpam_has_feature(mpam_feat_intpri_part, rprops)) 1228 pri_val |= FIELD_PREP(MPAMCFG_PRI_INTPRI, intpri); 1229 if (mpam_has_feature(mpam_feat_dspri_part, rprops)) 1230 pri_val |= FIELD_PREP(MPAMCFG_PRI_DSPRI, dspri); 1231 1232 mpam_write_partsel_reg(msc, PRI, pri_val); 1233 } 1234 1235 mutex_unlock(&msc->part_sel_lock); 1236 } 1237 1238 static void mpam_init_reset_cfg(struct mpam_config *reset_cfg) 1239 { 1240 *reset_cfg = (struct mpam_config) { 1241 .reset_cpbm = true, 1242 .reset_mbw_pbm = true, 1243 .reset_mbw_max = true, 1244 }; 1245 bitmap_fill(reset_cfg->features, MPAM_FEATURE_LAST); 1246 } 1247 1248 /* 1249 * Called via smp_call_on_cpu() to prevent migration, while still being 1250 * pre-emptible. Caller must hold mpam_srcu. 1251 */ 1252 static int mpam_reset_ris(void *arg) 1253 { 1254 u16 partid, partid_max; 1255 struct mpam_config reset_cfg; 1256 struct mpam_msc_ris *ris = arg; 1257 1258 if (ris->in_reset_state) 1259 return 0; 1260 1261 mpam_init_reset_cfg(&reset_cfg); 1262 1263 spin_lock(&partid_max_lock); 1264 partid_max = mpam_partid_max; 1265 spin_unlock(&partid_max_lock); 1266 for (partid = 0; partid <= partid_max; partid++) 1267 mpam_reprogram_ris_partid(ris, partid, &reset_cfg); 1268 1269 return 0; 1270 } 1271 1272 /* 1273 * Get the preferred CPU for this MSC. If it is accessible from this CPU, 1274 * this CPU is preferred. This can be preempted/migrated, it will only result 1275 * in more work. 
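 * Otherwise any online CPU that can access the MSC will do.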
1276 */ 1277 static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc) 1278 { 1279 int cpu = raw_smp_processor_id(); 1280 1281 if (cpumask_test_cpu(cpu, &msc->accessibility)) 1282 return cpu; 1283 1284 return cpumask_first_and(&msc->accessibility, cpu_online_mask); 1285 } 1286 1287 static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg) 1288 { 1289 lockdep_assert_irqs_enabled(); 1290 lockdep_assert_cpus_held(); 1291 WARN_ON_ONCE(!srcu_read_lock_held((&mpam_srcu))); 1292 1293 return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true); 1294 } 1295 1296 struct mpam_write_config_arg { 1297 struct mpam_msc_ris *ris; 1298 struct mpam_component *comp; 1299 u16 partid; 1300 }; 1301 1302 static int __write_config(void *arg) 1303 { 1304 struct mpam_write_config_arg *c = arg; 1305 1306 mpam_reprogram_ris_partid(c->ris, c->partid, &c->comp->cfg[c->partid]); 1307 1308 return 0; 1309 } 1310 1311 static void mpam_reprogram_msc(struct mpam_msc *msc) 1312 { 1313 u16 partid; 1314 bool reset; 1315 struct mpam_config *cfg; 1316 struct mpam_msc_ris *ris; 1317 struct mpam_write_config_arg arg; 1318 1319 /* 1320 * No lock for mpam_partid_max as partid_max_published has been 1321 * set by mpam_enabled(), so the values can no longer change. 1322 */ 1323 mpam_assert_partid_sizes_fixed(); 1324 1325 mutex_lock(&msc->cfg_lock); 1326 list_for_each_entry_srcu(ris, &msc->ris, msc_list, 1327 srcu_read_lock_held(&mpam_srcu)) { 1328 if (!mpam_is_enabled() && !ris->in_reset_state) { 1329 mpam_touch_msc(msc, &mpam_reset_ris, ris); 1330 ris->in_reset_state = true; 1331 continue; 1332 } 1333 1334 arg.comp = ris->vmsc->comp; 1335 arg.ris = ris; 1336 reset = true; 1337 for (partid = 0; partid <= mpam_partid_max; partid++) { 1338 cfg = &ris->vmsc->comp->cfg[partid]; 1339 if (!bitmap_empty(cfg->features, MPAM_FEATURE_LAST)) 1340 reset = false; 1341 1342 arg.partid = partid; 1343 mpam_touch_msc(msc, __write_config, &arg); 1344 } 1345 ris->in_reset_state = reset; 1346 } 1347 mutex_unlock(&msc->cfg_lock); 1348 } 1349 1350 static void _enable_percpu_irq(void *_irq) 1351 { 1352 int *irq = _irq; 1353 1354 enable_percpu_irq(*irq, IRQ_TYPE_NONE); 1355 } 1356 1357 static int mpam_cpu_online(unsigned int cpu) 1358 { 1359 struct mpam_msc *msc; 1360 1361 guard(srcu)(&mpam_srcu); 1362 list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list, 1363 srcu_read_lock_held(&mpam_srcu)) { 1364 if (!cpumask_test_cpu(cpu, &msc->accessibility)) 1365 continue; 1366 1367 if (msc->reenable_error_ppi) 1368 _enable_percpu_irq(&msc->reenable_error_ppi); 1369 1370 if (atomic_fetch_inc(&msc->online_refs) == 0) 1371 mpam_reprogram_msc(msc); 1372 } 1373 1374 return 0; 1375 } 1376 1377 /* Before mpam is enabled, try to probe new MSC */ 1378 static int mpam_discovery_cpu_online(unsigned int cpu) 1379 { 1380 int err = 0; 1381 struct mpam_msc *msc; 1382 bool new_device_probed = false; 1383 1384 if (mpam_is_enabled()) 1385 return 0; 1386 1387 guard(srcu)(&mpam_srcu); 1388 list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list, 1389 srcu_read_lock_held(&mpam_srcu)) { 1390 if (!cpumask_test_cpu(cpu, &msc->accessibility)) 1391 continue; 1392 1393 mutex_lock(&msc->probe_lock); 1394 if (!msc->probed) 1395 err = mpam_msc_hw_probe(msc); 1396 mutex_unlock(&msc->probe_lock); 1397 1398 if (err) 1399 break; 1400 new_device_probed = true; 1401 } 1402 1403 if (new_device_probed && !err) 1404 schedule_work(&mpam_enable_work); 1405 if (err) { 1406 mpam_disable_reason = "error during probing"; 1407 schedule_work(&mpam_broken_work); 1408 } 1409 
1410 return err; 1411 } 1412 1413 static int mpam_cpu_offline(unsigned int cpu) 1414 { 1415 struct mpam_msc *msc; 1416 1417 guard(srcu)(&mpam_srcu); 1418 list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list, 1419 srcu_read_lock_held(&mpam_srcu)) { 1420 if (!cpumask_test_cpu(cpu, &msc->accessibility)) 1421 continue; 1422 1423 if (msc->reenable_error_ppi) 1424 disable_percpu_irq(msc->reenable_error_ppi); 1425 1426 if (atomic_dec_and_test(&msc->online_refs)) { 1427 struct mpam_msc_ris *ris; 1428 1429 mutex_lock(&msc->cfg_lock); 1430 list_for_each_entry_srcu(ris, &msc->ris, msc_list, 1431 srcu_read_lock_held(&mpam_srcu)) { 1432 mpam_touch_msc(msc, &mpam_reset_ris, ris); 1433 1434 /* 1435 * The reset state for non-zero partid may be 1436 * lost while the CPUs are offline. 1437 */ 1438 ris->in_reset_state = false; 1439 } 1440 mutex_unlock(&msc->cfg_lock); 1441 } 1442 } 1443 1444 return 0; 1445 } 1446 1447 static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online), 1448 int (*offline)(unsigned int offline), 1449 char *name) 1450 { 1451 mutex_lock(&mpam_cpuhp_state_lock); 1452 if (mpam_cpuhp_state) { 1453 cpuhp_remove_state(mpam_cpuhp_state); 1454 mpam_cpuhp_state = 0; 1455 } 1456 1457 mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, name, online, 1458 offline); 1459 if (mpam_cpuhp_state <= 0) { 1460 pr_err("Failed to register cpuhp callbacks"); 1461 mpam_cpuhp_state = 0; 1462 } 1463 mutex_unlock(&mpam_cpuhp_state_lock); 1464 } 1465 1466 static int __setup_ppi(struct mpam_msc *msc) 1467 { 1468 int cpu; 1469 1470 msc->error_dev_id = alloc_percpu(struct mpam_msc *); 1471 if (!msc->error_dev_id) 1472 return -ENOMEM; 1473 1474 for_each_cpu(cpu, &msc->accessibility) 1475 *per_cpu_ptr(msc->error_dev_id, cpu) = msc; 1476 1477 return 0; 1478 } 1479 1480 static int mpam_msc_setup_error_irq(struct mpam_msc *msc) 1481 { 1482 int irq; 1483 1484 irq = platform_get_irq_byname_optional(msc->pdev, "error"); 1485 if (irq <= 0) 1486 return 0; 1487 1488 /* Allocate and initialise the percpu device pointer for PPI */ 1489 if (irq_is_percpu(irq)) 1490 return __setup_ppi(msc); 1491 1492 /* sanity check: shared interrupts can be routed anywhere? */ 1493 if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) { 1494 pr_err_once("msc:%u is a private resource with a shared error interrupt", 1495 msc->id); 1496 return -EINVAL; 1497 } 1498 1499 return 0; 1500 } 1501 1502 /* 1503 * An MSC can control traffic from a set of CPUs, but may only be accessible 1504 * from a (hopefully wider) set of CPUs. The common reason for this is power 1505 * management. If all the CPUs in a cluster are in PSCI:CPU_SUSPEND, the 1506 * corresponding cache may also be powered off. By making accesses from 1507 * one of those CPUs, we ensure we don't access a cache that's powered off. 1508 */ 1509 static void update_msc_accessibility(struct mpam_msc *msc) 1510 { 1511 u32 affinity_id; 1512 int err; 1513 1514 err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity", 1515 &affinity_id); 1516 if (err) 1517 cpumask_copy(&msc->accessibility, cpu_possible_mask); 1518 else 1519 acpi_pptt_get_cpus_from_container(affinity_id, &msc->accessibility); 1520 } 1521 1522 /* 1523 * There are two ways of reaching a struct mpam_msc_ris. Via the 1524 * class->component->vmsc->ris, or via the msc. 1525 * When destroying the msc, the other side needs unlinking and cleaning up too. 
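 * mpam_ris_destroy() removes each RIS from both lists and destroys any
 * vmsc/component/class that becomes empty.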
1526 */ 1527 static void mpam_msc_destroy(struct mpam_msc *msc) 1528 { 1529 struct platform_device *pdev = msc->pdev; 1530 struct mpam_msc_ris *ris, *tmp; 1531 1532 lockdep_assert_held(&mpam_list_lock); 1533 1534 list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list) 1535 mpam_ris_destroy(ris); 1536 1537 list_del_rcu(&msc->all_msc_list); 1538 platform_set_drvdata(pdev, NULL); 1539 1540 add_to_garbage(msc); 1541 } 1542 1543 static void mpam_msc_drv_remove(struct platform_device *pdev) 1544 { 1545 struct mpam_msc *msc = platform_get_drvdata(pdev); 1546 1547 mutex_lock(&mpam_list_lock); 1548 mpam_msc_destroy(msc); 1549 mutex_unlock(&mpam_list_lock); 1550 1551 mpam_free_garbage(); 1552 } 1553 1554 static struct mpam_msc *do_mpam_msc_drv_probe(struct platform_device *pdev) 1555 { 1556 int err; 1557 u32 tmp; 1558 struct mpam_msc *msc; 1559 struct resource *msc_res; 1560 struct device *dev = &pdev->dev; 1561 1562 lockdep_assert_held(&mpam_list_lock); 1563 1564 msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL); 1565 if (!msc) 1566 return ERR_PTR(-ENOMEM); 1567 init_garbage(&msc->garbage); 1568 msc->garbage.pdev = pdev; 1569 1570 err = devm_mutex_init(dev, &msc->probe_lock); 1571 if (err) 1572 return ERR_PTR(err); 1573 1574 err = devm_mutex_init(dev, &msc->part_sel_lock); 1575 if (err) 1576 return ERR_PTR(err); 1577 1578 err = devm_mutex_init(dev, &msc->error_irq_lock); 1579 if (err) 1580 return ERR_PTR(err); 1581 1582 err = devm_mutex_init(dev, &msc->cfg_lock); 1583 if (err) 1584 return ERR_PTR(err); 1585 1586 mpam_mon_sel_lock_init(msc); 1587 msc->id = pdev->id; 1588 msc->pdev = pdev; 1589 INIT_LIST_HEAD_RCU(&msc->all_msc_list); 1590 INIT_LIST_HEAD_RCU(&msc->ris); 1591 1592 update_msc_accessibility(msc); 1593 if (cpumask_empty(&msc->accessibility)) { 1594 dev_err_once(dev, "MSC is not accessible from any CPU!"); 1595 return ERR_PTR(-EINVAL); 1596 } 1597 1598 err = mpam_msc_setup_error_irq(msc); 1599 if (err) 1600 return ERR_PTR(err); 1601 1602 if (device_property_read_u32(&pdev->dev, "pcc-channel", &tmp)) 1603 msc->iface = MPAM_IFACE_MMIO; 1604 else 1605 msc->iface = MPAM_IFACE_PCC; 1606 1607 if (msc->iface == MPAM_IFACE_MMIO) { 1608 void __iomem *io; 1609 1610 io = devm_platform_get_and_ioremap_resource(pdev, 0, 1611 &msc_res); 1612 if (IS_ERR(io)) { 1613 dev_err_once(dev, "Failed to map MSC base address\n"); 1614 return ERR_CAST(io); 1615 } 1616 msc->mapped_hwpage_sz = msc_res->end - msc_res->start; 1617 msc->mapped_hwpage = io; 1618 } else { 1619 return ERR_PTR(-EINVAL); 1620 } 1621 1622 list_add_rcu(&msc->all_msc_list, &mpam_all_msc); 1623 platform_set_drvdata(pdev, msc); 1624 1625 return msc; 1626 } 1627 1628 static int fw_num_msc; 1629 1630 static int mpam_msc_drv_probe(struct platform_device *pdev) 1631 { 1632 int err; 1633 struct mpam_msc *msc = NULL; 1634 void *plat_data = pdev->dev.platform_data; 1635 1636 mutex_lock(&mpam_list_lock); 1637 msc = do_mpam_msc_drv_probe(pdev); 1638 mutex_unlock(&mpam_list_lock); 1639 1640 if (IS_ERR(msc)) 1641 return PTR_ERR(msc); 1642 1643 /* Create RIS entries described by firmware */ 1644 err = acpi_mpam_parse_resources(msc, plat_data); 1645 if (err) { 1646 mpam_msc_drv_remove(pdev); 1647 return err; 1648 } 1649 1650 if (atomic_add_return(1, &mpam_num_msc) == fw_num_msc) 1651 mpam_register_cpuhp_callbacks(mpam_discovery_cpu_online, NULL, 1652 "mpam:drv_probe"); 1653 1654 return 0; 1655 } 1656 1657 static struct platform_driver mpam_msc_driver = { 1658 .driver = { 1659 .name = "mpam_msc", 1660 }, 1661 .probe = mpam_msc_drv_probe, 1662 .remove = 
mpam_msc_drv_remove, 1663 }; 1664 1665 /* Any of these features mean the BWA_WD field is valid. */ 1666 static bool mpam_has_bwa_wd_feature(struct mpam_props *props) 1667 { 1668 if (mpam_has_feature(mpam_feat_mbw_min, props)) 1669 return true; 1670 if (mpam_has_feature(mpam_feat_mbw_max, props)) 1671 return true; 1672 if (mpam_has_feature(mpam_feat_mbw_prop, props)) 1673 return true; 1674 return false; 1675 } 1676 1677 /* Any of these features mean the CMAX_WD field is valid. */ 1678 static bool mpam_has_cmax_wd_feature(struct mpam_props *props) 1679 { 1680 if (mpam_has_feature(mpam_feat_cmax_cmax, props)) 1681 return true; 1682 if (mpam_has_feature(mpam_feat_cmax_cmin, props)) 1683 return true; 1684 return false; 1685 } 1686 1687 #define MISMATCHED_HELPER(parent, child, helper, field, alias) \ 1688 helper(parent) && \ 1689 ((helper(child) && (parent)->field != (child)->field) || \ 1690 (!helper(child) && !(alias))) 1691 1692 #define MISMATCHED_FEAT(parent, child, feat, field, alias) \ 1693 mpam_has_feature((feat), (parent)) && \ 1694 ((mpam_has_feature((feat), (child)) && (parent)->field != (child)->field) || \ 1695 (!mpam_has_feature((feat), (child)) && !(alias))) 1696 1697 #define CAN_MERGE_FEAT(parent, child, feat, alias) \ 1698 (alias) && !mpam_has_feature((feat), (parent)) && \ 1699 mpam_has_feature((feat), (child)) 1700 1701 /* 1702 * Combine two props fields. 1703 * If this is for controls that alias the same resource, it is safe to just 1704 * copy the values over. If two aliasing controls implement the same scheme 1705 * a safe value must be picked. 1706 * For non-aliasing controls, these control different resources, and the 1707 * resulting safe value must be compatible with both. When merging values in 1708 * the tree, all the aliasing resources must be handled first. 1709 * On mismatch, parent is modified. 
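 * e.g. if only one of two aliasing RIS has CPOR, the vMSC inherits its
 * cpbm_wd; if two non-aliasing vMSC have different cpbm_wd, the class
 * drops cpor_part entirely.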
1710 */ 1711 static void __props_mismatch(struct mpam_props *parent, 1712 struct mpam_props *child, bool alias) 1713 { 1714 if (CAN_MERGE_FEAT(parent, child, mpam_feat_cpor_part, alias)) { 1715 parent->cpbm_wd = child->cpbm_wd; 1716 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_cpor_part, 1717 cpbm_wd, alias)) { 1718 pr_debug("cleared cpor_part\n"); 1719 mpam_clear_feature(mpam_feat_cpor_part, parent); 1720 parent->cpbm_wd = 0; 1721 } 1722 1723 if (CAN_MERGE_FEAT(parent, child, mpam_feat_mbw_part, alias)) { 1724 parent->mbw_pbm_bits = child->mbw_pbm_bits; 1725 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_mbw_part, 1726 mbw_pbm_bits, alias)) { 1727 pr_debug("cleared mbw_part\n"); 1728 mpam_clear_feature(mpam_feat_mbw_part, parent); 1729 parent->mbw_pbm_bits = 0; 1730 } 1731 1732 /* bwa_wd is a count of bits, fewer bits means less precision */ 1733 if (alias && !mpam_has_bwa_wd_feature(parent) && 1734 mpam_has_bwa_wd_feature(child)) { 1735 parent->bwa_wd = child->bwa_wd; 1736 } else if (MISMATCHED_HELPER(parent, child, mpam_has_bwa_wd_feature, 1737 bwa_wd, alias)) { 1738 pr_debug("took the min bwa_wd\n"); 1739 parent->bwa_wd = min(parent->bwa_wd, child->bwa_wd); 1740 } 1741 1742 if (alias && !mpam_has_cmax_wd_feature(parent) && mpam_has_cmax_wd_feature(child)) { 1743 parent->cmax_wd = child->cmax_wd; 1744 } else if (MISMATCHED_HELPER(parent, child, mpam_has_cmax_wd_feature, 1745 cmax_wd, alias)) { 1746 pr_debug("%s took the min cmax_wd\n", __func__); 1747 parent->cmax_wd = min(parent->cmax_wd, child->cmax_wd); 1748 } 1749 1750 if (CAN_MERGE_FEAT(parent, child, mpam_feat_cmax_cassoc, alias)) { 1751 parent->cassoc_wd = child->cassoc_wd; 1752 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_cmax_cassoc, 1753 cassoc_wd, alias)) { 1754 pr_debug("%s cleared cassoc_wd\n", __func__); 1755 mpam_clear_feature(mpam_feat_cmax_cassoc, parent); 1756 parent->cassoc_wd = 0; 1757 } 1758 1759 /* For num properties, take the minimum */ 1760 if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_csu, alias)) { 1761 parent->num_csu_mon = child->num_csu_mon; 1762 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_csu, 1763 num_csu_mon, alias)) { 1764 pr_debug("took the min num_csu_mon\n"); 1765 parent->num_csu_mon = min(parent->num_csu_mon, 1766 child->num_csu_mon); 1767 } 1768 1769 if (CAN_MERGE_FEAT(parent, child, mpam_feat_msmon_mbwu, alias)) { 1770 parent->num_mbwu_mon = child->num_mbwu_mon; 1771 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_msmon_mbwu, 1772 num_mbwu_mon, alias)) { 1773 pr_debug("took the min num_mbwu_mon\n"); 1774 parent->num_mbwu_mon = min(parent->num_mbwu_mon, 1775 child->num_mbwu_mon); 1776 } 1777 1778 if (CAN_MERGE_FEAT(parent, child, mpam_feat_intpri_part, alias)) { 1779 parent->intpri_wd = child->intpri_wd; 1780 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_intpri_part, 1781 intpri_wd, alias)) { 1782 pr_debug("%s took the min intpri_wd\n", __func__); 1783 parent->intpri_wd = min(parent->intpri_wd, child->intpri_wd); 1784 } 1785 1786 if (CAN_MERGE_FEAT(parent, child, mpam_feat_dspri_part, alias)) { 1787 parent->dspri_wd = child->dspri_wd; 1788 } else if (MISMATCHED_FEAT(parent, child, mpam_feat_dspri_part, 1789 dspri_wd, alias)) { 1790 pr_debug("%s took the min dspri_wd\n", __func__); 1791 parent->dspri_wd = min(parent->dspri_wd, child->dspri_wd); 1792 } 1793 1794 /* TODO: alias support for these two */ 1795 /* {int,ds}pri may not have differing 0-low behaviour */ 1796 if (mpam_has_feature(mpam_feat_intpri_part, parent) && 1797 
(!mpam_has_feature(mpam_feat_intpri_part, child) || 1798 mpam_has_feature(mpam_feat_intpri_part_0_low, parent) != 1799 mpam_has_feature(mpam_feat_intpri_part_0_low, child))) { 1800 pr_debug("%s cleared intpri_part\n", __func__); 1801 mpam_clear_feature(mpam_feat_intpri_part, parent); 1802 mpam_clear_feature(mpam_feat_intpri_part_0_low, parent); 1803 } 1804 if (mpam_has_feature(mpam_feat_dspri_part, parent) && 1805 (!mpam_has_feature(mpam_feat_dspri_part, child) || 1806 mpam_has_feature(mpam_feat_dspri_part_0_low, parent) != 1807 mpam_has_feature(mpam_feat_dspri_part_0_low, child))) { 1808 pr_debug("%s cleared dspri_part\n", __func__); 1809 mpam_clear_feature(mpam_feat_dspri_part, parent); 1810 mpam_clear_feature(mpam_feat_dspri_part_0_low, parent); 1811 } 1812 1813 if (alias) { 1814 /* Merge features for aliased resources */ 1815 bitmap_or(parent->features, parent->features, child->features, MPAM_FEATURE_LAST); 1816 } else { 1817 /* Clear missing features for non aliasing */ 1818 bitmap_and(parent->features, parent->features, child->features, MPAM_FEATURE_LAST); 1819 } 1820 } 1821 1822 /* 1823 * If a vmsc doesn't match class feature/configuration, do the right thing(tm). 1824 * For 'num' properties we can just take the minimum. 1825 * For properties where the mismatched unused bits would make a difference, we 1826 * nobble the class feature, as we can't configure all the resources. 1827 * e.g. The L3 cache is composed of two resources with 13 and 17 portion 1828 * bitmaps respectively. 1829 */ 1830 static void 1831 __class_props_mismatch(struct mpam_class *class, struct mpam_vmsc *vmsc) 1832 { 1833 struct mpam_props *cprops = &class->props; 1834 struct mpam_props *vprops = &vmsc->props; 1835 struct device *dev = &vmsc->msc->pdev->dev; 1836 1837 lockdep_assert_held(&mpam_list_lock); /* we modify class */ 1838 1839 dev_dbg(dev, "Merging features for class:0x%lx &= vmsc:0x%lx\n", 1840 (long)cprops->features, (long)vprops->features); 1841 1842 /* Take the safe value for any common features */ 1843 __props_mismatch(cprops, vprops, false); 1844 } 1845 1846 static void 1847 __vmsc_props_mismatch(struct mpam_vmsc *vmsc, struct mpam_msc_ris *ris) 1848 { 1849 struct mpam_props *rprops = &ris->props; 1850 struct mpam_props *vprops = &vmsc->props; 1851 struct device *dev = &vmsc->msc->pdev->dev; 1852 1853 lockdep_assert_held(&mpam_list_lock); /* we modify vmsc */ 1854 1855 dev_dbg(dev, "Merging features for vmsc:0x%lx |= ris:0x%lx\n", 1856 (long)vprops->features, (long)rprops->features); 1857 1858 /* 1859 * Merge mismatched features - Copy any features that aren't common, 1860 * but take the safe value for any common features. 1861 */ 1862 __props_mismatch(vprops, rprops, true); 1863 } 1864 1865 /* 1866 * Copy the first component's first vMSC's properties and features to the 1867 * class. __class_props_mismatch() will remove conflicts. 1868 * It is not possible to have a class with no components, or a component with 1869 * no resources. The vMSC properties have already been built. 
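 * This makes the list_first_entry() calls below safe.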
 */
static void mpam_enable_init_class_features(struct mpam_class *class)
{
	struct mpam_vmsc *vmsc;
	struct mpam_component *comp;

	comp = list_first_entry(&class->components,
				struct mpam_component, class_list);
	vmsc = list_first_entry(&comp->vmsc,
				struct mpam_vmsc, comp_list);

	class->props = vmsc->props;
}

static void mpam_enable_merge_vmsc_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_msc_ris *ris;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list) {
		list_for_each_entry(ris, &vmsc->ris, vmsc_list) {
			__vmsc_props_mismatch(vmsc, ris);
			class->nrdy_usec = max(class->nrdy_usec,
					       vmsc->msc->nrdy_usec);
		}
	}
}

static void mpam_enable_merge_class_features(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;
	struct mpam_class *class = comp->class;

	list_for_each_entry(vmsc, &comp->vmsc, comp_list)
		__class_props_mismatch(class, vmsc);
}

/*
 * Merge all the common resource features into the class.
 * vmsc features are bitwise-OR'd together by mpam_enable_merge_vmsc_features()
 * as the first step, so that mpam_enable_init_class_features() can initialise
 * the class with a representative set of features.
 * Next, mpam_enable_merge_class_features() bitwise-ANDs all the vmsc features
 * to form the class features.
 * Other features are the min/max as appropriate.
 *
 * To avoid walking the whole tree twice, the class->nrdy_usec property is
 * updated when working with the vmsc as it is a max(), and doesn't need
 * initialising first.
 */
static void mpam_enable_merge_features(struct list_head *all_classes_list)
{
	struct mpam_class *class;
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, all_classes_list, classes_list) {
		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_vmsc_features(comp);

		mpam_enable_init_class_features(class);

		list_for_each_entry(comp, &class->components, class_list)
			mpam_enable_merge_class_features(comp);
	}
}

static char *mpam_errcode_names[16] = {
	[MPAM_ERRCODE_NONE] = "No error",
	[MPAM_ERRCODE_PARTID_SEL_RANGE] = "PARTID_SEL_Range",
	[MPAM_ERRCODE_REQ_PARTID_RANGE] = "Req_PARTID_Range",
	[MPAM_ERRCODE_MSMONCFG_ID_RANGE] = "MSMONCFG_ID_RANGE",
	[MPAM_ERRCODE_REQ_PMG_RANGE] = "Req_PMG_Range",
	[MPAM_ERRCODE_MONITOR_RANGE] = "Monitor_Range",
	[MPAM_ERRCODE_INTPARTID_RANGE] = "intPARTID_Range",
	[MPAM_ERRCODE_UNEXPECTED_INTERNAL] = "Unexpected_INTERNAL",
	[MPAM_ERRCODE_UNDEFINED_RIS_PART_SEL] = "Undefined_RIS_PART_SEL",
	[MPAM_ERRCODE_RIS_NO_CONTROL] = "RIS_No_Control",
	[MPAM_ERRCODE_UNDEFINED_RIS_MON_SEL] = "Undefined_RIS_MON_SEL",
	[MPAM_ERRCODE_RIS_NO_MONITOR] = "RIS_No_Monitor",
	[12 ... 15] = "Reserved"
};

static int mpam_enable_msc_ecr(void *_msc)
{
	struct mpam_msc *msc = _msc;

	__mpam_write_reg(msc, MPAMF_ECR, MPAMF_ECR_INTEN);

	return 0;
}

/* This can run in mpam_disable(), and the interrupt handler on the same CPU */
static int mpam_disable_msc_ecr(void *_msc)
{
	struct mpam_msc *msc = _msc;

	__mpam_write_reg(msc, MPAMF_ECR, 0);

	return 0;
}

static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc)
{
	u64 reg;
	u16 partid;
	u8 errcode, pmg, ris;

	if (WARN_ON_ONCE(!msc) ||
	    WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &msc->accessibility)))
		return IRQ_NONE;

	reg = mpam_msc_read_esr(msc);

	errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg);
	if (!errcode)
		return IRQ_NONE;

	/* Clear level triggered irq */
	mpam_msc_clear_esr(msc);

	partid = FIELD_GET(MPAMF_ESR_PARTID_MON, reg);
	pmg = FIELD_GET(MPAMF_ESR_PMG, reg);
	ris = FIELD_GET(MPAMF_ESR_RIS, reg);

	pr_err_ratelimited("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n",
			   msc->id, mpam_errcode_names[errcode], partid, pmg,
			   ris);

	/* Disable this interrupt. */
	mpam_disable_msc_ecr(msc);

	/* Are we racing with the thread disabling MPAM? */
	if (!mpam_is_enabled())
		return IRQ_HANDLED;

	/*
	 * Schedule the teardown work. Don't use a threaded IRQ as we can't
	 * unregister the interrupt from the threaded part of the handler.
	 */
	mpam_disable_reason = "hardware error interrupt";
	schedule_work(&mpam_broken_work);

	return IRQ_HANDLED;
}

static irqreturn_t mpam_ppi_handler(int irq, void *dev_id)
{
	struct mpam_msc *msc = *(struct mpam_msc **)dev_id;

	return __mpam_irq_handler(irq, msc);
}

static irqreturn_t mpam_spi_handler(int irq, void *dev_id)
{
	struct mpam_msc *msc = dev_id;

	return __mpam_irq_handler(irq, msc);
}

static int mpam_register_irqs(void)
{
	int err, irq;
	struct mpam_msc *msc;

	lockdep_assert_cpus_held();

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		irq = platform_get_irq_byname_optional(msc->pdev, "error");
		if (irq <= 0)
			continue;

		/* The MPAM spec says the interrupt can be SPI, PPI or LPI */
		/* We anticipate sharing the interrupt with other MSCs */
		if (irq_is_percpu(irq)) {
			err = request_percpu_irq(irq, &mpam_ppi_handler,
						 "mpam:msc:error",
						 msc->error_dev_id);
			if (err)
				return err;

			msc->reenable_error_ppi = irq;
			smp_call_function_many(&msc->accessibility,
					       &_enable_percpu_irq, &irq,
					       true);
		} else {
			err = devm_request_irq(&msc->pdev->dev, irq,
					       &mpam_spi_handler, IRQF_SHARED,
					       "mpam:msc:error", msc);
			if (err)
				return err;
		}

		mutex_lock(&msc->error_irq_lock);
		msc->error_irq_req = true;
		mpam_touch_msc(msc, mpam_enable_msc_ecr, msc);
		msc->error_irq_hw_enabled = true;
		mutex_unlock(&msc->error_irq_lock);
	}

	return 0;
}

static void mpam_unregister_irqs(void)
{
	int irq;
	struct mpam_msc *msc;

	guard(cpus_read_lock)();
	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		irq = platform_get_irq_byname_optional(msc->pdev, "error");
		if (irq <= 0)
			continue;

		mutex_lock(&msc->error_irq_lock);
		if (msc->error_irq_hw_enabled) {
			mpam_touch_msc(msc, mpam_disable_msc_ecr, msc);
			msc->error_irq_hw_enabled = false;
		}

		if (msc->error_irq_req) {
			if (irq_is_percpu(irq)) {
				msc->reenable_error_ppi = 0;
				free_percpu_irq(irq, msc->error_dev_id);
			} else {
				devm_free_irq(&msc->pdev->dev, irq, msc);
			}
			msc->error_irq_req = false;
		}
		mutex_unlock(&msc->error_irq_lock);
	}
}

static void __destroy_component_cfg(struct mpam_component *comp)
{
	add_to_garbage(comp->cfg);
}

static void mpam_reset_component_cfg(struct mpam_component *comp)
{
	int i;
	struct mpam_props *cprops = &comp->class->props;

	mpam_assert_partid_sizes_fixed();

	if (!comp->cfg)
		return;

	for (i = 0; i <= mpam_partid_max; i++) {
		comp->cfg[i] = (struct mpam_config) {};
		if (cprops->cpbm_wd)
			comp->cfg[i].cpbm = GENMASK(cprops->cpbm_wd - 1, 0);
		if (cprops->mbw_pbm_bits)
			comp->cfg[i].mbw_pbm = GENMASK(cprops->mbw_pbm_bits - 1, 0);
		if (cprops->bwa_wd)
			comp->cfg[i].mbw_max = GENMASK(15, 16 - cprops->bwa_wd);
	}
}

static int __allocate_component_cfg(struct mpam_component *comp)
{
	mpam_assert_partid_sizes_fixed();

	if (comp->cfg)
		return 0;

	comp->cfg = kcalloc(mpam_partid_max + 1, sizeof(*comp->cfg), GFP_KERNEL);
	if (!comp->cfg)
		return -ENOMEM;

	/*
	 * The array is free()d in one go, so only cfg[0]'s structure needs
	 * to be initialised.
	 */
	init_garbage(&comp->cfg[0].garbage);

	mpam_reset_component_cfg(comp);

	return 0;
}

static int mpam_allocate_config(void)
{
	struct mpam_class *class;
	struct mpam_component *comp;

	lockdep_assert_held(&mpam_list_lock);

	list_for_each_entry(class, &mpam_classes, classes_list) {
		list_for_each_entry(comp, &class->components, class_list) {
			int err = __allocate_component_cfg(comp);

			if (err)
				return err;
		}
	}

	return 0;
}

static void mpam_enable_once(void)
{
	int err;

	/*
	 * Once the cpuhp callbacks have been changed, mpam_partid_max can no
	 * longer change.
	 */
	spin_lock(&partid_max_lock);
	partid_max_published = true;
	spin_unlock(&partid_max_lock);

	/*
	 * If all the MSC have been probed, enabling the IRQs happens next.
	 * That involves cross-calling to a CPU that can reach the MSC, and
	 * the locks must be taken in this order:
	 */
	cpus_read_lock();
	mutex_lock(&mpam_list_lock);
	do {
		mpam_enable_merge_features(&mpam_classes);

		err = mpam_register_irqs();
		if (err) {
			pr_warn("Failed to register irqs: %d\n", err);
			break;
		}

		err = mpam_allocate_config();
		if (err) {
			pr_err("Failed to allocate configuration arrays.\n");
			break;
		}
	} while (0);
	mutex_unlock(&mpam_list_lock);
	cpus_read_unlock();

	if (err) {
		mpam_disable_reason = "Failed to enable.";
		schedule_work(&mpam_broken_work);
		return;
	}

	static_branch_enable(&mpam_enabled);
	mpam_register_cpuhp_callbacks(mpam_cpu_online, mpam_cpu_offline,
				      "mpam:online");

	/* Use printk() to avoid the pr_fmt adding the function name. */
	printk(KERN_INFO "MPAM enabled with %u PARTIDs and %u PMGs\n",
	       mpam_partid_max + 1, mpam_pmg_max + 1);
}

static void mpam_reset_component_locked(struct mpam_component *comp)
{
	struct mpam_vmsc *vmsc;

	lockdep_assert_cpus_held();
	mpam_assert_partid_sizes_fixed();

	mpam_reset_component_cfg(comp);

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(vmsc, &comp->vmsc, comp_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		struct mpam_msc *msc = vmsc->msc;
		struct mpam_msc_ris *ris;

		list_for_each_entry_srcu(ris, &vmsc->ris, vmsc_list,
					 srcu_read_lock_held(&mpam_srcu)) {
			if (!ris->in_reset_state)
				mpam_touch_msc(msc, mpam_reset_ris, ris);
			ris->in_reset_state = true;
		}
	}
}

static void mpam_reset_class_locked(struct mpam_class *class)
{
	struct mpam_component *comp;

	lockdep_assert_cpus_held();

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(comp, &class->components, class_list,
				 srcu_read_lock_held(&mpam_srcu))
		mpam_reset_component_locked(comp);
}

static void mpam_reset_class(struct mpam_class *class)
{
	cpus_read_lock();
	mpam_reset_class_locked(class);
	cpus_read_unlock();
}

/*
 * Called in response to an error IRQ.
 * All of MPAM's errors indicate a software bug; restore any modified
 * controls to their reset values.
 */
void mpam_disable(struct work_struct *ignored)
{
	int idx;
	struct mpam_class *class;
	struct mpam_msc *msc, *tmp;

	mutex_lock(&mpam_cpuhp_state_lock);
	if (mpam_cpuhp_state) {
		cpuhp_remove_state(mpam_cpuhp_state);
		mpam_cpuhp_state = 0;
	}
	mutex_unlock(&mpam_cpuhp_state_lock);

	static_branch_disable(&mpam_enabled);

	mpam_unregister_irqs();

	idx = srcu_read_lock(&mpam_srcu);
	list_for_each_entry_srcu(class, &mpam_classes, classes_list,
				 srcu_read_lock_held(&mpam_srcu))
		mpam_reset_class(class);
	srcu_read_unlock(&mpam_srcu, idx);

	mutex_lock(&mpam_list_lock);
	list_for_each_entry_safe(msc, tmp, &mpam_all_msc, all_msc_list)
		mpam_msc_destroy(msc);
	mutex_unlock(&mpam_list_lock);
	mpam_free_garbage();

	pr_err_once("MPAM disabled due to %s\n", mpam_disable_reason);
}

/*
 * Enable mpam once all devices have been probed.
 * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
 * Also scheduled when new devices are probed as new CPUs come online.
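 *
 * A minimal sketch of the expected behaviour (the race described here is
 * hypothetical): if two CPUs each probe the last outstanding MSC and both
 * schedule mpam_enable_work, the atomic 'once' counter below ensures
 * mpam_enable_once() still runs only once.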
 */
void mpam_enable(struct work_struct *work)
{
	static atomic_t once;
	struct mpam_msc *msc;
	bool all_devices_probed = true;

	/* Have we probed all the hw devices? */
	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(msc, &mpam_all_msc, all_msc_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		mutex_lock(&msc->probe_lock);
		if (!msc->probed)
			all_devices_probed = false;
		mutex_unlock(&msc->probe_lock);

		if (!all_devices_probed)
			break;
	}

	if (all_devices_probed && !atomic_fetch_inc(&once))
		mpam_enable_once();
}

#define maybe_update_config(cfg, feature, newcfg, member, changes) do {	\
	if (mpam_has_feature(feature, newcfg) &&				\
	    (newcfg)->member != (cfg)->member) {				\
		(cfg)->member = (newcfg)->member;				\
		mpam_set_feature(feature, cfg);					\
										\
		(changes) = true;						\
	}									\
} while (0)

static bool mpam_update_config(struct mpam_config *cfg,
			       const struct mpam_config *newcfg)
{
	bool has_changes = false;

	maybe_update_config(cfg, mpam_feat_cpor_part, newcfg, cpbm, has_changes);
	maybe_update_config(cfg, mpam_feat_mbw_part, newcfg, mbw_pbm, has_changes);
	maybe_update_config(cfg, mpam_feat_mbw_max, newcfg, mbw_max, has_changes);

	return has_changes;
}

int mpam_apply_config(struct mpam_component *comp, u16 partid,
		      struct mpam_config *cfg)
{
	struct mpam_write_config_arg arg;
	struct mpam_msc_ris *ris;
	struct mpam_vmsc *vmsc;
	struct mpam_msc *msc;

	lockdep_assert_cpus_held();

	/* Don't pass in the current config! */
	WARN_ON_ONCE(&comp->cfg[partid] == cfg);

	if (!mpam_update_config(&comp->cfg[partid], cfg))
		return 0;

	arg.comp = comp;
	arg.partid = partid;

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_srcu(vmsc, &comp->vmsc, comp_list,
				 srcu_read_lock_held(&mpam_srcu)) {
		msc = vmsc->msc;

		mutex_lock(&msc->cfg_lock);
		list_for_each_entry_srcu(ris, &vmsc->ris, vmsc_list,
					 srcu_read_lock_held(&mpam_srcu)) {
			arg.ris = ris;
			mpam_touch_msc(msc, __write_config, &arg);
		}
		mutex_unlock(&msc->cfg_lock);
	}

	return 0;
}

static int __init mpam_msc_driver_init(void)
{
	if (!system_supports_mpam())
		return -EOPNOTSUPP;

	init_srcu_struct(&mpam_srcu);

	fw_num_msc = acpi_mpam_count_msc();
	if (fw_num_msc <= 0) {
		pr_err("No MSC devices found in firmware\n");
		return -EINVAL;
	}

	return platform_driver_register(&mpam_msc_driver);
}

/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */
subsys_initcall(mpam_msc_driver_init);
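
/*
 * A hedged usage sketch for mpam_apply_config() (the caller and values below
 * are hypothetical, not part of this driver): build a struct mpam_config on
 * the stack, set the feature bit for the control being changed along with its
 * member, then call mpam_apply_config() with cpus_read_lock() held, e.g.
 *
 *	struct mpam_config cfg = {};
 *
 *	mpam_set_feature(mpam_feat_cpor_part, &cfg);
 *	cfg.cpbm = 0xff;
 *	cpus_read_lock();
 *	err = mpam_apply_config(comp, partid, &cfg);
 *	cpus_read_unlock();
 */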