// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Arm Ltd.

/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */

#define pr_fmt(fmt) "ACPI MPAM: " fmt

#include <linux/acpi.h>
#include <linux/arm_mpam.h>
#include <linux/bits.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/platform_device.h>

#include <acpi/processor.h>

/*
 * Flags for acpi_table_mpam_msc.*_interrupt_flags.
 * See 2.1.1 Interrupt Flags, Table 5, of DEN0065B_MPAM_ACPI_3.0-bet.
 */
#define ACPI_MPAM_MSC_IRQ_MODE					BIT(0)
#define ACPI_MPAM_MSC_IRQ_TYPE_MASK				GENMASK(2, 1)
#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED				0
#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK			BIT(3)
#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR		0
#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER	1
#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID			BIT(4)

/*
 * Encodings for the MSC node body interface type field.
 * See 2.1 MPAM MSC node, Table 4 of DEN0065B_MPAM_ACPI_3.0-bet.
 */
#define ACPI_MPAM_MSC_IFACE_MMIO	0x00
#define ACPI_MPAM_MSC_IFACE_PCC	0x0a

/*
 * Return true if @flags has a valid affinity field that names a processor
 * container, i.e. the interrupt looks like a PPI partitioned across a set
 * of CPUs. Such interrupts are not supported: a one-time error is printed
 * and the caller refuses to register the interrupt.
 */
static bool _is_ppi_partition(u32 flags)
{
	u32 aff_type, is_ppi;
	bool ret;

	/* The affinity type field is only meaningful when the valid bit is set */
	is_ppi = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags);
	if (!is_ppi)
		return false;

	aff_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK, flags);
	ret = (aff_type == ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER);
	if (ret)
		pr_err_once("Partitioned interrupts not supported\n");

	return ret;
}

/*
 * Register the wired interrupt @intid with ACPI on behalf of @pdev.
 *
 * Returns the Linux interrupt number on success, or a negative error code
 * when the interrupt is absent (@intid == 0), partitioned, not wired, or
 * GSI registration fails.
 */
static int acpi_mpam_register_irq(struct platform_device *pdev,
				  u32 intid, u32 flags)
{
	int irq;
	u32 int_type;
	int trigger;

	/* An intid of zero means no interrupt was described */
	if (!intid)
		return -EINVAL;

	if (_is_ppi_partition(flags))
		return -EINVAL;

	/*
	 * The mode flag is passed straight through to acpi_register_gsi()
	 * as the trigger type. NOTE(review): this assumes the table's mode
	 * encoding matches the ACPI_LEVEL_SENSITIVE/ACPI_EDGE_SENSITIVE
	 * values - confirm against the GSI API.
	 */
	trigger = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags);
	int_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags);
	if (int_type != ACPI_MPAM_MSC_IRQ_TYPE_WIRED)
		return -EINVAL;

	irq = acpi_register_gsi(&pdev->dev, intid, trigger, ACPI_ACTIVE_HIGH);
	if (irq < 0)
		pr_err_once("Failed to register interrupt 0x%x with ACPI\n", intid);

	return irq;
}

/*
 * Append the MSC's overflow and error interrupts to @res, advancing
 * *@res_idx for each one successfully registered. Registration failure is
 * not fatal: the corresponding resource is simply omitted.
 */
static void acpi_mpam_parse_irqs(struct platform_device *pdev,
				 struct acpi_mpam_msc_node *tbl_msc,
				 struct resource *res, int *res_idx)
{
	u32 flags, intid;
	int irq;

	intid = tbl_msc->overflow_interrupt;
	flags = tbl_msc->overflow_interrupt_flags;
	irq = acpi_mpam_register_irq(pdev, intid, flags);
	if (irq > 0)
		res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "overflow");

	intid = tbl_msc->error_interrupt;
	flags = tbl_msc->error_interrupt_flags;
	irq = acpi_mpam_register_irq(pdev, intid, flags);
	if (irq > 0)
		res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "error");
}

/*
 * Feed one resource node into the driver via mpam_ris_create().
 *
 * Caches are identified by level and cache id, memory by NUMA node.
 * Locator types this function does not handle return 0: they are
 * discovered later and treated as unknown.
 */
static int acpi_mpam_parse_resource(struct mpam_msc *msc,
				    struct acpi_mpam_resource_node *res)
{
	int level, nid;
	u32 cache_id;

	switch (res->locator_type) {
	case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
		cache_id
			= res->locator.cache_locator.cache_reference;
		level = find_acpi_cache_level_from_id(cache_id);
		if (level <= 0) {
			pr_err_once("Bad level (%d) for cache with id %u\n", level, cache_id);
			return -EINVAL;
		}
		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
				       level, cache_id);
	case ACPI_MPAM_LOCATION_TYPE_MEMORY:
		nid = pxm_to_node(res->locator.memory_locator.proximity_domain);
		if (nid == NUMA_NO_NODE) {
			/* Fall back to node 0 rather than rejecting the MSC */
			pr_debug("Bad proximity domain %lld, using node 0 instead\n",
				 res->locator.memory_locator.proximity_domain);
			nid = 0;
		}
		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY,
				       MPAM_CLASS_ID_DEFAULT, nid);
	default:
		/* These get discovered later and are treated as unknown */
		return 0;
	}
}

/*
 * Walk the variable-length resource nodes that follow @tbl_msc's fixed
 * header, feeding each one into acpi_mpam_parse_resource().
 *
 * Every node is bounds checked against the MSC entry's length before it
 * is read. Each node is followed by num_functional_deps dependency
 * descriptors, which are stepped over but never dereferenced.
 *
 * Returns 0 on success or a negative error code for a malformed entry.
 */
int acpi_mpam_parse_resources(struct mpam_msc *msc,
			      struct acpi_mpam_msc_node *tbl_msc)
{
	int i, err;
	char *ptr, *table_end;
	struct acpi_mpam_resource_node *resource;

	table_end = (char *)tbl_msc + tbl_msc->length;
	ptr = (char *)(tbl_msc + 1);
	for (i = 0; i < tbl_msc->num_resource_nodes; i++) {
		u64 max_deps, remaining_table;

		if (ptr + sizeof(*resource) > table_end)
			return -EINVAL;

		resource = (struct acpi_mpam_resource_node *)ptr;

		/*
		 * Bound the dependency count by the space remaining in the
		 * entry. NOTE(review): the budget includes the resource
		 * node header itself, so this check is slightly loose -
		 * harmless here because the dependency descriptors are
		 * only skipped over, never read.
		 */
		remaining_table = table_end - ptr;
		max_deps = remaining_table / sizeof(struct acpi_mpam_func_deps);
		if (resource->num_functional_deps > max_deps) {
			pr_debug("MSC has impossible number of functional dependencies\n");
			return -EINVAL;
		}

		err = acpi_mpam_parse_resource(msc, resource);
		if (err)
			return err;

		/* Step over this node and its trailing dependency array */
		ptr += sizeof(*resource);
		ptr += resource->num_functional_deps * sizeof(struct acpi_mpam_func_deps);
	}

	return 0;
}

/*
 * Creates the device power management link and returns true if the
 * acpi id is valid and usable for cpu affinity.
 * This is the case when the linked device is a processor or a processor
 * container.
 */
static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc,
				     struct platform_device *pdev,
				     u32 *acpi_id)
{
	/* The table's HID field is not NUL terminated - copy it into one that is */
	char hid[sizeof(tbl_msc->hardware_id_linked_device) + 1] = { 0 };
	bool acpi_id_valid = false;
	struct acpi_device *buddy;
	char uid[11];	/* large enough for a decimal u32 plus NUL */
	int len;

	memcpy(hid, &tbl_msc->hardware_id_linked_device,
	       sizeof(tbl_msc->hardware_id_linked_device));

	/* Only a processor container instance id is usable for cpu affinity */
	if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) {
		*acpi_id = tbl_msc->instance_id_linked_device;
		acpi_id_valid = true;
	}

	len = snprintf(uid, sizeof(uid), "%u",
		       tbl_msc->instance_id_linked_device);
	if (len >= sizeof(uid)) {
		pr_debug("Failed to convert uid of device for power management.");
		return acpi_id_valid;
	}

	/* Create the PM link to the namespace device, if it exists */
	buddy = acpi_dev_get_first_match_dev(hid, uid, -1);
	if (buddy) {
		device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS);
		acpi_dev_put(buddy);
	}

	return acpi_id_valid;
}

/*
 * Map the table's interface type encoding onto the driver's enum.
 * Returns -EINVAL for encodings this driver does not recognise.
 */
static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc,
				 enum mpam_msc_iface *iface)
{
	switch (tbl_msc->interface_type) {
	case ACPI_MPAM_MSC_IFACE_MMIO:
		*iface = MPAM_IFACE_MMIO;
		return 0;
	case ACPI_MPAM_MSC_IFACE_PCC:
		*iface = MPAM_IFACE_PCC;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Create and add a platform device for one MSC table entry, populating
 * its resources (MMIO region, interrupts) and properties (PCC channel,
 * not-ready timeout, cpu affinity) from the table.
 *
 * Returns the added platform device or an ERR_PTR(). On any early exit
 * the device is released automatically by the __free() cleanup; only the
 * final return_ptr() hands ownership to the caller.
 */
static struct platform_device * __init acpi_mpam_parse_msc(struct acpi_mpam_msc_node *tbl_msc)
{
	struct platform_device *pdev __free(platform_device_put) =
		platform_device_alloc("mpam_msc", tbl_msc->identifier);
	int next_res = 0, next_prop = 0, err;
	/* pcc, nrdy, affinity and a sentinel */
	struct property_entry props[4] = { 0 };
	/* mmio, 2xirq, no sentinel.
	 */
	struct resource res[3] = { 0 };
	struct acpi_device *companion;
	enum mpam_msc_iface iface;
	char uid[16];
	u32 acpi_id;

	if (!pdev)
		return ERR_PTR(-ENOMEM);

	/* Some power management is described in the namespace: */
	err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier);
	if (err > 0 && err < sizeof(uid)) {
		companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1);
		if (companion) {
			ACPI_COMPANION_SET(&pdev->dev, companion);
			acpi_dev_put(companion);
		} else {
			/* A missing companion is not fatal, only logged */
			pr_debug("MSC.%u: missing namespace entry\n", tbl_msc->identifier);
		}
	}

	if (decode_interface_type(tbl_msc, &iface)) {
		pr_debug("MSC.%u: unknown interface type\n", tbl_msc->identifier);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * For MMIO, base_address describes the register frame; for PCC it
	 * carries the channel number, passed along as a property instead.
	 */
	if (iface == MPAM_IFACE_MMIO) {
		res[next_res++] = DEFINE_RES_MEM_NAMED(tbl_msc->base_address,
						       tbl_msc->mmio_size,
						       "MPAM:MSC");
	} else if (iface == MPAM_IFACE_PCC) {
		props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel",
							tbl_msc->base_address);
	}

	acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res);

	WARN_ON_ONCE(next_res > ARRAY_SIZE(res));
	err = platform_device_add_resources(pdev, res, next_res);
	if (err)
		return ERR_PTR(err);

	props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us",
						tbl_msc->max_nrdy_usec);

	/*
	 * The MSC's CPU affinity is described via its linked power
	 * management device, but only if it points at a Processor or
	 * Processor Container.
	 */
	if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id))
		props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", acpi_id);

	/* props[] must keep its NULL sentinel - hence ARRAY_SIZE() - 1 */
	WARN_ON_ONCE(next_prop > ARRAY_SIZE(props) - 1);
	err = device_create_managed_software_node(&pdev->dev, props, NULL);
	if (err)
		return ERR_PTR(err);

	/*
	 * Stash the table entry for acpi_mpam_parse_resources() to discover
	 * what this MSC controls.
287 */ 288 err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); 289 if (err) 290 return ERR_PTR(err); 291 292 err = platform_device_add(pdev); 293 if (err) 294 return ERR_PTR(err); 295 296 return_ptr(pdev); 297 } 298 299 static int __init acpi_mpam_parse(void) 300 { 301 char *table_end, *table_offset; 302 struct acpi_mpam_msc_node *tbl_msc; 303 struct platform_device *pdev; 304 305 if (acpi_disabled || !system_supports_mpam()) 306 return 0; 307 308 struct acpi_table_header *table __free(acpi_put_table) = 309 acpi_get_table_pointer(ACPI_SIG_MPAM, 0); 310 311 if (IS_ERR(table)) 312 return 0; 313 314 if (table->revision < 1) { 315 pr_debug("MPAM ACPI table revision %d not supported\n", table->revision); 316 return 0; 317 } 318 319 table_offset = (char *)(table + 1); 320 table_end = (char *)table + table->length; 321 322 while (table_offset < table_end) { 323 tbl_msc = (struct acpi_mpam_msc_node *)table_offset; 324 if (table_offset + sizeof(*tbl_msc) > table_end || 325 table_offset + tbl_msc->length > table_end) { 326 pr_err("MSC entry overlaps end of ACPI table\n"); 327 return -EINVAL; 328 } 329 table_offset += tbl_msc->length; 330 331 /* 332 * If any of the reserved fields are set, make no attempt to 333 * parse the MSC structure. This MSC will still be counted by 334 * acpi_mpam_count_msc(), meaning the MPAM driver can't probe 335 * against all MSC, and will never be enabled. There is no way 336 * to enable it safely, because we cannot determine safe 337 * system-wide partid and pmg ranges in this situation. 
		 */
		if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) {
			pr_err_once("Unrecognised MSC, MPAM not usable\n");
			pr_debug("MSC.%u: reserved field set\n", tbl_msc->identifier);
			continue;
		}

		/* An mmio_size of zero marks the MSC as disabled */
		if (!tbl_msc->mmio_size) {
			pr_debug("MSC.%u: marked as disabled\n", tbl_msc->identifier);
			continue;
		}

		pdev = acpi_mpam_parse_msc(tbl_msc);
		if (IS_ERR(pdev))
			return PTR_ERR(pdev);
	}

	return 0;
}

/**
 * acpi_mpam_count_msc() - Count the number of MSC described by firmware.
 *
 * Returns the number of MSCs, zero when ACPI or MPAM is unavailable or
 * the table is missing/unsupported, or a negative error code for a
 * malformed table.
 *
 * This can be called before or in parallel with acpi_mpam_parse().
 */
int acpi_mpam_count_msc(void)
{
	char *table_end, *table_offset;
	struct acpi_mpam_msc_node *tbl_msc;
	int count = 0;

	if (acpi_disabled || !system_supports_mpam())
		return 0;

	struct acpi_table_header *table __free(acpi_put_table) =
		acpi_get_table_pointer(ACPI_SIG_MPAM, 0);

	if (IS_ERR(table))
		return 0;

	if (table->revision < 1)
		return 0;

	table_offset = (char *)(table + 1);
	table_end = (char *)table + table->length;

	while (table_offset < table_end) {
		tbl_msc = (struct acpi_mpam_msc_node *)table_offset;

		/* Sanity check each entry before trusting its length field */
		if (table_offset + sizeof(*tbl_msc) > table_end)
			return -EINVAL;
		if (tbl_msc->length < sizeof(*tbl_msc))
			return -EINVAL;
		if (tbl_msc->length > table_end - table_offset)
			return -EINVAL;
		table_offset += tbl_msc->length;

		/* Disabled MSC (mmio_size == 0) are not counted */
		if (!tbl_msc->mmio_size)
			continue;

		count++;
	}

	return count;
}

/*
 * Call after ACPI devices have been created, which happens behind acpi_scan_init()
 * called from subsys_initcall(). PCC requires the mailbox driver, which is
 * initialised from postcore_initcall().
 */
subsys_initcall_sync(acpi_mpam_parse);