// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

/*
 * pseries_memory_block_size - return the size of a hotpluggable memory
 * block (LMB) in bytes.
 *
 * Reads "ibm,lmb-size" from /ibm,dynamic-reconfiguration-memory when that
 * node exists.  Otherwise, on pseries only, falls back to sizing the first
 * memory node: the size of /memory@0 gives the address of the next memory
 * node, whose resource size is taken as the block size.  Defaults to
 * MIN_MEMORY_BLOCK_SIZE when neither source is usable.
 */
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;	/* fallback value */
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		int len;
		int size_cells;
		const __be32 *prop;

		size_cells = of_n_size_cells(np);

		/* Only trust the property if it holds at least size_cells cells */
		prop = of_get_property(np, "ibm,lmb-size", &len);
		if (prop && len >= size_cells * sizeof(__be32))
			memblock_size = of_read_number(prop, size_cells);
		of_node_put(np);

	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

/* Free a property allocated by dlpar_clone_property(). */
static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

/*
 * dlpar_clone_property - duplicate @prop with a value buffer of @prop_size
 * bytes.  @prop_size may exceed prop->length to leave zeroed space for
 * appending; only prop->length bytes are copied.  The clone is marked
 * OF_DYNAMIC.  Returns NULL on allocation failure.
 */
static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		/* dlpar_free_property() handles partially-built clones */
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

/*
 * find_aa_index - find (or create) the ibm,associativity-lookup-arrays
 * index whose associativity matches @lmb_assoc.
 *
 * @dr_node:  /ibm,dynamic-reconfiguration-memory node
 * @ala_prop: its ibm,associativity-lookup-arrays property
 * @lmb_assoc: LMB's ibm,associativity value (element 0 is the length,
 *             which is skipped for comparison)
 * @aa_index: out parameter, the matching array index
 *
 * If no existing array matches, the property is grown by one array and the
 * new associativity is appended via of_update_property().  Returns false
 * only if that allocation fails.
 */
static bool find_aa_index(struct device_node *dr_node,
			  struct property *ala_prop,
			  const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	/* Search existing arrays for a match; +2 skips the two header cells */
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	/* No match: clone the property with room for one more array */
	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

/*
 * update_lmb_associativity_index - set lmb->aa_index for a new LMB.
 *
 * Queries the hypervisor-configured device tree for the LMB's
 * ibm,associativity, updates NUMA distance information, and resolves (or
 * appends) the matching lookup-array index via find_aa_index().
 * Returns 0 on success, a negative errno (or -1 when find_aa_index()
 * fails) otherwise.
 */
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	/* Build a detached node for this DRC from configure-connector data */
	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

/*
 * lmb_to_memblock - map an LMB's base address to its memory_block device.
 * Returns a referenced memory_block (caller must put_device()) or NULL.
 */
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

/*
 * get_lmb_range - locate the contiguous run of @n_lmbs LMBs starting at
 * the LMB whose drc_index is @drc_index.
 *
 * On success *start_lmb/*end_lmb bracket the range ([start, end)).
 * Returns -EINVAL if the index is unknown or the range would run past the
 * end of the drmem array.
 */
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	/* Reject ranges extending past the end of the drmem array */
	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

/*
 * dlpar_change_lmb_state - online or offline the memory block backing
 * @lmb.  A request matching the current state is a no-op returning 0.
 */
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	/* Drop the reference taken by lmb_to_memblock() */
	put_device(&mem_block->dev);

	return rc;
}

/* Convenience wrapper: online the memory block backing @lmb. */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Convenience wrapper: offline the memory block backing @lmb. */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

/*
 * pseries_remove_memblock - remove @memblock_size bytes of memory at
 * @base, section by section, then drop the range from memblock.
 * Always returns 0; an invalid start pfn skips the hot-remove but still
 * updates memblock.
 */
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	/*
	 * NOTE(review): when the loop above ran, base has been advanced past
	 * the removed sections, so this memblock_remove() covers a range
	 * starting beyond the original base — looks suspicious; confirm
	 * against upstream before relying on it.
	 */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

/*
 * pseries_remove_mem_node - OF detach handler: remove the memory range
 * described by a "memory" device node's "reg" property.
 * Non-memory nodes are ignored (returns 0).
 */
static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *prop;
	unsigned long base;
	unsigned long lmb_size;
	int ret = -EINVAL;
	int addr_cells, size_cells;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	prop = of_get_property(np, "reg", NULL);
	if (!prop)
		return ret;

	addr_cells = of_n_addr_cells(np);
	size_cells = of_n_size_cells(np);

	/*
	 * "reg" property represents (addr,size) tuple.
	 */
	base = of_read_number(prop, addr_cells);
	prop += addr_cells;
	lmb_size = of_read_number(prop, size_cells);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

/*
 * lmb_is_removable - quick eligibility check for hot-removing @lmb.
 * Rejects reserved or unassigned LMBs, and (with fadump) LMBs inside the
 * fadump boot-memory/preserved areas.  A true result is only a hint;
 * device_offline() makes the final call.
 */
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

/* Forward declaration: needed by the remove paths for rollback. */
static int dlpar_add_lmb(struct drmem_lmb *);

/*
 * dlpar_remove_lmb - hot-remove a single LMB: offline its memory block,
 * remove the memory, drop it from memblock, and invalidate its
 * associativity index.  Clears DRCONF_MEM_ASSIGNED on success.
 * Returns 0 on success or a negative errno.
 */
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	/* Grab nid from mem_block before dropping our reference to it */
	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

404 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove) 405 { 406 struct drmem_lmb *lmb; 407 int lmbs_reserved = 0; 408 int lmbs_available = 0; 409 int rc; 410 411 pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove); 412 413 if (lmbs_to_remove == 0) 414 return -EINVAL; 415 416 /* Validate that there are enough LMBs to satisfy the request */ 417 for_each_drmem_lmb(lmb) { 418 if (lmb_is_removable(lmb)) 419 lmbs_available++; 420 421 if (lmbs_available == lmbs_to_remove) 422 break; 423 } 424 425 if (lmbs_available < lmbs_to_remove) { 426 pr_info("Not enough LMBs available (%d of %d) to satisfy request\n", 427 lmbs_available, lmbs_to_remove); 428 return -EINVAL; 429 } 430 431 for_each_drmem_lmb(lmb) { 432 rc = dlpar_remove_lmb(lmb); 433 if (rc) 434 continue; 435 436 /* Mark this lmb so we can add it later if all of the 437 * requested LMBs cannot be removed. 438 */ 439 drmem_mark_lmb_reserved(lmb); 440 441 lmbs_reserved++; 442 if (lmbs_reserved == lmbs_to_remove) 443 break; 444 } 445 446 if (lmbs_reserved != lmbs_to_remove) { 447 pr_err("Memory hot-remove failed, adding LMB's back\n"); 448 449 for_each_drmem_lmb(lmb) { 450 if (!drmem_lmb_reserved(lmb)) 451 continue; 452 453 rc = dlpar_add_lmb(lmb); 454 if (rc) 455 pr_err("Failed to add LMB back, drc index %x\n", 456 lmb->drc_index); 457 458 drmem_remove_lmb_reservation(lmb); 459 460 lmbs_reserved--; 461 if (lmbs_reserved == 0) 462 break; 463 } 464 465 rc = -EINVAL; 466 } else { 467 for_each_drmem_lmb(lmb) { 468 if (!drmem_lmb_reserved(lmb)) 469 continue; 470 471 dlpar_release_drc(lmb->drc_index); 472 pr_info("Memory at %llx was hot-removed\n", 473 lmb->base_addr); 474 475 drmem_remove_lmb_reservation(lmb); 476 477 lmbs_reserved--; 478 if (lmbs_reserved == 0) 479 break; 480 } 481 rc = 0; 482 } 483 484 return rc; 485 } 486 487 static int dlpar_memory_remove_by_index(u32 drc_index) 488 { 489 struct drmem_lmb *lmb; 490 int lmb_found; 491 int rc; 492 493 pr_debug("Attempting to hot-remove LMB, drc index 
%x\n", drc_index); 494 495 lmb_found = 0; 496 for_each_drmem_lmb(lmb) { 497 if (lmb->drc_index == drc_index) { 498 lmb_found = 1; 499 rc = dlpar_remove_lmb(lmb); 500 if (!rc) 501 dlpar_release_drc(lmb->drc_index); 502 503 break; 504 } 505 } 506 507 if (!lmb_found) 508 rc = -EINVAL; 509 510 if (rc) 511 pr_debug("Failed to hot-remove memory at %llx\n", 512 lmb->base_addr); 513 else 514 pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr); 515 516 return rc; 517 } 518 519 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) 520 { 521 struct drmem_lmb *lmb, *start_lmb, *end_lmb; 522 int rc; 523 524 pr_info("Attempting to hot-remove %u LMB(s) at %x\n", 525 lmbs_to_remove, drc_index); 526 527 if (lmbs_to_remove == 0) 528 return -EINVAL; 529 530 rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb); 531 if (rc) 532 return -EINVAL; 533 534 /* 535 * Validate that all LMBs in range are not reserved. Note that it 536 * is ok if they are !ASSIGNED since our goal here is to remove the 537 * LMB range, regardless of whether some LMBs were already removed 538 * by any other reason. 539 * 540 * This is a contrast to what is done in remove_by_count() where we 541 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()), 542 * because we want to remove a fixed amount of LMBs in that function. 543 */ 544 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 545 if (lmb->flags & DRCONF_MEM_RESERVED) { 546 pr_err("Memory at %llx (drc index %x) is reserved\n", 547 lmb->base_addr, lmb->drc_index); 548 return -EINVAL; 549 } 550 } 551 552 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 553 /* 554 * dlpar_remove_lmb() will error out if the LMB is already 555 * !ASSIGNED, but this case is a no-op for us. 
556 */ 557 if (!(lmb->flags & DRCONF_MEM_ASSIGNED)) 558 continue; 559 560 rc = dlpar_remove_lmb(lmb); 561 if (rc) 562 break; 563 564 drmem_mark_lmb_reserved(lmb); 565 } 566 567 if (rc) { 568 pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n"); 569 570 571 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 572 if (!drmem_lmb_reserved(lmb)) 573 continue; 574 575 /* 576 * Setting the isolation state of an UNISOLATED/CONFIGURED 577 * device to UNISOLATE is a no-op, but the hypervisor can 578 * use it as a hint that the LMB removal failed. 579 */ 580 dlpar_unisolate_drc(lmb->drc_index); 581 582 rc = dlpar_add_lmb(lmb); 583 if (rc) 584 pr_err("Failed to add LMB, drc index %x\n", 585 lmb->drc_index); 586 587 drmem_remove_lmb_reservation(lmb); 588 } 589 rc = -EINVAL; 590 } else { 591 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 592 if (!drmem_lmb_reserved(lmb)) 593 continue; 594 595 dlpar_release_drc(lmb->drc_index); 596 pr_info("Memory at %llx (drc index %x) was hot-removed\n", 597 lmb->base_addr, lmb->drc_index); 598 599 drmem_remove_lmb_reservation(lmb); 600 } 601 } 602 603 return rc; 604 } 605 606 #else 607 static inline int pseries_remove_memblock(unsigned long base, 608 unsigned long memblock_size) 609 { 610 return -EOPNOTSUPP; 611 } 612 static inline int pseries_remove_mem_node(struct device_node *np) 613 { 614 return 0; 615 } 616 static int dlpar_remove_lmb(struct drmem_lmb *lmb) 617 { 618 return -EOPNOTSUPP; 619 } 620 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove) 621 { 622 return -EOPNOTSUPP; 623 } 624 static int dlpar_memory_remove_by_index(u32 drc_index) 625 { 626 return -EOPNOTSUPP; 627 } 628 629 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) 630 { 631 return -EOPNOTSUPP; 632 } 633 #endif /* CONFIG_MEMORY_HOTREMOVE */ 634 635 static int dlpar_add_lmb(struct drmem_lmb *lmb) 636 { 637 unsigned long block_sz; 638 int nid, rc; 639 640 if (lmb->flags & DRCONF_MEM_ASSIGNED) 641 return -EINVAL; 
642 643 rc = update_lmb_associativity_index(lmb); 644 if (rc) { 645 dlpar_release_drc(lmb->drc_index); 646 return rc; 647 } 648 649 block_sz = memory_block_size_bytes(); 650 651 /* Find the node id for this LMB. Fake one if necessary. */ 652 nid = of_drconf_to_nid_single(lmb); 653 if (nid < 0 || !node_possible(nid)) 654 nid = first_online_node; 655 656 /* Add the memory */ 657 rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE); 658 if (rc) { 659 invalidate_lmb_associativity_index(lmb); 660 return rc; 661 } 662 663 rc = dlpar_online_lmb(lmb); 664 if (rc) { 665 __remove_memory(nid, lmb->base_addr, block_sz); 666 invalidate_lmb_associativity_index(lmb); 667 } else { 668 lmb->flags |= DRCONF_MEM_ASSIGNED; 669 } 670 671 return rc; 672 } 673 674 static int dlpar_memory_add_by_count(u32 lmbs_to_add) 675 { 676 struct drmem_lmb *lmb; 677 int lmbs_available = 0; 678 int lmbs_reserved = 0; 679 int rc; 680 681 pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add); 682 683 if (lmbs_to_add == 0) 684 return -EINVAL; 685 686 /* Validate that there are enough LMBs to satisfy the request */ 687 for_each_drmem_lmb(lmb) { 688 if (lmb->flags & DRCONF_MEM_RESERVED) 689 continue; 690 691 if (!(lmb->flags & DRCONF_MEM_ASSIGNED)) 692 lmbs_available++; 693 694 if (lmbs_available == lmbs_to_add) 695 break; 696 } 697 698 if (lmbs_available < lmbs_to_add) 699 return -EINVAL; 700 701 for_each_drmem_lmb(lmb) { 702 if (lmb->flags & DRCONF_MEM_ASSIGNED) 703 continue; 704 705 rc = dlpar_acquire_drc(lmb->drc_index); 706 if (rc) 707 continue; 708 709 rc = dlpar_add_lmb(lmb); 710 if (rc) { 711 dlpar_release_drc(lmb->drc_index); 712 continue; 713 } 714 715 /* Mark this lmb so we can remove it later if all of the 716 * requested LMBs cannot be added. 
717 */ 718 drmem_mark_lmb_reserved(lmb); 719 lmbs_reserved++; 720 if (lmbs_reserved == lmbs_to_add) 721 break; 722 } 723 724 if (lmbs_reserved != lmbs_to_add) { 725 pr_err("Memory hot-add failed, removing any added LMBs\n"); 726 727 for_each_drmem_lmb(lmb) { 728 if (!drmem_lmb_reserved(lmb)) 729 continue; 730 731 rc = dlpar_remove_lmb(lmb); 732 if (rc) 733 pr_err("Failed to remove LMB, drc index %x\n", 734 lmb->drc_index); 735 else 736 dlpar_release_drc(lmb->drc_index); 737 738 drmem_remove_lmb_reservation(lmb); 739 lmbs_reserved--; 740 741 if (lmbs_reserved == 0) 742 break; 743 } 744 rc = -EINVAL; 745 } else { 746 for_each_drmem_lmb(lmb) { 747 if (!drmem_lmb_reserved(lmb)) 748 continue; 749 750 pr_debug("Memory at %llx (drc index %x) was hot-added\n", 751 lmb->base_addr, lmb->drc_index); 752 drmem_remove_lmb_reservation(lmb); 753 lmbs_reserved--; 754 755 if (lmbs_reserved == 0) 756 break; 757 } 758 rc = 0; 759 } 760 761 return rc; 762 } 763 764 static int dlpar_memory_add_by_index(u32 drc_index) 765 { 766 struct drmem_lmb *lmb; 767 int rc, lmb_found; 768 769 pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index); 770 771 lmb_found = 0; 772 for_each_drmem_lmb(lmb) { 773 if (lmb->drc_index == drc_index) { 774 lmb_found = 1; 775 rc = dlpar_acquire_drc(lmb->drc_index); 776 if (!rc) { 777 rc = dlpar_add_lmb(lmb); 778 if (rc) 779 dlpar_release_drc(lmb->drc_index); 780 } 781 782 break; 783 } 784 } 785 786 if (!lmb_found) 787 rc = -EINVAL; 788 789 if (rc) 790 pr_info("Failed to hot-add memory, drc index %x\n", drc_index); 791 else 792 pr_info("Memory at %llx (drc index %x) was hot-added\n", 793 lmb->base_addr, drc_index); 794 795 return rc; 796 } 797 798 static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index) 799 { 800 struct drmem_lmb *lmb, *start_lmb, *end_lmb; 801 int rc; 802 803 pr_info("Attempting to hot-add %u LMB(s) at index %x\n", 804 lmbs_to_add, drc_index); 805 806 if (lmbs_to_add == 0) 807 return -EINVAL; 808 809 rc = get_lmb_range(drc_index, 
lmbs_to_add, &start_lmb, &end_lmb); 810 if (rc) 811 return -EINVAL; 812 813 /* Validate that the LMBs in this range are not reserved */ 814 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 815 /* Fail immediately if the whole range can't be hot-added */ 816 if (lmb->flags & DRCONF_MEM_RESERVED) { 817 pr_err("Memory at %llx (drc index %x) is reserved\n", 818 lmb->base_addr, lmb->drc_index); 819 return -EINVAL; 820 } 821 } 822 823 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 824 if (lmb->flags & DRCONF_MEM_ASSIGNED) 825 continue; 826 827 rc = dlpar_acquire_drc(lmb->drc_index); 828 if (rc) 829 break; 830 831 rc = dlpar_add_lmb(lmb); 832 if (rc) { 833 dlpar_release_drc(lmb->drc_index); 834 break; 835 } 836 837 drmem_mark_lmb_reserved(lmb); 838 } 839 840 if (rc) { 841 pr_err("Memory indexed-count-add failed, removing any added LMBs\n"); 842 843 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 844 if (!drmem_lmb_reserved(lmb)) 845 continue; 846 847 rc = dlpar_remove_lmb(lmb); 848 if (rc) 849 pr_err("Failed to remove LMB, drc index %x\n", 850 lmb->drc_index); 851 else 852 dlpar_release_drc(lmb->drc_index); 853 854 drmem_remove_lmb_reservation(lmb); 855 } 856 rc = -EINVAL; 857 } else { 858 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) { 859 if (!drmem_lmb_reserved(lmb)) 860 continue; 861 862 pr_info("Memory at %llx (drc index %x) was hot-added\n", 863 lmb->base_addr, lmb->drc_index); 864 drmem_remove_lmb_reservation(lmb); 865 } 866 } 867 868 return rc; 869 } 870 871 int dlpar_memory(struct pseries_hp_errorlog *hp_elog) 872 { 873 u32 count, drc_index; 874 int rc; 875 876 lock_device_hotplug(); 877 878 switch (hp_elog->action) { 879 case PSERIES_HP_ELOG_ACTION_ADD: 880 switch (hp_elog->id_type) { 881 case PSERIES_HP_ELOG_ID_DRC_COUNT: 882 count = hp_elog->_drc_u.drc_count; 883 rc = dlpar_memory_add_by_count(count); 884 break; 885 case PSERIES_HP_ELOG_ID_DRC_INDEX: 886 drc_index = hp_elog->_drc_u.drc_index; 887 rc = 
dlpar_memory_add_by_index(drc_index); 888 break; 889 case PSERIES_HP_ELOG_ID_DRC_IC: 890 count = hp_elog->_drc_u.ic.count; 891 drc_index = hp_elog->_drc_u.ic.index; 892 rc = dlpar_memory_add_by_ic(count, drc_index); 893 break; 894 default: 895 rc = -EINVAL; 896 break; 897 } 898 899 break; 900 case PSERIES_HP_ELOG_ACTION_REMOVE: 901 switch (hp_elog->id_type) { 902 case PSERIES_HP_ELOG_ID_DRC_COUNT: 903 count = hp_elog->_drc_u.drc_count; 904 rc = dlpar_memory_remove_by_count(count); 905 break; 906 case PSERIES_HP_ELOG_ID_DRC_INDEX: 907 drc_index = hp_elog->_drc_u.drc_index; 908 rc = dlpar_memory_remove_by_index(drc_index); 909 break; 910 case PSERIES_HP_ELOG_ID_DRC_IC: 911 count = hp_elog->_drc_u.ic.count; 912 drc_index = hp_elog->_drc_u.ic.index; 913 rc = dlpar_memory_remove_by_ic(count, drc_index); 914 break; 915 default: 916 rc = -EINVAL; 917 break; 918 } 919 920 break; 921 default: 922 pr_err("Invalid action (%d) specified\n", hp_elog->action); 923 rc = -EINVAL; 924 break; 925 } 926 927 if (!rc) 928 rc = drmem_update_dt(); 929 930 unlock_device_hotplug(); 931 return rc; 932 } 933 934 static int pseries_add_mem_node(struct device_node *np) 935 { 936 const __be32 *prop; 937 unsigned long base; 938 unsigned long lmb_size; 939 int ret = -EINVAL; 940 int addr_cells, size_cells; 941 942 /* 943 * Check to see if we are actually adding memory 944 */ 945 if (!of_node_is_type(np, "memory")) 946 return 0; 947 948 /* 949 * Find the base and size of the memblock 950 */ 951 prop = of_get_property(np, "reg", NULL); 952 if (!prop) 953 return ret; 954 955 addr_cells = of_n_addr_cells(np); 956 size_cells = of_n_size_cells(np); 957 /* 958 * "reg" property represents (addr,size) tuple. 959 */ 960 base = of_read_number(prop, addr_cells); 961 prop += addr_cells; 962 lmb_size = of_read_number(prop, size_cells); 963 964 /* 965 * Update memory region to represent the memory add 966 */ 967 ret = memblock_add(base, lmb_size); 968 return (ret < 0) ? 
-EINVAL : 0; 969 } 970 971 static int pseries_memory_notifier(struct notifier_block *nb, 972 unsigned long action, void *data) 973 { 974 struct of_reconfig_data *rd = data; 975 int err = 0; 976 977 switch (action) { 978 case OF_RECONFIG_ATTACH_NODE: 979 err = pseries_add_mem_node(rd->dn); 980 break; 981 case OF_RECONFIG_DETACH_NODE: 982 err = pseries_remove_mem_node(rd->dn); 983 break; 984 case OF_RECONFIG_UPDATE_PROPERTY: 985 if (!strcmp(rd->dn->name, 986 "ibm,dynamic-reconfiguration-memory")) 987 drmem_update_lmbs(rd->prop); 988 } 989 return notifier_from_errno(err); 990 } 991 992 static struct notifier_block pseries_mem_nb = { 993 .notifier_call = pseries_memory_notifier, 994 }; 995 996 static int __init pseries_memory_hotplug_init(void) 997 { 998 if (firmware_has_feature(FW_FEATURE_LPAR)) 999 of_reconfig_notifier_register(&pseries_mem_nb); 1000 1001 return 0; 1002 } 1003 machine_device_initcall(pseries, pseries_memory_hotplug_init); 1004