// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};


static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
	ssize_t n;
	cpumask_var_t mask;
	struct node *node_dev = to_node(dev);

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_to_pagebuf(list, buf, mask);
	free_cpumask_var(mask);

	return n;
}

static inline ssize_t cpumap_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	return node_read_cpumap(dev, false, buf);
}

static DEVICE_ATTR_RO(cpumap);

static inline ssize_t cpulist_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return node_read_cpumap(dev, true, buf);
}

static DEVICE_ATTR_RO(cpulist);

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}
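
/*
 * Illustrative layout (not normative; the names come from the code below):
 * node_init_node_access() hangs one "accessN" device off each node and
 * populates two link groups under it, e.g. for access class 0:
 *
 *	/sys/devices/system/node/nodeX/access0/initiators/
 *	/sys/devices/system/node/nodeX/access0/targets/
 */
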
static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
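
/*
 * Example (a sketch, not part of this driver): a firmware parser such as
 * the ACPI HMAT code could publish class-0 performance attributes for a
 * node roughly like this.  All values below are placeholders.
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth	 = 19200,
 *		.write_bandwidth = 19200,
 *		.read_latency	 = 120,
 *		.write_latency	 = 130,
 *	};
 *	node_set_perf_attrs(nid, &attrs, 0);
 *
 * The values then show up as read_bandwidth, write_latency, etc. under
 * the node's access0/initiators/ directory.
 */
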
/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);
	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto free_dev;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
free_name:
	kfree_const(dev->kobj.name);
free_dev:
	kfree(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto free_cache;

	info->cache_attrs = *cache_attrs;
	if (device_register(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto free_name;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
free_name:
	kfree_const(dev->kobj.name);
free_cache:
	kfree(info);
}
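
/*
 * Example (a sketch): a driver that discovers a memory-side cache could
 * describe a direct-mapped, write-back level 1 cache like this; the
 * size and line size are placeholders.
 *
 *	struct node_cache_attrs cache = {
 *		.level		= 1,
 *		.size		= SZ_1G,
 *		.line_size	= 64,
 *		.indexing	= NODE_CACHE_DIRECT_MAP,
 *		.write_policy	= NODE_CACHE_WRITE_BACK,
 *	};
 *	node_add_cache(nid, &cache);
 *
 * This creates /sys/devices/system/node/nodeX/memory_side_cache/index1/
 * exposing the attributes defined above.
 */
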
static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

/* Convert a page count to KiB: one page is (1 << PAGE_SHIFT) / 1024 KiB. */
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, 0UL,	/* NFS_Unstable is always zero now */
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS) *
				    HPAGE_PMD_NR),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
				    HPAGE_PMD_NR),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
				    HPAGE_PMD_NR),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS) *
				    HPAGE_PMD_NR),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
				    HPAGE_PMD_NR)
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_state(dev->id, NUMA_HIT),
			  sum_zone_numa_state(dev->id, NUMA_MISS),
			  sum_zone_numa_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     node_stat_name(i),
				     node_page_state_pages(pgdat, i));

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
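
/*
 * Illustrative read: on a two-node machine, node0's "distance" file
 * might contain "10 21" -- one entry per online node, where 10 is the
 * conventional local distance (cf. the ACPI SLIT).
 */
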
static struct attribute *node_dev_attrs[] = {
	&dev_attr_cpumap.attr,
	&dev_attr_cpulist.attr,
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};
ATTRIBUTE_GROUPS(node_dev);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif
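
/*
 * Example (a sketch; the real call site lives in the hugetlb code and
 * the handler names below are hypothetical): during its init, the
 * hugetlb subsystem hands both callbacks to this driver in one call:
 *
 *	register_hugetlbfs_with_node(my_hugetlb_register_node,
 *				     my_hugetlb_unregister_node);
 */
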
static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	/*
	 * We schedule the work only when a memory section is
	 * onlined/offlined on this node.  When we come here,
	 * all the memory on this node has been offlined,
	 * so no new work will be enqueued on this work item.
	 *
	 * The work item uses node->node_work, so flush it
	 * before freeing the node's memory.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
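
/*
 * Illustrative result: register_cpu_under_node(4, 0) cross-links the
 * two devices,
 *
 *	/sys/devices/system/node/node0/cpu4 -> .../cpu/cpu4
 *	/sys/devices/system/cpu/cpu4/node0  -> .../node/node0
 */
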
/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
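
/*
 * Illustrative result: register_memory_node_under_compute_node(1, 0, 0)
 * records that node 0 initiates class-0 accesses to memory on node 1:
 *
 *	node0/access0/targets/node1    -> node1
 *	node1/access0/initiators/node0 -> node0
 */
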
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk)
{
	int ret;

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node.
	 */
	mem_blk->nid = nid;

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * A memory block can have several absent sections from its
		 * start; skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We only need to check which node each page belongs to in
		 * the boot case, because nodes' memory ranges can be
		 * interleaved there.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans.  Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
		       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}
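
/*
 * Worked example of the units involved (assuming 4 KiB pages and
 * 128 MiB memory blocks): walk_memory_blocks() takes a physical base
 * and a size in bytes, so a node spanning pfns [0x80000, 0xc0000) is
 * walked as base 0x80000000 with size 0x40000000 -- i.e. 1 GiB, or
 * eight memory blocks.
 */
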
#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this
	 * so we try to register the attributes.  If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
    !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize the hugetlb work item used on memory hotplug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
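
/*
 * Illustrative reads (values depend on the machine): with two nodes,
 * /sys/devices/system/node/possible contains "0-1" and, if only node 0
 * has CPUs, /sys/devices/system/node/has_cpu contains "0".  Each file
 * is a node list printed with the "%*pbl" nodemask format.
 */
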
#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);