/linux/include/linux/topology.h
      80  DECLARE_PER_CPU(int, numa_node);
      86  return raw_cpu_read(numa_node);  in numa_node_id()
      93  return per_cpu(numa_node, cpu);  in cpu_to_node()
     100  this_cpu_write(numa_node, node);  in set_numa_node()
     107  per_cpu(numa_node, cpu) = node;  in set_cpu_numa_node()
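
topology.h defines the per-CPU numa_node variable and its accessors: numa_node_id() reads the current CPU's node, cpu_to_node() reads another CPU's, and set_numa_node()/set_cpu_numa_node() are used by early setup code to populate the mapping. A minimal sketch of reading that mapping follows; demo_dump_cpu_nodes is a hypothetical name, and the exact behavior depends on CONFIG_NUMA and CONFIG_USE_PERCPU_NUMA_NODE_ID.

/*
 * Hypothetical helper, shown only to illustrate the accessors above:
 * report the caller's node and every online CPU's node.
 */
#include <linux/topology.h>	/* numa_node_id(), cpu_to_node() */
#include <linux/cpumask.h>	/* for_each_online_cpu() */
#include <linux/printk.h>

static void demo_dump_cpu_nodes(void)
{
	int cpu;

	/* Node of whichever CPU this code happens to run on. */
	pr_info("caller is on NUMA node %d\n", numa_node_id());

	/* Mapping populated via set_numa_node()/set_cpu_numa_node() at boot. */
	for_each_online_cpu(cpu)
		pr_info("cpu%d -> node %d\n", cpu, cpu_to_node(cpu));
}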
|
/linux/Documentation/driver-api/cxl/theory-of-operation.rst
      64  "numa_node":1,
      76  "numa_node":1,
      94  "numa_node":1,
     106  "numa_node":1,
     130  "numa_node":0,
     142  "numa_node":0,
     160  "numa_node":0,
     172  "numa_node":0,
     243  "numa_node":0,
     270  "numa_node":0,
    [all …]
|
/linux/drivers/virt/nitro_enclaves/ne_misc_dev.c
     124  int numa_node;  member
     186  int numa_node = -1;  in ne_setup_cpu_pool()  local
     228  if (numa_node < 0) {  in ne_setup_cpu_pool()
     229  numa_node = cpu_to_node(cpu);  in ne_setup_cpu_pool()
     230  if (numa_node < 0) {  in ne_setup_cpu_pool()
     232  ne_misc_dev.name, numa_node);  in ne_setup_cpu_pool()
     239  if (numa_node != cpu_to_node(cpu)) {  in ne_setup_cpu_pool()
     353  ne_cpu_pool.numa_node = numa_node;  in ne_setup_cpu_pool()
     373  ne_cpu_pool.numa_node = -1;  in ne_setup_cpu_pool()
     416  ne_cpu_pool.numa_node = -1;  in ne_teardown_cpu_pool()
    [all …]

/linux/drivers/virt/nitro_enclaves/ne_misc_dev.h
      77  int numa_node;  member
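
ne_setup_cpu_pool() pins the enclave CPU pool to a single NUMA node: cpu_to_node() on the first CPU fixes ne_cpu_pool.numa_node, and any CPU from a different node is rejected. A hedged sketch of that single-node check; demo_pool_node is a hypothetical helper, not the driver's code.

/*
 * Verify that every CPU in a pool sits on one NUMA node, mirroring the
 * check in ne_setup_cpu_pool(). Returns the node, or -EINVAL on a mix.
 */
#include <linux/cpumask.h>
#include <linux/topology.h>	/* cpu_to_node() */
#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <linux/errno.h>

static int demo_pool_node(const struct cpumask *pool)
{
	int node = NUMA_NO_NODE;
	int cpu;

	for_each_cpu(cpu, pool) {
		if (node == NUMA_NO_NODE)
			node = cpu_to_node(cpu);	/* first CPU fixes the node */
		else if (cpu_to_node(cpu) != node)
			return -EINVAL;			/* pool spans two nodes */
	}

	return node;	/* NUMA_NO_NODE if the mask was empty */
}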
|
/linux/drivers/net/ethernet/fungible/funeth/funeth_rx.c
     622  int numa_node;  in fun_rxq_create_sw()  local
     624  numa_node = fun_irq_node(irq);  in fun_rxq_create_sw()
     625  q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);  in fun_rxq_create_sw()
     633  q->numa_node = numa_node;  in fun_rxq_create_sw()
     639  sizeof(*q->bufs), false, numa_node,  in fun_rxq_create_sw()
     645  false, numa_node, &q->cq_dma_addr, NULL,  in fun_rxq_create_sw()
     650  err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);  in fun_rxq_create_sw()
     654  err = fun_rxq_alloc_bufs(q, numa_node);  in fun_rxq_create_sw()
     751  q->numa_node, q->headroom);  in fun_rxq_create_dev()

/linux/drivers/net/ethernet/fungible/funeth/funeth_tx.c
     630  int numa_node;  in fun_txq_create_sw()  local
     633  numa_node = fun_irq_node(irq);  /* skb Tx queue */  in fun_txq_create_sw()
     635  numa_node = cpu_to_node(qidx);  /* XDP Tx queue */  in fun_txq_create_sw()
     637  q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);  in fun_txq_create_sw()
     643  sizeof(*q->info), true, numa_node,  in fun_txq_create_sw()
     652  q->numa_node = numa_node;  in fun_txq_create_sw()
     719  q->ethid, q->numa_node);  in fun_txq_create_dev()

/linux/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
     126  int numa_node;  member
     196  int numa_node;  member
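
funeth resolves a queue's home node before allocating anything: fun_irq_node(irq) for queues tied to an interrupt, cpu_to_node(qidx) for XDP Tx queues, and every subsequent allocation (queue struct, buffer metadata, caches) is passed that node via kzalloc_node() and friends. A sketch of the same placement pattern under assumed, hypothetical names (demo_rxq, demo_rxq_create):

/*
 * Place a queue's bookkeeping on the node that will service its
 * interrupt, as funeth does before calling kzalloc_node().
 */
#include <linux/slab.h>		/* kzalloc_node(), kcalloc_node() */
#include <linux/topology.h>	/* cpu_to_node() */

struct demo_rxq {
	int numa_node;
	void **bufs;
};

static struct demo_rxq *demo_rxq_create(int irq_cpu, size_t nbufs)
{
	int node = cpu_to_node(irq_cpu);
	struct demo_rxq *q;

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, node);
	if (!q)
		return NULL;
	q->numa_node = node;

	/* Buffer metadata lands on the same node as the queue itself. */
	q->bufs = kcalloc_node(nbufs, sizeof(*q->bufs), GFP_KERNEL, node);
	if (!q->bufs) {
		kfree(q);
		return NULL;
	}
	return q;
}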
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
     682  int numa_node,  in hws_send_ring_alloc_sq()  argument
     696  param.db_numa_node = numa_node;  in hws_send_ring_alloc_sq()
     697  param.buf_numa_node = numa_node;  in hws_send_ring_alloc_sq()
     836  int numa_node,  in hws_send_ring_open_sq()  argument
     857  err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);  in hws_send_ring_open_sq()
     877  int numa_node,  in hws_send_ring_alloc_cq()  argument
     888  param.buf_numa_node = numa_node;  in hws_send_ring_alloc_cq()
     889  param.db_numa_node = numa_node;  in hws_send_ring_alloc_cq()
     949  int numa_node,  in hws_send_ring_open_cq()  argument
     962  err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);  in hws_send_ring_open_cq()
    [all …]
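
The hws send-ring code threads the caller's numa_node into the work-queue creation parameters (param.buf_numa_node and param.db_numa_node) so both the ring buffer and its doorbell record are allocated near the queue's users. A generic sketch of carrying the node through a parameter struct; the types and helpers here (demo_ring_param, demo_ring_alloc) are hypothetical and not the mlx5 API.

/*
 * Carry the target node in the creation parameters so every allocation
 * down the call chain can honor it.
 */
#include <linux/slab.h>
#include <linux/mm.h>		/* kvzalloc_node(), kvfree() */
#include <linux/errno.h>

struct demo_ring_param {
	int buf_numa_node;	/* where descriptor memory should live */
	int db_numa_node;	/* where the doorbell record should live */
	size_t nent;
};

struct demo_ring {
	void *descs;
	void *db;
};

static int demo_ring_alloc(struct demo_ring *ring,
			   const struct demo_ring_param *param)
{
	ring->descs = kvzalloc_node(param->nent * 64, GFP_KERNEL,
				    param->buf_numa_node);
	ring->db = kzalloc_node(64, GFP_KERNEL, param->db_numa_node);
	if (!ring->descs || !ring->db) {
		kvfree(ring->descs);
		kfree(ring->db);
		return -ENOMEM;
	}
	return 0;
}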
|
/linux/arch/sparc/kernel/pci.c
     255  int numa_node)  in pci_init_dev_archdata()  argument
     261  sd->numa_node = numa_node;  in pci_init_dev_archdata()
     280  pbm->numa_node);  in of_create_pci_dev()
     284  sd->numa_node = pbm->numa_node;  in of_create_pci_dev()
     744  return pbm->numa_node;  in pcibus_to_node()
     858  psd->numa_node);  in pcibios_device_add()

/linux/arch/sparc/kernel/of_device_common.c
      69  op->dev.archdata.numa_node = bus_sd->numa_node;  in of_propagate_archdata()

/linux/arch/sparc/kernel/iommu.c
      95  int numa_node)  in iommu_table_init()  argument
     111  iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);  in iommu_table_init()
     122  page = alloc_pages_node(numa_node, GFP_KERNEL, 0);  in iommu_table_init()
     133  page = alloc_pages_node(numa_node, GFP_KERNEL, order);  in iommu_table_init()
     213  nid = dev->archdata.numa_node;  in dma_4u_alloc_coherent()
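
On sparc the node travels with the device: pci_init_dev_archdata() and of_propagate_archdata() copy the bus's numa_node into dev->archdata, and iommu_table_init() then uses it with kzalloc_node() and alloc_pages_node() so the IOMMU map lives next to its controller. A sketch of node-preferred page allocation with a fallback; demo_alloc_table is a hypothetical helper.

/*
 * Allocate a table on a preferred node, falling back to the local node
 * when the preferred one has no memory.
 */
#include <linux/gfp.h>		/* alloc_pages_node() */
#include <linux/mm.h>		/* page_address() */
#include <linux/numa.h>		/* NUMA_NO_NODE */

static void *demo_alloc_table(int node, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page && node != NUMA_NO_NODE)
		/* Preferred node is out of memory; take any node. */
		page = alloc_pages_node(NUMA_NO_NODE,
					GFP_KERNEL | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}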
|
/linux/drivers/nvdimm/of_pmem.c
      63  ndr_desc.numa_node = dev_to_node(&pdev->dev);  in of_pmem_region_probe()
      64  ndr_desc.target_node = ndr_desc.numa_node;  in of_pmem_region_probe()
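
of_pmem derives the region's node directly from the platform device with dev_to_node() and reuses it as target_node, since device-tree pmem has no separate performance-target domain. A minimal sketch of the same derivation; demo_fill_region_node is a hypothetical helper.

#include <linux/device.h>		/* dev_to_node() */
#include <linux/platform_device.h>

static void demo_fill_region_node(struct platform_device *pdev,
				  int *numa_node, int *target_node)
{
	/* Node of the device's firmware-described proximity domain. */
	*numa_node = dev_to_node(&pdev->dev);
	/* No separate target domain here, so reuse the same node. */
	*target_node = *numa_node;
}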
|
/linux/Documentation/driver-api/cxl/linux/example-configurations/multi-interleave.rst
      96  "numa_node":0,
     122  "numa_node":0,
     216  "numa_node":0,
     242  "numa_node":0,
|
/linux/kernel/bpf/ringbuf.c
      92  static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)  in bpf_ringbuf_area_alloc()  argument
     122  pages = bpf_map_area_alloc(array_size, numa_node);  in bpf_ringbuf_area_alloc()
     127  page = alloc_pages_node(numa_node, flags, 0);  in bpf_ringbuf_area_alloc()
     171  static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node, bool overwrite_mode)  in bpf_ringbuf_alloc()  argument
     175  rb = bpf_ringbuf_area_alloc(data_sz, numa_node);  in bpf_ringbuf_alloc()
     218  rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node, overwrite_mode);  in ringbuf_map_alloc()

/linux/kernel/bpf/bloom_filter.c
      98  int numa_node = bpf_map_attr_numa_node(attr);  in bloom_map_alloc()  local
     146  bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);  in bloom_map_alloc()

/linux/kernel/bpf/queue_stack_maps.c
      67  int numa_node = bpf_map_attr_numa_node(attr);  in queue_stack_map_alloc()  local
      74  qs = bpf_map_area_alloc(queue_size, numa_node);  in queue_stack_map_alloc()
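
All three map types follow the same recipe: read the node requested at map-creation time with bpf_map_attr_numa_node(attr) and hand it to bpf_map_area_alloc() or alloc_pages_node(). From userspace the node is requested when the map is created; a sketch assuming libbpf's bpf_map_create() is available (the BPF_F_NUMA_NODE flag must be set for numa_node to be honored).

/* Userspace C: ask the kernel to place a queue map's memory on node 0. */
#include <bpf/bpf.h>		/* bpf_map_create(), LIBBPF_OPTS() */
#include <linux/bpf.h>		/* BPF_F_NUMA_NODE, BPF_MAP_TYPE_QUEUE */
#include <stdio.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_flags = BPF_F_NUMA_NODE,	/* required for numa_node */
		    .numa_node = 0);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "demo_q",
			    0 /* key_size: none for a queue */,
			    4 /* value_size */, 64 /* max_entries */, &opts);
	if (fd < 0) {
		perror("bpf_map_create");
		return 1;
	}
	printf("queue map allocated on node 0, fd=%d\n", fd);
	return 0;
}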
|
/linux/tools/perf/util/env.h
      29  struct numa_node {  struct
     101  struct numa_node *numa_nodes;
|
/linux/net/xdp/xskmap.c
      67  int numa_node;  in xsk_map_alloc()  local
      75  numa_node = bpf_map_attr_numa_node(attr);  in xsk_map_alloc()
      78  m = bpf_map_area_alloc(size, numa_node);  in xsk_map_alloc()
|
/linux/arch/sparc/include/asm/device.h
      18  int numa_node;  member

/linux/arch/sparc/include/asm/iommu_64.h
      91  int numa_node);
|
/linux/drivers/net/ethernet/amazon/ena/ena_eth_com.h
     173  u8 numa_node)  in ena_com_update_numa_node()  argument
     180  numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)  in ena_com_update_numa_node()

/linux/drivers/net/ethernet/amazon/ena/ena_netdev.c
     178  ring->numa_node = 0;  in ena_init_io_rings_common()
     282  tx_ring->numa_node = node;  in ena_setup_tx_resources()
     414  rx_ring->numa_node = node;  in ena_setup_rx_resources()
    1405  int numa_node;  in ena_update_ring_numa_node()  local
    1415  numa_node = cpu_to_node(cpu);  in ena_update_ring_numa_node()
    1417  if (likely(tx_ring->numa_node == numa_node))  in ena_update_ring_numa_node()
    1422  if (numa_node != NUMA_NO_NODE) {  in ena_update_ring_numa_node()
    1423  ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);  in ena_update_ring_numa_node()
    1424  tx_ring->numa_node = numa_node;  in ena_update_ring_numa_node()
    1426  rx_ring->numa_node = numa_node;  in ena_update_ring_numa_node()
    [all …]
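
The ENA driver starts every ring on node 0, records the real node when resources are set up, and ena_update_ring_numa_node() later re-checks from the datapath whether the servicing CPU moved to a different node, reprogramming the device placement when it did. A generic sketch of that migration check; demo_ring and demo_ring_check_node are hypothetical.

/*
 * From the polling path, notice that the servicing CPU migrated to
 * another node and record the new home node so follow-up placement
 * hints can be updated.
 */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/topology.h>	/* cpu_to_node() */
#include <linux/numa.h>		/* NUMA_NO_NODE */

struct demo_ring {
	int numa_node;
};

static void demo_ring_check_node(struct demo_ring *ring)
{
	int node = cpu_to_node(smp_processor_id());

	if (likely(ring->numa_node == node) || node == NUMA_NO_NODE)
		return;

	/* The IRQ/NAPI context moved: remember the new home node. */
	ring->numa_node = node;
}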
|
/linux/drivers/hv/channel_mgmt.c
     749  int numa_node;  in init_vp_index()  local
     770  numa_node = next_numa_node_id++;  in init_vp_index()
     771  if (numa_node == nr_node_ids) {  in init_vp_index()
     775  if (cpumask_empty(cpumask_of_node(numa_node)))  in init_vp_index()
     779  allocated_mask = &hv_context.hv_numa_map[numa_node];  in init_vp_index()
     782  cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));  in init_vp_index()
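
init_vp_index() spreads channel interrupts by walking NUMA nodes round-robin, skipping nodes with no CPUs, and then picking an unused CPU out of cpumask_of_node() via the per-node allocated mask. A sketch of the round-robin node walk only; demo_next_node is hypothetical and assumes at least one node has online CPUs.

#include <linux/cpumask.h>
#include <linux/nodemask.h>	/* nr_node_ids */
#include <linux/topology.h>	/* cpumask_of_node() */

static int demo_next_node(int *next_node)
{
	int node;

	for (;;) {
		node = (*next_node)++;
		if (node >= nr_node_ids) {	/* walked past the last node */
			*next_node = 0;
			continue;
		}
		if (!cpumask_empty(cpumask_of_node(node)))
			return node;		/* node has CPUs to pick from */
	}
}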
|
/linux/drivers/scsi/elx/efct/efct_driver.h
      61  u32 numa_node;  member
|
/linux/include/uapi/rdma/hfi/hfi1_ioctl.h
      90  __u16 numa_node; /* NUMA node of the assigned device */  member
|