/linux/arch/x86/platform/uv/uv_time.c
    50   int next_cpu;  (member)
    159  head->next_cpu = -1;  in uv_rtc_allocate_timers()
    176  head->next_cpu = -1;  in uv_rtc_find_next_timer()
    185  head->next_cpu = bcpu;  in uv_rtc_find_next_timer()
    209  int next_cpu;  in uv_rtc_set_timer() (local)
    213  next_cpu = head->next_cpu;  in uv_rtc_set_timer()
    217  if (next_cpu < 0 || bcpu == next_cpu ||  in uv_rtc_set_timer()
    218      expires < head->cpu[next_cpu].expires) {  in uv_rtc_set_timer()
    219  head->next_cpu = bcpu;  in uv_rtc_set_timer()
    249  if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)  in uv_rtc_unset_timer()
    [all …]
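
Taken together, these hits show uv_time.c caching, in each per-node timer head, which CPU owns the earliest-expiring timer, so uv_rtc_set_timer() only has to reprogram the shared hardware comparator when a newly armed timer becomes the earliest. A minimal user-space model of that bookkeeping follows; the struct layout and names are simplifications for illustration, not the kernel's.

```c
#include <stdio.h>

#define NCPUS 4

/* Per-node "head": caches which CPU owns the earliest-expiring timer
 * so the shared comparator only needs reprogramming when the earliest
 * deadline changes. */
struct timer_head {
    int next_cpu;          /* CPU with the earliest expiry, -1 if none */
    unsigned long long expires[NCPUS];
};

/* Arm @cpu's timer; refresh the cached earliest CPU when the new
 * deadline beats the cached one (the kernel rescans on removal). */
static void set_timer(struct timer_head *head, int cpu,
                      unsigned long long expires)
{
    int next_cpu = head->next_cpu;

    head->expires[cpu] = expires;
    if (next_cpu < 0 || cpu == next_cpu ||
        expires < head->expires[next_cpu])
        head->next_cpu = cpu;
}

int main(void)
{
    struct timer_head head = { .next_cpu = -1 };

    set_timer(&head, 2, 500);
    set_timer(&head, 0, 100);
    set_timer(&head, 3, 900);
    printf("earliest timer is on CPU %d\n", head.next_cpu); /* CPU 0 */
    return 0;
}
```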

/linux/tools/testing/selftests/bpf/test_lru_map.c
    165  int next_cpu = 0;  in test_lru_sanity0() (local)
    170  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity0()
    258  int next_cpu = 0;  in test_lru_sanity1() (local)
    267  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity1()
    327  int next_cpu = 0;  in test_lru_sanity2() (local)
    336  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity2()
    432  int next_cpu = 0;  in test_lru_sanity3() (local)
    441  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity3()
    492  int next_cpu = 0;  in test_lru_sanity4() (local)
    497  assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity4()
    [all …]
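
Every LRU-map test here starts from `int next_cpu = 0` and calls `sched_next_online()` to pin itself to an online CPU before exercising the per-CPU LRU map. A plausible user-space sketch of such a helper, assuming it simply tries `sched_setaffinity()` on successive CPUs and advances the cursor; details may differ from the selftest's exact implementation:

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Pin @pid (0 == calling thread) to the next bindable CPU at or after
 * *next_cpu, then advance the cursor past it. Returns -1 when no CPU
 * at or after the cursor accepts the affinity request. */
static int sched_next_online(int pid, int *next_cpu)
{
    cpu_set_t mask;
    int nr = sysconf(_SC_NPROCESSORS_CONF);

    for (int cpu = *next_cpu; cpu < nr; cpu++) {
        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        if (!sched_setaffinity(pid, sizeof(mask), &mask)) {
            *next_cpu = cpu + 1;  /* next caller starts after this CPU */
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    int next_cpu = 0;

    if (sched_next_online(0, &next_cpu) != -1)
        printf("pinned to CPU %d\n", next_cpu - 1);
    return 0;
}
```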

/linux/tools/testing/selftests/bpf/bench.c
    467  static int next_cpu(struct cpu_set *cpu_set)  (function)
    473  for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {  in next_cpu()
    475  cpu_set->next_cpu = i + 1;  in next_cpu()
    483  return cpu_set->next_cpu++ % env.nr_cpus;  in next_cpu()
    703  next_cpu(&env.cons_cpus));  in collect_measurements()
    710  env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;  in collect_measurements()
    726  next_cpu(&env.prod_cpus));  in main()
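
These hits outline a cursor-based picker: `next_cpu()` walks an explicitly configured CPU set from `cpu_set->next_cpu`, and falls back to plain round-robin (`next_cpu++ % env.nr_cpus`) when no set was given. A self-contained sketch of that shape; the struct layout and error handling here are assumptions:

```c
#include <stdbool.h>
#include <stdio.h>

struct cpu_set {
    bool *cpus;       /* explicit membership map, or NULL */
    int cpus_len;
    int next_cpu;     /* iteration cursor */
};

static int nr_cpus = 8;   /* stands in for env.nr_cpus */

static int next_cpu(struct cpu_set *cpu_set)
{
    if (cpu_set->cpus) {
        /* resume scanning the explicit set from the cursor */
        for (int i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {
            if (cpu_set->cpus[i]) {
                cpu_set->next_cpu = i + 1;
                return i;
            }
        }
        fprintf(stderr, "exhausted the configured CPU set\n");
        return -1;
    }
    /* no explicit set: plain round-robin over all CPUs */
    return cpu_set->next_cpu++ % nr_cpus;
}

int main(void)
{
    struct cpu_set producers = { 0 };

    for (int i = 0; i < 10; i++)
        printf("producer %d -> CPU %d\n", i, next_cpu(&producers));
    return 0;
}
```

Copying `cons_cpus.next_cpu` into `prod_cpus.next_cpu` (hit 710) lets the producer threads continue the round-robin where the consumers stopped, rather than stacking both groups onto the same CPUs.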

/linux/tools/testing/selftests/bpf/bench.h
    18   int next_cpu;  (member)

/linux/arch/parisc/kernel/irq.c
    324  static int next_cpu = -1;  in txn_alloc_addr() (local)
    326  next_cpu++;  /* assign to "next" CPU we want this bugger on */  in txn_alloc_addr()
    329  while ((next_cpu < nr_cpu_ids) &&  in txn_alloc_addr()
    330      (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
    331      !cpu_online(next_cpu)))  in txn_alloc_addr()
    332      next_cpu++;  in txn_alloc_addr()
    334  if (next_cpu >= nr_cpu_ids)  in txn_alloc_addr()
    335      next_cpu = 0;  /* nothing else, assign monarch */  in txn_alloc_addr()
    337  return txn_affinity_addr(virt_irq, next_cpu);  in txn_alloc_addr()
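
txn_alloc_addr() spreads interrupt transaction targets across CPUs: bump a static cursor, skip CPUs that are offline or lack a `txn_addr`, and wrap to CPU 0 (the "monarch") when the scan runs off the end. A user-space model of that loop, with a toy usability test standing in for the kernel's checks:

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPU_IDS 8

/* stand-in for the kernel's txn_addr/cpu_online() tests */
static bool cpu_usable(int cpu)
{
    return cpu % 2 == 0;   /* pretend odd-numbered CPUs are offline */
}

static int pick_next_cpu(void)
{
    static int next_cpu = -1;

    next_cpu++;                        /* start after the last choice */
    while (next_cpu < NR_CPU_IDS && !cpu_usable(next_cpu))
        next_cpu++;
    if (next_cpu >= NR_CPU_IDS)
        next_cpu = 0;                  /* nothing else: wrap to the monarch */
    return next_cpu;
}

int main(void)
{
    for (int i = 0; i < 6; i++)
        printf("interrupt %d -> CPU %d\n", i, pick_next_cpu());
    return 0;
}
```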

/linux/kernel/trace/trace_hwlat.c
    316  int next_cpu;  in move_to_next_cpu() (local)
    328  next_cpu = cpumask_next_wrap(raw_smp_processor_id(), current_mask);  in move_to_next_cpu()
    331  if (next_cpu >= nr_cpu_ids)  /* Shouldn't happen! */  in move_to_next_cpu()
    335  cpumask_set_cpu(next_cpu, current_mask);  in move_to_next_cpu()
    421  int next_cpu;  in start_single_kthread() (local)
    438  next_cpu = cpumask_first(current_mask);  in start_single_kthread()
    440  cpumask_set_cpu(next_cpu, current_mask);  in start_single_kthread()
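
The hwlat tracer migrates its sampling kthread each pass: move_to_next_cpu() asks cpumask_next_wrap() for the CPU after the current one in the tracing mask, wrapping at the end. A stand-in for that wrap helper over a plain bitmask; this is a user-space sketch, not the kernel's cpumask API:

```c
#include <stdio.h>

#define NR_CPUS 8

/* find the next set bit strictly after @cpu, wrapping around once;
 * returns NR_CPUS if the mask is empty ("shouldn't happen" upstream) */
static int mask_next_wrap(int cpu, unsigned int mask)
{
    for (int i = 1; i <= NR_CPUS; i++) {
        int candidate = (cpu + i) % NR_CPUS;

        if (mask & (1u << candidate))
            return candidate;
    }
    return NR_CPUS;
}

int main(void)
{
    unsigned int tracing_mask = 0b10110101;   /* CPUs 0,2,4,5,7 */
    int cpu = 0;

    for (int pass = 0; pass < 6; pass++) {
        cpu = mask_next_wrap(cpu, tracing_mask);
        printf("pass %d samples on CPU %d\n", pass, cpu);
    }
    return 0;
}
```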

/linux/kernel/trace/trace_entries.h
    173  __field( unsigned int, next_cpu ) \
    190  __entry->next_cpu)
    208  __entry->next_cpu)

/linux/kernel/trace/trace_sched_wakeup.c
    408  entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    434  entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()

/linux/kernel/trace/trace_output.c
    1255  field->next_cpu,  in trace_ctxwake_print()
    1289  field->next_cpu,  in trace_ctxwake_raw()
    1325  SEQ_PUT_HEX_FIELD(s, field->next_cpu);  in trace_ctxwake_hex()
    1356  SEQ_PUT_FIELD(s, field->next_cpu);  in trace_ctxwake_bin()

/linux/kernel/trace/trace.c
    3591  int next_cpu = -1;  in __find_next_entry() (local)
    3621  next_cpu = cpu;  in __find_next_entry()
    3631  *ent_cpu = next_cpu;  in __find_next_entry()
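
__find_next_entry() merges per-CPU ring buffers by timestamp: it scans every CPU's next pending entry and records in `next_cpu` the CPU holding the oldest one. A simplified model of that min-scan, with arrays replacing the ring buffers and illustrative names:

```c
#include <stdio.h>

#define NR_CPUS 4
#define TS_NONE (~0ULL)

/* next pending entry timestamp per CPU (TS_NONE == buffer empty) */
static unsigned long long peek_ts[NR_CPUS] = { 700, TS_NONE, 300, 520 };

static int find_next_entry(int *ent_cpu)
{
    unsigned long long next_ts = TS_NONE;
    int next_cpu = -1;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (peek_ts[cpu] == TS_NONE)
            continue;              /* nothing buffered on this CPU */
        if (peek_ts[cpu] < next_ts) {
            next_ts = peek_ts[cpu];
            next_cpu = cpu;        /* current earliest entry */
        }
    }
    *ent_cpu = next_cpu;
    return next_cpu >= 0;
}

int main(void)
{
    int cpu;

    if (find_next_entry(&cpu))
        printf("oldest trace entry is on CPU %d\n", cpu);  /* CPU 2 */
    return 0;
}
```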

/linux/tools/testing/selftests/kvm/rseq_test.c
    51   static int next_cpu(int cpu)  (function)
    82   for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {  in migration_worker()
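
Here `next_cpu()` feeds migration_worker() a cycle through the task's allowed CPUs, wrapping from the highest back to `min_cpu`. A sketch of that helper under assumed globals (`possible_mask`, `min_cpu`, `max_cpu`) mirroring the test's setup:

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static cpu_set_t possible_mask;   /* the task's original affinity set */
static int min_cpu, max_cpu;

static int next_cpu(int cpu)
{
    /* skip CPUs outside the allowed set; wrap at the top */
    do {
        cpu++;
        if (cpu > max_cpu) {
            cpu = min_cpu;
            break;
        }
    } while (!CPU_ISSET(cpu, &possible_mask));

    return cpu;
}

int main(void)
{
    CPU_ZERO(&possible_mask);
    CPU_SET(1, &possible_mask);
    CPU_SET(3, &possible_mask);
    CPU_SET(4, &possible_mask);
    min_cpu = 1;
    max_cpu = 4;

    int cpu = min_cpu;
    for (int i = 0; i < 6; i++) {
        printf("migration %d -> CPU %d\n", i, cpu);  /* 1,3,4,1,3,4 */
        cpu = next_cpu(cpu);
    }
    return 0;
}
```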

/linux/arch/powerpc/lib/qspinlock.c
    688  int next_cpu = next->cpu;  in queued_spin_lock_mcs_queue() (local)
    692  if (vcpu_is_preempted(next_cpu))  in queued_spin_lock_mcs_queue()
    693      prod_cpu(next_cpu);  in queued_spin_lock_mcs_queue()

/linux/kernel/time/tick-broadcast.c
    694  int cpu, next_cpu = 0;  in tick_handle_oneshot_broadcast() (local)
    723  next_cpu = cpu;  in tick_handle_oneshot_broadcast()
    760  tick_broadcast_set_event(dev, next_cpu, next_event);  in tick_handle_oneshot_broadcast()

/linux/kernel/time/clocksource.c
    428  int next_cpu, reset_pending;  in clocksource_watchdog() (local)
    586  next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);  in clocksource_watchdog()
    594  add_timer_on(&watchdog_timer, next_cpu);  in clocksource_watchdog()

/linux/block/blk-mq.c
    2244  * ->next_cpu is always calculated from hctx->cpumask, so simply use  (comment)
    2249  return hctx->next_cpu >= nr_cpu_ids;  in blk_mq_hctx_empty_cpumask()
    2261  int next_cpu = hctx->next_cpu;  in blk_mq_hctx_next_cpu() (local)
    2269  next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,  in blk_mq_hctx_next_cpu()
    2271  if (next_cpu >= nr_cpu_ids)  in blk_mq_hctx_next_cpu()
    2272      next_cpu = blk_mq_first_mapped_cpu(hctx);  in blk_mq_hctx_next_cpu()
    2280  if (!cpu_online(next_cpu)) {  in blk_mq_hctx_next_cpu()
    2290  hctx->next_cpu …  in blk_mq_hctx_next_cpu()
    [all …]
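
blk_mq_hctx_next_cpu() keeps a per-hctx cursor and advances it with cpumask_next_and() over the hctx's CPU mask and the online mask, wrapping via blk_mq_first_mapped_cpu(); the truncated hit at 2290 is where the cursor is stored back. A bitmask-based sketch of that selection; the real code also batches several dispatches per CPU and special-cases offline CPUs, both omitted here:

```c
#include <stdio.h>

#define NR_CPU_IDS 8

struct hctx {
    unsigned int cpumask;   /* CPUs mapped to this hardware queue */
    int next_cpu;           /* round-robin cursor */
};

static unsigned int online_mask = 0b11011110;   /* CPU 0 and 5 offline */

/* next set bit after @cpu present in both masks, else NR_CPU_IDS */
static int next_and(int cpu, unsigned int a, unsigned int b)
{
    for (int i = cpu + 1; i < NR_CPU_IDS; i++)
        if (a & b & (1u << i))
            return i;
    return NR_CPU_IDS;
}

static int first_mapped_cpu(struct hctx *hctx)
{
    return next_and(-1, hctx->cpumask, online_mask);
}

static int hctx_next_cpu(struct hctx *hctx)
{
    int next_cpu = next_and(hctx->next_cpu, hctx->cpumask, online_mask);

    if (next_cpu >= NR_CPU_IDS)
        next_cpu = first_mapped_cpu(hctx);   /* wrap around */
    hctx->next_cpu = next_cpu;               /* store the cursor back */
    return next_cpu;
}

int main(void)
{
    struct hctx hctx = { .cpumask = 0b00111100, .next_cpu = -1 };

    for (int i = 0; i < 6; i++)   /* cycles over CPUs 2,3,4 */
        printf("dispatch %d runs on CPU %d\n", i, hctx_next_cpu(&hctx));
    return 0;
}
```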

/linux/arch/powerpc/mm/book3s64/hash_utils.c
    1296  int next_cpu;  in stress_hpt_timer_fn() (local)
    1302  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in stress_hpt_timer_fn()
    1303  if (next_cpu >= nr_cpu_ids)  in stress_hpt_timer_fn()
    1304      next_cpu = cpumask_first(cpu_online_mask);  in stress_hpt_timer_fn()
    1306  add_timer_on(&stress_hpt_timer, next_cpu);  in stress_hpt_timer_fn()
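
stress_hpt_timer_fn() shows the common timer-hop idiom, the same shape as the clocksource watchdog above: when the timer fires, take `cpumask_next()` after the current CPU, wrap with `cpumask_first()`, and re-arm there with add_timer_on(). Modeled over a bitmask in user space, with the online mask and helpers as stand-ins:

```c
#include <stdio.h>

#define NR_CPU_IDS 8

static unsigned int online = 0b01011011;   /* CPUs 0,1,3,4,6 */

/* next set bit strictly after @cpu, else NR_CPU_IDS */
static int mask_next(int cpu, unsigned int mask)
{
    for (int i = cpu + 1; i < NR_CPU_IDS; i++)
        if (mask & (1u << i))
            return i;
    return NR_CPU_IDS;
}

/* one timer expiry: pick where to re-arm (add_timer_on() target) */
static int timer_fn(int this_cpu)
{
    int next_cpu = mask_next(this_cpu, online);

    if (next_cpu >= NR_CPU_IDS)
        next_cpu = mask_next(-1, online);   /* cpumask_first() */
    return next_cpu;
}

int main(void)
{
    int cpu = 0;

    for (int tick = 0; tick < 7; tick++) {
        cpu = timer_fn(cpu);
        printf("tick %d: timer re-armed on CPU %d\n", tick, cpu);
    }
    return 0;
}
```

Hopping the timer on every expiry keeps the periodic work from repeatedly disturbing one CPU, which matters for both the watchdog and the HPT stress test.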

/linux/include/linux/blk-mq.h
    349  int next_cpu;  (member)

/linux/net/core/dev.c
    4978  struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,  in set_rps_cpu() (argument)
    4981  if (next_cpu < nr_cpu_ids) {  in set_rps_cpu()
    4996  rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);  in set_rps_cpu()
    5012  next_cpu == tmp_cpu)  in set_rps_cpu()
    5031  head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);  in set_rps_cpu()
    5035  WRITE_ONCE(rflow->cpu, next_cpu);  in set_rps_cpu()
    5084  u32 next_cpu;  in get_rps_cpu() (local)
    5094  next_cpu = ident & net_hotdata.rps_cpu_mask;  in get_rps_cpu()
    5114  if (unlikely(tcpu != next_cpu) &&  in get_rps_cpu()
    5118  tcpu = next_cpu;  in get_rps_cpu()
    [all …]
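
In get_rps_cpu(), `next_cpu` is extracted from the flow-table ident and names the CPU where the flow's consumer last ran; the flow is only re-steered there once the old CPU has drained every packet the flow queued, so migration cannot reorder packets. A deliberately simplified model of that decision, with plain counters standing in for the kernel's per-CPU queue heads:

```c
#include <stdio.h>

#define NR_CPUS 4

struct flow {
    int cpu;                  /* CPU currently steering this flow */
    unsigned int last_qtail;  /* queue position of its last packet */
};

static unsigned int input_queue_head[NR_CPUS];  /* packets consumed */

static int steer(struct flow *flow, int next_cpu)
{
    int tcpu = flow->cpu;

    /* migrate only if the old CPU is invalid or fully drained */
    if (tcpu != next_cpu &&
        (tcpu < 0 ||
         (int)(input_queue_head[tcpu] - flow->last_qtail) >= 0)) {
        flow->cpu = next_cpu;
        tcpu = next_cpu;
    }
    return tcpu;
}

int main(void)
{
    struct flow f = { .cpu = 0, .last_qtail = 10 };

    input_queue_head[0] = 8;   /* CPU 0 still has flow packets queued */
    printf("steered to CPU %d\n", steer(&f, 2));   /* stays on 0 */

    input_queue_head[0] = 12;  /* now fully drained */
    printf("steered to CPU %d\n", steer(&f, 2));   /* migrates to 2 */
    return 0;
}
```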

/linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c
    2464  u32 next_cpu = desc->txd2;  in mtk_poll_tx_qdma() (local)
    2487  cpu = next_cpu;  in mtk_poll_tx_qdma()
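
Unlike the other hits, `next_cpu` here is not a CPU number: mtk_poll_tx_qdma() walks the QDMA TX descriptor ring, where each descriptor's `txd2` links to the next one, and the link is read before the descriptor is released. A toy ring walk showing why the link is fetched first; illustrative indices replace the real DMA addresses:

```c
#include <stdint.h>
#include <stdio.h>

struct tx_desc {
    uint32_t txd2;   /* "address" of the next descriptor (index here) */
    int done;        /* completion flag the hardware would set */
};

int main(void)
{
    struct tx_desc ring[4] = {
        { .txd2 = 1, .done = 1 },
        { .txd2 = 2, .done = 1 },
        { .txd2 = 3, .done = 0 },   /* hardware not finished yet */
        { .txd2 = 0, .done = 0 },
    };
    uint32_t cpu = 0;               /* software's position in the ring */

    while (ring[cpu].done) {
        uint32_t next_cpu = ring[cpu].txd2;  /* fetch the link first */
        printf("reclaimed descriptor %u\n", cpu);
        ring[cpu].done = 0;                  /* release the descriptor */
        cpu = next_cpu;                      /* then advance past it */
    }
    return 0;
}
```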

/linux/kernel/sched/fair.c
    13829  goto next_cpu;  in sched_group_set_idle()
    13849  next_cpu:  in sched_group_set_idle() (label)