Lines Matching +full:init +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/delay.h>
53 int delay; /* ms */ member
61 /* per-cpu worker */
73 int delay; member
90 atomic_long_add(size, &test->data.kmalloc.alloc); in ot_kzalloc()
98 atomic_long_add(size, &test->data.kmalloc.free); in ot_kfree()
106 pr_info("memory allocation summary for %s\n", test->name); in ot_mem_report()
108 alloc = atomic_long_read(&test->data.kmalloc.alloc); in ot_mem_report()
109 free = atomic_long_read(&test->data.kmalloc.free); in ot_mem_report()
110 pr_info(" kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free); in ot_mem_report()
112 alloc = atomic_long_read(&test->data.vmalloc.alloc); in ot_mem_report()
113 free = atomic_long_read(&test->data.vmalloc.free); in ot_mem_report()
114 pr_info(" vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free); in ot_mem_report()
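The report lines above close out the test's leak accounting: ot_kzalloc()/ot_kfree() add the allocation size to paired atomic counters, and ot_mem_report() prints alloc - free, which should be zero after a clean run. A minimal sketch of the same pattern, with hypothetical names (acct_kzalloc() and friends are not from this file):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/printk.h>

/* hypothetical counters mirroring test->data.kmalloc.{alloc,free} */
static atomic_long_t bytes_alloced;
static atomic_long_t bytes_freed;

static void *acct_kzalloc(size_t size, gfp_t gfp)
{
        void *p = kzalloc(size, gfp);

        if (p)
                atomic_long_add(size, &bytes_alloced);
        return p;
}

static void acct_kfree(void *p, size_t size)
{
        kfree(p);
        atomic_long_add(size, &bytes_freed);
}

static void acct_report(void)
{
        long alloced = atomic_long_read(&bytes_alloced);
        long freed = atomic_long_read(&bytes_freed);

        /* a non-zero difference means some allocation was never freed */
        pr_info("kmalloc: %ld - %ld = %ld\n", alloced, freed, alloced - freed);
}
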
139 init_rwsem(&data->start); in ot_init_data()
140 init_completion(&data->wait); in ot_init_data()
141 init_completion(&data->rcu); in ot_init_data()
142 atomic_set(&data->nthreads, 1); in ot_init_data()
152 on->owner = &sop->pool; in ot_init_node()
159 struct ot_test *test = item->test; in ot_hrtimer_handler()
161 if (atomic_read_acquire(&test->data.stop)) in ot_hrtimer_handler()
164 /* do bulk testing of object pop/push */ in ot_hrtimer_handler()
165 item->worker(item, 1); in ot_hrtimer_handler()
167 hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle); in ot_hrtimer_handler()
173 if (!item->test->hrtimer) in ot_start_hrtimer()
175 hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL); in ot_start_hrtimer()
180 if (!item->test->hrtimer) in ot_stop_hrtimer()
182 hrtimer_cancel(&item->hrtimer); in ot_stop_hrtimer()
187 struct hrtimer *hrt = &item->hrtimer; in ot_init_hrtimer()
190 return -ENOENT; in ot_init_hrtimer()
192 item->hrtcycle = ktime_set(0, hrtimer * 1000000UL); in ot_init_hrtimer()
194 hrt->function = ot_hrtimer_handler; in ot_init_hrtimer()
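Lines 159-194 show the periodic hrtimer plumbing: the handler bails out once the stop flag is observed, otherwise it runs one bulk pass and re-arms itself with hrtimer_forward(). A self-rearming sketch of that idiom, assuming a period given in milliseconds (tick/tick_fn are illustrative names, not the file's):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/atomic.h>

static struct hrtimer tick;
static ktime_t period;
static atomic_t stop_flag;

static enum hrtimer_restart tick_fn(struct hrtimer *hrt)
{
        /* observe the stop flag published by the control thread */
        if (atomic_read_acquire(&stop_flag))
                return HRTIMER_NORESTART;

        /* ... do one bulk pass here (hard-irq context, no sleeping) ... */

        /* push the expiry forward by one period and keep firing */
        hrtimer_forward(hrt, hrt->base->get_time(), period);
        return HRTIMER_RESTART;
}

static void tick_start(unsigned long ms)
{
        period = ktime_set(0, ms * 1000000UL);  /* ms -> ns */
        hrtimer_init(&tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tick.function = tick_fn;
        hrtimer_start(&tick, period, HRTIMER_MODE_REL);
}

/* teardown mirrors ot_stop_hrtimer(): hrtimer_cancel(&tick); */
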
204 item->pool = pool; in ot_init_cpu_item()
205 item->test = test; in ot_init_cpu_item()
206 item->worker = worker; in ot_init_cpu_item()
208 item->bulk[0] = test->bulk_normal; in ot_init_cpu_item()
209 item->bulk[1] = test->bulk_irq; in ot_init_cpu_item()
210 item->delay = test->delay; in ot_init_cpu_item()
213 ot_init_hrtimer(item, item->test->hrtimer); in ot_init_cpu_item()
220 struct ot_test *test = item->test; in ot_thread_worker()
223 atomic_inc(&test->data.nthreads); in ot_thread_worker()
224 down_read(&test->data.start); in ot_thread_worker()
225 up_read(&test->data.start); in ot_thread_worker()
229 if (atomic_read_acquire(&test->data.stop)) in ot_thread_worker()
231 /* do bulk testing of object pop/push */ in ot_thread_worker()
232 item->worker(item, 0); in ot_thread_worker()
235 item->duration = (u64) ktime_us_delta(ktime_get(), start); in ot_thread_worker()
236 if (atomic_dec_and_test(&test->data.nthreads)) in ot_thread_worker()
237 complete(&test->data.wait); in ot_thread_worker()
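ot_thread_worker() relies on a start-gate handshake: the controller holds data->start for write while spawning workers, each worker registers itself in nthreads and then blocks in down_read() until the controller's up_write() releases all of them at once, so every CPU enters the timed loop together; the last thread to finish fires data->wait. Condensed, the handshake looks like this (illustrative names; nthreads starts at 1 for the controller's own reference, as in ot_init_data()):

#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/atomic.h>

static struct rw_semaphore start_gate;
static struct completion all_done;
static atomic_t nthreads = ATOMIC_INIT(1);      /* 1 = controller's ref */

static int worker_fn(void *arg)
{
        atomic_inc(&nthreads);
        down_read(&start_gate);         /* blocks until the controller fires */
        up_read(&start_gate);

        /* ... run the timed pop/push loop until the stop flag ... */

        if (atomic_dec_and_test(&nthreads))
                complete(&all_done);    /* last one out signals the waiter */
        return 0;
}

static void controller(void)
{
        init_rwsem(&start_gate);
        init_completion(&all_done);

        down_write(&start_gate);
        /* ... create one worker per CPU ... */
        up_write(&start_gate);          /* fire: all workers start together */

        /* drop the controller's own reference, then wait for the workers */
        if (atomic_dec_and_test(&nthreads))
                complete(&all_done);
        wait_for_completion(&all_done);
}
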
248 pr_info("Testing summary for %s\n", test->name); in ot_perf_report()
252 if (!item->duration) in ot_perf_report()
254 normal.nhits += item->stat[0].nhits; in ot_perf_report()
255 normal.nmiss += item->stat[0].nmiss; in ot_perf_report()
256 irq.nhits += item->stat[1].nhits; in ot_perf_report()
257 irq.nmiss += item->stat[1].nmiss; in ot_perf_report()
258 pr_info("CPU: %d duration: %lluus\n", cpu, item->duration); in ot_perf_report()
260 item->stat[0].nhits, item->stat[0].nmiss); in ot_perf_report()
262 item->stat[1].nhits, item->stat[1].nmiss); in ot_perf_report()
264 item->stat[0].nhits + item->stat[1].nhits, in ot_perf_report()
265 item->stat[0].nmiss + item->stat[1].nmiss); in ot_perf_report()
276 test->data.objects = total; in ot_perf_report()
277 test->data.duration = duration; in ot_perf_report()
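ot_perf_report() folds the per-CPU counters into totals: CPUs whose duration stayed zero never ran a worker and are skipped, stat[0] holds thread-context (normal) hits/misses and stat[1] the irq-context ones, and duration is the microsecond delta recorded by ot_thread_worker(). The loop shape, condensed into a sketch (the per-CPU storage here is a DEFINE_PER_CPU variable for brevity; the file may allocate its items differently):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/types.h>

struct hitmiss {
        u64 nhits;
        u64 nmiss;
};

struct cpu_item {
        u64 duration;                   /* us; zero if this CPU never ran */
        struct hitmiss stat[2];         /* [0]: thread ctx, [1]: irq ctx */
};

static DEFINE_PER_CPU(struct cpu_item, items);

static void report(void)
{
        struct hitmiss normal = {}, irq = {};
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_item *it = per_cpu_ptr(&items, cpu);

                if (!it->duration)      /* this CPU never ran a worker */
                        continue;
                normal.nhits += it->stat[0].nhits;
                normal.nmiss += it->stat[0].nmiss;
                irq.nhits += it->stat[1].nhits;
                irq.nmiss += it->stat[1].nmiss;
        }
        pr_info("total hits: %llu, misses: %llu\n",
                normal.nhits + irq.nhits, normal.nmiss + irq.nmiss);
}
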
294 sop->test = test; in ot_init_sync_m0()
295 if (test->objsz < 512) in ot_init_sync_m0()
298 if (objpool_init(&sop->pool, max, test->objsz, in ot_init_sync_m0()
303 WARN_ON(max != sop->pool.nr_objs); in ot_init_sync_m0()
310 objpool_fini(&sop->pool); in ot_fini_sync()
311 ot_kfree(sop->test, sop, sizeof(*sop)); in ot_fini_sync()
315 struct ot_context * (*init)(struct ot_test *oc); member
318 {.init = ot_init_sync_m0, .fini = ot_fini_sync},
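The ops table at lines 315-318 is a small dispatch pattern: each test mode pairs an init() that builds the ot_context (and its objpool) with a matching fini(), and ot_start_sync() simply indexes the table by test->mode. Reduced to its skeleton (types and names here are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct ctx {
        int dummy;      /* ... pool, counters, ... */
};

struct mode_ops {
        struct ctx *(*init)(void);
        void (*fini)(struct ctx *ctx);
};

static struct ctx *m0_init(void)
{
        return kzalloc(sizeof(struct ctx), GFP_KERNEL);
}

static void m0_fini(struct ctx *ctx)
{
        kfree(ctx);
}

static const struct mode_ops mode_table[] = {
        { .init = m0_init, .fini = m0_fini },
        /* further modes slot in here */
};

static int run_mode(unsigned int mode)
{
        struct ctx *ctx;

        if (mode >= ARRAY_SIZE(mode_table))
                return -EINVAL;
        ctx = mode_table[mode].init();
        if (!ctx)
                return -ENOMEM;
        /* ... run the test ... */
        mode_table[mode].fini(ctx);
        return 0;
}
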
330 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_sync()
331 nods[i] = objpool_pop(item->pool); in ot_bulk_sync()
333 if (!irq && (item->delay || !(++(item->niters) & 0x7FFF))) in ot_bulk_sync()
334 msleep(item->delay); in ot_bulk_sync()
336 while (i-- > 0) { in ot_bulk_sync()
339 on->refs++; in ot_bulk_sync()
340 objpool_push(on, item->pool); in ot_bulk_sync()
341 item->stat[irq].nhits++; in ot_bulk_sync()
343 item->stat[irq].nmiss++; in ot_bulk_sync()
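ot_bulk_sync() is the hot loop: pop up to bulk[irq] objects, optionally msleep() (the `!irq &&` guard keeps sleeps out of the hrtimer path; it sleeps every iteration when delay is set, otherwise once every 0x8000 iterations, where msleep(0) just yields), then push everything back, counting each successful pop as a hit and each NULL as a miss. A minimal single-object round trip against the same <linux/objpool.h> API (struct my_node and the pool size are illustrative):

#include <linux/objpool.h>
#include <linux/slab.h>

struct my_node {
        unsigned long refs;
};

static int objpool_roundtrip(void)
{
        struct objpool_head pool;
        struct my_node *node;
        int rc;

        /* 16 objects of sizeof(struct my_node); no context or callbacks */
        rc = objpool_init(&pool, 16, sizeof(struct my_node),
                          GFP_KERNEL, NULL, NULL, NULL);
        if (rc)
                return rc;

        node = objpool_pop(&pool);      /* NULL when the pool is empty */
        if (node) {
                node->refs++;
                objpool_push(node, &pool);      /* return it for reuse */
        }

        objpool_fini(&pool);            /* drop the pool's own reference */
        return 0;
}
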
357 sop = g_ot_sync_ops[test->mode].init(test); in ot_start_sync()
359 return -ENOMEM; in ot_start_sync()
362 down_write(&test->data.start); in ot_start_sync()
368 ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync); in ot_start_sync()
388 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_sync()
389 complete(&test->data.wait); in ot_start_sync()
395 up_write(&test->data.start); in ot_start_sync()
398 timeout = msecs_to_jiffies(test->duration); in ot_start_sync()
402 atomic_set_release(&test->data.stop, 1); in ot_start_sync()
405 wait_for_completion(&test->data.wait); in ot_start_sync()
409 g_ot_sync_ops[test->mode].fini(sop); in ot_start_sync()
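ot_start_sync() ends the run with atomic_set_release(&stop, 1), which pairs with the atomic_read_acquire() checks in ot_thread_worker() and ot_hrtimer_handler(): release/acquire ordering guarantees that anything the controller wrote before raising the flag is visible to a worker that sees the flag set. The pairing in isolation (illustrative names):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t stop;
static unsigned long shared_result;

/* control side: publish data, then raise the flag */
static void signal_stop(unsigned long result)
{
        shared_result = result;
        atomic_set_release(&stop, 1);   /* orders the store above before the flag */
}

/* worker side: a reader that observes stop == 1 also sees shared_result */
static bool worker_should_stop(unsigned long *result)
{
        if (!atomic_read_acquire(&stop))
                return false;
        *result = shared_result;
        return true;
}
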
427 struct ot_test *test = sop->test; in ot_fini_async_rcu()
429 /* here all cpus are aware of the stop event: test->data.stop = 1 */ in ot_fini_async_rcu()
430 WARN_ON(!atomic_read_acquire(&test->data.stop)); in ot_fini_async_rcu()
432 objpool_fini(&sop->pool); in ot_fini_async_rcu()
433 complete(&test->data.rcu); in ot_fini_async_rcu()
439 call_rcu(&sop->rcu, ot_fini_async_rcu); in ot_fini_async()
446 WARN_ON(!head || !sop || head != &sop->pool); in ot_objpool_release()
450 ot_kfree(sop->test, sop, sizeof(*sop)); in ot_objpool_release()
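The async teardown cannot free the pool inline, since concurrent readers may still hold objects: ot_fini_async() queues the context with call_rcu(), and only after a grace period does ot_fini_async_rcu() call objpool_fini() and complete data->rcu so the exit path can proceed; the context itself is then freed by the pool's release callback, ot_objpool_release(), once the last reference drops. Stripped to the pattern (names illustrative):

#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/objpool.h>

struct my_ctx {
        struct objpool_head pool;
        struct rcu_head rcu;
};

static struct completion rcu_done;

static void my_fini_rcu(struct rcu_head *rcu)
{
        struct my_ctx *ctx = container_of(rcu, struct my_ctx, rcu);

        /* a grace period has elapsed: no reader can still see the pool */
        objpool_fini(&ctx->pool);
        complete(&rcu_done);
}

static void my_fini_async(struct my_ctx *ctx)
{
        init_completion(&rcu_done);
        call_rcu(&ctx->rcu, my_fini_rcu);       /* teardown runs later */
}

/* the caller then blocks in wait_for_completion(&rcu_done), as at line 601 */
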
464 sop->test = test; in ot_init_async_m0()
465 if (test->objsz < 512) in ot_init_async_m0()
468 if (objpool_init(&sop->pool, max, test->objsz, gfp, sop, in ot_init_async_m0()
473 WARN_ON(max != sop->pool.nr_objs); in ot_init_async_m0()
479 struct ot_context * (*init)(struct ot_test *oc); member
482 {.init = ot_init_async_m0, .fini = ot_fini_async},
490 on->refs++; in ot_nod_recycle()
499 WARN_ON(sop != pool->context); in ot_nod_recycle()
507 struct ot_test *test = item->test; in ot_bulk_async()
511 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_async()
512 nods[i] = objpool_pop(item->pool); in ot_bulk_async()
515 if (item->delay || !(++(item->niters) & 0x7FFF)) in ot_bulk_async()
516 msleep(item->delay); in ot_bulk_async()
520 stop = atomic_read_acquire(&test->data.stop); in ot_bulk_async()
523 while (i-- > 0) { in ot_bulk_async()
527 on->refs++; in ot_bulk_async()
528 ot_nod_recycle(on, item->pool, stop); in ot_bulk_async()
529 item->stat[irq].nhits++; in ot_bulk_async()
531 item->stat[irq].nmiss++; in ot_bulk_async()
548 sop = g_ot_async_ops[test->mode].init(test); in ot_start_async()
550 return -ENOMEM; in ot_start_async()
553 down_write(&test->data.start); in ot_start_async()
559 ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async); in ot_start_async()
579 if (atomic_dec_and_test(&test->data.nthreads)) in ot_start_async()
580 complete(&test->data.wait); in ot_start_async()
584 up_write(&test->data.start); in ot_start_async()
587 timeout = msecs_to_jiffies(test->duration); in ot_start_async()
591 atomic_set_release(&test->data.stop, 1); in ot_start_async()
593 /* do async finalization */ in ot_start_async()
594 g_ot_async_ops[test->mode].fini(sop); in ot_start_async()
597 wait_for_completion(&test->data.wait); in ot_start_async()
601 wait_for_completion(&test->data.rcu); in ot_start_async()
625 * delay: int, delay (in ms) between each iteration
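Line 625 is part of the module's parameter documentation. The conventional declaration behind such a doc line is a module_param() plus MODULE_PARM_DESC(); a sketch of what that typically looks like (the file's actual declaration may differ):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int delay;       /* ms slept between iterations of the bulk loop */
module_param(delay, int, 0444);
MODULE_PARM_DESC(delay, "delay (in ms) between each iteration");
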
680 return -EAGAIN; in ot_mod_init()