// SPDX-License-Identifier: GPL-2.0

/*
 * Test module for page_frag cache
 *
 * Copyright (C) 2024 Yunsheng Lin <linyunsheng@huawei.com>
 */

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/ptr_ring.h>
#include <linux/kthread.h>
#include <linux/page_frag_cache.h>

#define TEST_FAILED_PREFIX	"page_frag_test failed: "

static struct ptr_ring ptr_ring;
static int nr_objs = 512;
static atomic_t nthreads;
static struct completion wait;
static struct page_frag_cache test_nc;
static int test_popped;
static int test_pushed;
static bool force_exit;

static int nr_test = 2000000;
module_param(nr_test, int, 0);
MODULE_PARM_DESC(nr_test, "number of iterations to test");

static bool test_align;
module_param(test_align, bool, 0);
MODULE_PARM_DESC(test_align, "use align API for testing");

static int test_alloc_len = 2048;
module_param(test_alloc_len, int, 0);
MODULE_PARM_DESC(test_alloc_len, "alloc len for testing");

static int test_push_cpu;
module_param(test_push_cpu, int, 0);
MODULE_PARM_DESC(test_push_cpu, "test cpu for pushing fragment");

static int test_pop_cpu;
module_param(test_pop_cpu, int, 0);
MODULE_PARM_DESC(test_pop_cpu, "test cpu for popping fragment");

static int page_frag_pop_thread(void *arg)
{
	struct ptr_ring *ring = arg;

	pr_info("page_frag pop test thread begins on cpu %d\n",
		smp_processor_id());

	while (test_popped < nr_test) {
		void *obj = __ptr_ring_consume(ring);

		if (obj) {
			test_popped++;
			page_frag_free(obj);
		} else {
			if (force_exit)
				break;

			cond_resched();
		}
	}

	if (atomic_dec_and_test(&nthreads))
		complete(&wait);

	pr_info("page_frag pop test thread exits on cpu %d\n",
		smp_processor_id());

	return 0;
}

static int page_frag_push_thread(void *arg)
{
	struct ptr_ring *ring = arg;

	pr_info("page_frag push test thread begins on cpu %d\n",
		smp_processor_id());

	while (test_pushed < nr_test && !force_exit) {
		void *va;
		int ret;

		if (test_align) {
			va = page_frag_alloc_align(&test_nc, test_alloc_len,
						   GFP_KERNEL, SMP_CACHE_BYTES);

			if ((unsigned long)va & (SMP_CACHE_BYTES - 1)) {
				force_exit = true;
				WARN_ONCE(true, TEST_FAILED_PREFIX "unaligned va returned\n");
			}
		} else {
			va = page_frag_alloc(&test_nc, test_alloc_len, GFP_KERNEL);
		}

		if (!va)
			continue;

		ret = __ptr_ring_produce(ring, va);
		if (ret) {
			page_frag_free(va);
			cond_resched();
		} else {
			test_pushed++;
		}
	}

	pr_info("page_frag push test thread exits on cpu %d\n",
		smp_processor_id());

	if (atomic_dec_and_test(&nthreads))
		complete(&wait);

	return 0;
}
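
/*
 * The push and pop threads above form a single-producer/single-consumer
 * pair: the push thread allocates fragments from test_nc and publishes
 * them through ptr_ring, while the pop thread consumes and frees them.
 * A hypothetical invocation (module name and CPU numbers are only
 * examples) might look like:
 *
 *	insmod ./page_frag_test.ko test_push_cpu=0 test_pop_cpu=1 \
 *		test_alloc_len=2048 nr_test=2000000 test_align=1
 */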

static int __init page_frag_test_init(void)
{
	struct task_struct *tsk_push, *tsk_pop;
	int last_pushed = 0, last_popped = 0;
	ktime_t start;
	u64 duration;
	int ret;

	page_frag_cache_init(&test_nc);
	atomic_set(&nthreads, 2);
	init_completion(&wait);

	if (test_alloc_len > PAGE_SIZE || test_alloc_len <= 0 ||
	    !cpu_active(test_push_cpu) || !cpu_active(test_pop_cpu))
		return -EINVAL;

	ret = ptr_ring_init(&ptr_ring, nr_objs, GFP_KERNEL);
	if (ret)
		return ret;

	tsk_push = kthread_create_on_cpu(page_frag_push_thread, &ptr_ring,
					 test_push_cpu, "page_frag_push");
	if (IS_ERR(tsk_push))
		return PTR_ERR(tsk_push);

	tsk_pop = kthread_create_on_cpu(page_frag_pop_thread, &ptr_ring,
					test_pop_cpu, "page_frag_pop");
	if (IS_ERR(tsk_pop)) {
		kthread_stop(tsk_push);
		return PTR_ERR(tsk_pop);
	}

	start = ktime_get();
	wake_up_process(tsk_push);
	wake_up_process(tsk_pop);

	pr_info("waiting for test to complete\n");

	while (!wait_for_completion_timeout(&wait, msecs_to_jiffies(10000))) {
		/* exit if there is no progress for push or pop size */
		if (last_pushed == test_pushed || last_popped == test_popped) {
			WARN_ONCE(true, TEST_FAILED_PREFIX "no progress\n");
			force_exit = true;
			continue;
		}

		last_pushed = test_pushed;
		last_popped = test_popped;
		pr_info("page_frag_test progress: pushed = %d, popped = %d\n",
			test_pushed, test_popped);
	}

	if (force_exit) {
		pr_err(TEST_FAILED_PREFIX "exit with error\n");
		goto out;
	}

	duration = (u64)ktime_us_delta(ktime_get(), start);
	pr_info("%d of iterations for %s testing took: %lluus\n", nr_test,
		test_align ? "aligned" : "non-aligned", duration);

out:
	ptr_ring_cleanup(&ptr_ring, NULL);
	page_frag_cache_drain(&test_nc);

	return -EAGAIN;
}

static void __exit page_frag_test_exit(void)
{
}

module_init(page_frag_test_init);
module_exit(page_frag_test_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yunsheng Lin <linyunsheng@huawei.com>");
MODULE_DESCRIPTION("Test module for page_frag");