// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/cpumask.h>

/**
 * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
 * @cpumask: The actual cpumask embedded in the struct.
 * @usage: Object reference counter. When the refcount goes to 0, the
 *	   memory is released back to the BPF allocator, which provides
 *	   RCU safety.
 *
 * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This
 * is done to avoid confusing the verifier due to the typedef of cpumask_var_t
 * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See
 * the details in <linux/cpumask.h>. The consequence is that this structure is
 * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is
 * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory
 * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK
 * not being defined, the structure is the same size regardless.
 */
struct bpf_cpumask {
	cpumask_t cpumask;
	refcount_t usage;
};

static struct bpf_mem_alloc bpf_cpumask_ma;

static bool cpu_valid(u32 cpu)
{
	return cpu < nr_cpu_ids;
}

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global kfuncs as their definitions will be in BTF");

/**
 * bpf_cpumask_create() - Create a mutable BPF cpumask.
 *
 * Allocates a cpumask that can be queried, mutated, acquired, and released by
 * a BPF program. The cpumask returned by this function must either be embedded
 * in a map as a kptr, or freed with bpf_cpumask_release().
 *
 * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
 * will not block. It may return NULL if no memory is available.
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
{
	struct bpf_cpumask *cpumask;

	/* cpumask must be the first member so that struct bpf_cpumask can be cast to struct cpumask. */
	BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);

	cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
	if (!cpumask)
		return NULL;

	memset(cpumask, 0, sizeof(*cpumask));
	refcount_set(&cpumask->usage, 1);

	return cpumask;
}

/**
 * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask.
 * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted
 *	     pointer.
 *
 * Acquires a reference to a BPF cpumask. The cpumask returned by this function
 * must either be embedded in a map as a kptr, or freed with
 * bpf_cpumask_release().
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
	refcount_inc(&cpumask->usage);
	return cpumask;
}
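
/*
 * Illustrative sketch (not compiled here): how a BPF program might use the
 * create/acquire/release lifecycle, assuming the kfunc prototypes are made
 * available to the program (e.g. as extern declarations resolved against
 * vmlinux BTF). Variable names are hypothetical.
 *
 *	struct bpf_cpumask *mask;
 *
 *	mask = bpf_cpumask_create();
 *	if (!mask)
 *		return 0;	// allocation may fail; bail out
 *
 *	bpf_cpumask_set_cpu(1, mask);
 *	if (bpf_cpumask_test_cpu(1, (const struct cpumask *)mask)) {
 *		// bit 1 is now set
 *	}
 *
 *	// Every created or acquired cpumask must be released (or stashed in a
 *	// map as a kptr) before the program returns.
 *	bpf_cpumask_release(mask);
 */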

/**
 * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
 *			    stored in a map.
 * @cpumaskp: A pointer to a BPF cpumask map value.
 *
 * Attempts to acquire a reference to a BPF cpumask stored in a map value. The
 * cpumask returned by this function must either be embedded in a map as a
 * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
 * no BPF cpumask was found in the specified map value.
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
	struct bpf_cpumask *cpumask;

	/* The BPF memory allocator frees memory backing its caches in an RCU
	 * callback. Thus, we can safely use RCU to ensure that the cpumask is
	 * safe to read.
	 */
	rcu_read_lock();

	cpumask = READ_ONCE(*cpumaskp);
	if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
		cpumask = NULL;

	rcu_read_unlock();
	return cpumask;
}

/**
 * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
 * @cpumask: The cpumask being released.
 *
 * Releases a previously acquired reference to a BPF cpumask. When the final
 * reference of the BPF cpumask has been released, it is subsequently freed in
 * an RCU callback in the BPF memory allocator.
 */
__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!cpumask)
		return;

	if (refcount_dec_and_test(&cpumask->usage)) {
		migrate_disable();
		bpf_mem_free(&bpf_cpumask_ma, cpumask);
		migrate_enable();
	}
}

/**
 * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
	return cpumask_first(cpumask);
}

/**
 * bpf_cpumask_first_zero() - Get the index of the first unset bit in the
 *			      cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
	return cpumask_first_zero(cpumask);
}

/**
 * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be set in the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being set.
 */
__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_set_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be cleared from the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being cleared.
 */
__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_clear_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask.
 * @cpu: The CPU being queried for.
 * @cpumask: The cpumask being queried for containing a CPU.
 *
 * Return:
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */
__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);
}
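
/*
 * Illustrative sketch (not compiled here): stashing a created cpumask in a
 * map value as a referenced kptr and reading it back with
 * bpf_cpumask_kptr_get(). The map value layout, field names, and the
 * referenced-kptr annotation (__kptr_ref on some kernel versions, __kptr on
 * later ones) are BPF-program-side assumptions, not part of this file.
 *
 *	struct cpumask_map_value {
 *		struct bpf_cpumask __kptr_ref *mask;
 *	};
 *
 *	// Stash: transfer ownership of a created cpumask into the map value,
 *	// releasing whatever was stored there before.
 *	old = bpf_kptr_xchg(&v->mask, mask);
 *	if (old)
 *		bpf_cpumask_release(old);
 *
 *	// Later: take a new reference on whatever is currently stored.
 *	mask = bpf_cpumask_kptr_get(&v->mask);
 *	if (mask) {
 *		// ... query or mutate the cpumask ...
 *		bpf_cpumask_release(mask);
 *	}
 */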

/**
 * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF cpumask.
 * @cpu: The CPU being set and queried for.
 * @cpumask: The BPF cpumask being set and queried for containing a CPU.
 *
 * Return:
 * * true  - @cpu was set in the cpumask before the call
 * * false - @cpu was not set in the cpumask before the call, or @cpu is invalid.
 */
__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF
 *				      cpumask.
 * @cpu: The CPU being cleared and queried for.
 * @cpumask: The BPF cpumask being cleared and queried for containing a CPU.
 *
 * Return:
 * * true  - @cpu was set in the cpumask before being cleared
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */
__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
	cpumask_setall((struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */
__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
	cpumask_clear((struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_and() - AND two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @dst has at least one bit set following the operation
 * * false - @dst is empty following the operation
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
	return cpumask_and((struct cpumask *)dst, src1, src2);
}

/**
 * bpf_cpumask_or() - OR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
				const struct cpumask *src1,
				const struct cpumask *src2)
{
	cpumask_or((struct cpumask *)dst, src1, src2);
}

/**
 * bpf_cpumask_xor() - XOR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
	cpumask_xor((struct cpumask *)dst, src1, src2);
}
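
/*
 * Illustrative sketch (not compiled here): combining two masks with the bulk
 * operations above. dst, src1, and src2 are assumed to be trusted
 * struct bpf_cpumask pointers obtained earlier in the program; the casts
 * mirror the "struct bpf_cpumask pointers may be safely passed" notes in the
 * kernel-doc.
 *
 *	if (!bpf_cpumask_and(dst, (const struct cpumask *)src1,
 *			     (const struct cpumask *)src2)) {
 *		// The intersection is empty; fall back to the union instead.
 *		bpf_cpumask_or(dst, (const struct cpumask *)src1,
 *			       (const struct cpumask *)src2);
 *	}
 */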

/**
 * bpf_cpumask_equal() - Check two cpumasks for equality.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @src1 and @src2 have the same bits set.
 * * false - @src1 and @src2 differ in at least one bit.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_equal(src1, src2);
}

/**
 * bpf_cpumask_intersects() - Check two cpumasks for overlap.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true  - @src1 and @src2 have at least one of the same bits set.
 * * false - @src1 and @src2 don't have any of the same bits set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_intersects(src1, src2);
}

/**
 * bpf_cpumask_subset() - Check if a cpumask is a subset of another.
 * @src1: The first cpumask being checked as a subset.
 * @src2: The second cpumask being checked as a superset.
 *
 * Return:
 * * true  - All of the bits of @src1 are set in @src2.
 * * false - At least one bit in @src1 is not set in @src2.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_subset(src1, src2);
}

/**
 * bpf_cpumask_empty() - Check if a cpumask is empty.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true  - None of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
	return cpumask_empty(cpumask);
}

/**
 * bpf_cpumask_full() - Check if a cpumask has all bits set.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true  - All of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is cleared.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
{
	return cpumask_full(cpumask);
}

/**
 * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask.
 * @dst: The BPF cpumask being copied into.
 * @src: The cpumask being copied.
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
	cpumask_copy((struct cpumask *)dst, src);
}

/**
 * bpf_cpumask_any() - Return a random set CPU from a cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
	return cpumask_any(cpumask);
}
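
/*
 * Illustrative sketch (not compiled here): querying a populated mask using
 * only the kfuncs in this file. mask is assumed to be a trusted
 * struct bpf_cpumask pointer; because bpf_cpumask_any() returns >= num_cpus
 * when no bit is set, re-testing the returned CPU filters that case out.
 *
 *	u32 cpu;
 *
 *	cpu = bpf_cpumask_any((const struct cpumask *)mask);
 *	if (bpf_cpumask_test_cpu(cpu, (const struct cpumask *)mask)) {
 *		// cpu is a valid, currently-set CPU in the mask
 *	}
 */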

/**
 * bpf_cpumask_any_and() - Return a random set CPU from the AND of two
 *			   cpumasks.
 * @src1: The first cpumask.
 * @src2: The second cpumask.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_any_and(src1, src2);
}

__diag_pop();

BTF_SET8_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS)
BTF_SET8_END(cpumask_kfunc_btf_ids)

static const struct btf_kfunc_id_set cpumask_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &cpumask_kfunc_btf_ids,
};

BTF_ID_LIST(cpumask_dtor_ids)
BTF_ID(struct, bpf_cpumask)
BTF_ID(func, bpf_cpumask_release)

static int __init cpumask_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc cpumask_dtors[] = {
		{
			.btf_id	= cpumask_dtor_ids[0],
			.kfunc_btf_id = cpumask_dtor_ids[1]
		},
	};

	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
						  ARRAY_SIZE(cpumask_dtors),
						  THIS_MODULE);
}

late_initcall(cpumask_kfunc_init);
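
/*
 * Note on the registrations above: the kfunc set is registered for
 * BPF_PROG_TYPE_TRACING and BPF_PROG_TYPE_STRUCT_OPS, so tracing programs
 * (e.g. fentry or tp_btf) and struct_ops programs may call these kfuncs. The
 * dtor registration pairs bpf_cpumask_release() with struct bpf_cpumask so
 * that a cpumask still stored in a map as a referenced kptr is released when
 * the map element or map is destroyed. A hypothetical BPF-side caller (a
 * sketch, not compiled here) might look like:
 *
 *	SEC("tp_btf/task_newtask")
 *	int BPF_PROG(on_task_newtask, struct task_struct *task, u64 clone_flags)
 *	{
 *		struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *		if (mask)
 *			bpf_cpumask_release(mask);
 *		return 0;
 *	}
 */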