Lines Matching defs:cands
7571 } cands[];
7614 if (btf_is_module(cc->cands[0].btf)) {
7620 kern_type_id = cc->cands[0].id;
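The brace at 7571 closes the flexible candidate array at the tail of struct bpf_cand_cache, which is what cc->cands[0].btf and cc->cands[0].id index into at 7614/7620. Reconstructed from the surrounding definitions (the field comments are mine), the structure is roughly:

struct bpf_cand_cache {
	const char *name;	/* type name being resolved */
	u32 name_len;		/* essential-name length, flavor suffix stripped */
	u16 kind;		/* BTF_KIND_* of the local type */
	u16 cnt;		/* number of matching candidates */
	struct {
		const struct btf *btf;	/* vmlinux or module BTF with the match */
		u32 id;			/* type id within that BTF */
	} cands[];		/* flexible array, sized by sizeof_cands() at 9009 */
};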
8938 static void bpf_free_cands(struct bpf_cand_cache *cands)
8940 if (!cands->cnt)
8943 kfree(cands);
8946 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
8948 kfree(cands->name);
8949 kfree(cands);
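Filling in the elided lines around 8938-8949: the two free paths differ in who owns ->name. A freshly built cands still points at strings in the program's BTF (and may even live on the stack when cnt == 0), while a cached entry owns a kmemdup_nul() copy of the name (see 9030), so only the cache variant frees it. A hedged reconstruction:

static void bpf_free_cands(struct bpf_cand_cache *cands)
{
	if (!cands->cnt)
		/* empty candidate array was allocated on stack */
		return;
	kfree(cands);
}

static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
{
	kfree(cands->name);
	kfree(cands);
}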
8971 bpf_log(log, "%d", cc->cands[j].id);
8990 static u32 hash_cands(struct bpf_cand_cache *cands)
8992 return jhash(cands->name, cands->name_len, 0);
8995 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
8999 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
9001 if (cc && cc->name_len == cands->name_len &&
9002 !strncmp(cc->name, cands->name, cands->name_len))
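hash_cands() (8990) plus check_cand_cache() (8995) implement a direct-mapped cache: jhash() of the name selects exactly one bucket, and a hit additionally requires an equal name_len and a byte-wise strncmp() match. Below is a minimal runnable userspace sketch of the same lookup shape; the FNV-1a hash is only a stand-in for jhash(), and all names are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 16	/* stand-in for VMLINUX_CAND_CACHE_SIZE */

struct entry { const char *name; uint32_t name_len; };
static struct entry *cache[CACHE_SIZE];

/* FNV-1a over the name bytes; the kernel uses jhash() here. */
static uint32_t hash_name(const char *name, uint32_t len)
{
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ (unsigned char)*name++) * 16777619u;
	return h;
}

/* Mirrors check_cand_cache(): probe one bucket, compare length then bytes. */
static struct entry *check_cache(const char *name, uint32_t len)
{
	struct entry *e = cache[hash_name(name, len) % CACHE_SIZE];

	if (e && e->name_len == len && !strncmp(e->name, name, len))
		return e;
	return NULL;
}

int main(void)
{
	static struct entry task = { "task_struct", 11 };

	cache[hash_name(task.name, task.name_len) % CACHE_SIZE] = &task;
	printf("cache hit: %s\n", check_cache("task_struct", 11) ? "yes" : "no");
	return 0;
}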
9009 return offsetof(struct bpf_cand_cache, cands[cnt]);
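sizeof_cands() at 9009 is the standard flexible-array-member sizing idiom: offsetof(..., cands[cnt]) yields the bytes needed for the header plus cnt trailing slots, with no padding guesswork. A runnable userspace sketch (struct simplified from the kernel's):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct cand_cache {
	const char *name;
	unsigned int name_len;
	unsigned int cnt;
	struct { const void *btf; unsigned int id; } cands[]; /* flexible array */
};

/* Same trick as sizeof_cands(): header size plus cnt array slots. */
static size_t sizeof_cands(unsigned int cnt)
{
	return offsetof(struct cand_cache, cands[cnt]);
}

int main(void)
{
	struct cand_cache *cc = calloc(1, sizeof_cands(4));

	if (!cc)
		return 1;
	cc->cnt = 4;
	printf("bytes for 4 candidates: %zu\n", sizeof_cands(4));
	free(cc);
	return 0;
}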
9012 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
9016 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
9022 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
9024 bpf_free_cands(cands);
9028 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
9030 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
9031 bpf_free_cands(cands);
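The comment at 9028 is the invariant that forces the copies at 9022/9030: cands->name initially points into the program's BTF string section, which disappears when the prog is unloaded, so populate_cand_cache() deep-copies both the entry (kmemdup) and the name (kmemdup_nul, whose userspace analog is POSIX strndup()). A runnable sketch of why the name copy matters:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char prog_btf_strings[] = "task_struct";	/* lifetime-limited source */
	const char *name = prog_btf_strings;

	/* deep-copy before the source goes away (kmemdup_nul analog) */
	char *owned = strndup(name, strlen(name));

	if (!owned)
		return 1;
	memset(prog_btf_strings, 0, sizeof(prog_btf_strings)); /* "prog unloaded" */
	printf("cached name still valid: %s\n", owned);
	free(owned);
	return 0;
}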
9054 * that matches cached cands.
9064 if (cc->cands[j].btf == btf) {
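Lines 9054/9064 are from the purge path: when module BTFs come and go, any cached entry whose cands[].btf references the affected BTF must be dropped, otherwise those pointers dangle (and a stale name match could shadow a new module's types). A hedged userspace sketch of the invalidation scan; the kernel version also frees the entry and its name:

#include <stddef.h>
#include <stdio.h>

struct cand { const void *btf; unsigned int id; };
struct cc { unsigned int cnt; struct cand cands[2]; };

/* Drop every bucket that references the BTF being torn down. */
static void purge(struct cc **cache, size_t n, const void *btf)
{
	for (size_t i = 0; i < n; i++) {
		if (!cache[i])
			continue;
		for (unsigned int j = 0; j < cache[i]->cnt; j++) {
			if (cache[i]->cands[j].btf == btf) {
				cache[i] = NULL;
				break;
			}
		}
	}
}

int main(void)
{
	int mod_btf;
	struct cc entry = { 1, { { &mod_btf, 5 } } };
	struct cc *cache[4] = { &entry };

	purge(cache, 4, &mod_btf);
	printf("bucket 0 after purge: %s\n", cache[0] ? "kept" : "dropped");
	return 0;
}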
9082 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
9094 if (btf_kind(t) != cands->kind)
9106 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9110 if (targ_essent_len != cands->name_len)
9114 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
9116 bpf_free_cands(cands);
9120 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9121 bpf_free_cands(cands);
9122 cands = new_cands;
9123 cands->cands[cands->cnt].btf = targ_btf;
9124 cands->cands[cands->cnt].id = i;
9125 cands->cnt++;
9127 return cands;
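bpf_core_add_cands() (9082-9127) filters target types by kind (9094) and by essential-name prefix and length (9106/9110), then appends each hit with a grow-by-one copy: kmalloc room for cnt + 1 entries, memcpy the old block, free it, write slot cnt, bump cnt (9114-9125). A runnable userspace sketch of the same append pattern (all names hypothetical):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cc {
	unsigned int cnt;
	struct { const void *btf; unsigned int id; } cands[];
};

static size_t sizeof_cc(unsigned int cnt)
{
	return offsetof(struct cc, cands[cnt]);
}

/* Grow by one slot via copy, freeing the old block, as 9114-9125 does. */
static struct cc *add_cand(struct cc *old, const void *btf, unsigned int id)
{
	struct cc *new = malloc(sizeof_cc(old->cnt + 1));

	if (!new) {
		free(old);	/* kernel frees via bpf_free_cands() on failure too */
		return NULL;
	}
	memcpy(new, old, sizeof_cc(old->cnt));
	free(old);
	new->cands[new->cnt].btf = btf;
	new->cands[new->cnt].id = id;
	new->cnt++;
	return new;
}

int main(void)
{
	struct cc *cc = calloc(1, sizeof_cc(0));
	int dummy_btf;

	if (!cc)
		return 1;
	cc = add_cand(cc, &dummy_btf, 42);
	if (!cc)
		return 1;
	printf("cnt=%u id=%u\n", cc->cnt, cc->cands[0].id);
	free(cc);
	return 0;
}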
9133 struct bpf_cand_cache *cands, *cc, local_cand = {};
9157 cands = &local_cand;
9158 cands->name = name;
9159 cands->kind = btf_kind(local_type);
9160 cands->name_len = local_essent_len;
9162 cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9163 /* cands is a pointer to stack here */
9171 cands = bpf_core_add_cands(cands, main_btf, 1);
9172 if (IS_ERR(cands))
9173 return ERR_CAST(cands);
9175 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9177 /* populate cache even when cands->cnt == 0 */
9178 cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9187 /* cands is a pointer to stack here and cands->cnt == 0 */
9188 cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9203 cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
9205 if (IS_ERR(cands))
9206 return ERR_CAST(cands);
9210 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
9211 * or pointer to stack if cands->cnt == 0.
9212 * Copy it into the cache even when cands->cnt == 0 and
9215 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
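Read together, 9131-9215 give bpf_core_find_cands() a two-tier shape: build a stack-local key (9157-9160), try the vmlinux cache, scan vmlinux BTF on a miss, and populate the cache even for an empty result (9177) so the scan is never repeated; only an empty vmlinux result falls through to the same check/scan/populate sequence over module BTFs. The point of caching cnt == 0 is negative caching, shown by this minimal runnable sketch (the counters are hypothetical):

#include <stdio.h>

static int vmlinux_cache = -1;	/* -1: not cached; >= 0: cached cnt */
static int scan_calls;

static int scan_vmlinux(void)	/* pretend scan that finds no candidates */
{
	scan_calls++;
	return 0;
}

static int find_cands(void)
{
	if (vmlinux_cache >= 0)
		return vmlinux_cache;	/* hit, even for "no candidates" */
	vmlinux_cache = scan_vmlinux();	/* populate even when cnt == 0 */
	return vmlinux_cache;
}

int main(void)
{
	find_cands();
	find_cands();
	printf("scans performed: %d (empty result was cached)\n", scan_calls);
	return 0;
}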
9222 struct bpf_core_cand_list cands = {};
9256 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
9257 if (!cands.cands) {
9265 btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9266 cands.cands[i].btf = cc->cands[i].btf;
9267 cands.cands[i].id = cc->cands[i].id;
9269 cands.len = cc->cnt;
9277 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9288 kfree(cands.cands);
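The final group (9222-9288) converts a cache hit into the struct bpf_core_cand_list consumed by bpf_core_calc_relo_insn(): kcalloc cc->cnt slots, copy each (btf, id) pair across, set len, and kfree the array once the relocation result is computed. A runnable userspace sketch of the copy-out (types simplified, values hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct cand { const void *btf; unsigned int id; };
struct cand_list { struct cand *cands; unsigned int len; };

int main(void)
{
	struct cand cached[2] = { { NULL, 7 }, { NULL, 9 } };	/* cache entry */
	unsigned int cnt = 2;
	struct cand_list cands = { 0 };

	cands.cands = calloc(cnt, sizeof(*cands.cands));
	if (!cands.cands)
		return 1;
	for (unsigned int i = 0; i < cnt; i++)
		cands.cands[i] = cached[i];	/* copy (btf, id) pairs */
	cands.len = cnt;

	printf("len=%u first id=%u\n", cands.len, cands.cands[0].id);
	free(cands.cands);	/* matches the kfree at 9288 */
	return 0;
}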