// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */

#include <linux/bpf_verifier.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>

/*
 * This file implements live stack slots analysis. After accumulating
 * stack usage data, the analysis answers queries about whether a
 * particular stack slot may be read by an instruction or any of its
 * successors. This data is consumed by the verifier states caching
 * mechanism to decide which stack slots are important when looking for a
 * visited state corresponding to the current state.
 *
 * The analysis is call chain sensitive, meaning that data is collected
 * and queried for tuples (call chain, subprogram instruction index).
 * Such sensitivity allows identifying if some subprogram call always
 * leads to writes in the caller's stack.
 *
 * The basic idea is as follows:
 * - As the verifier accumulates a set of visited states, the analysis instance
 *   accumulates a conservative estimate of stack slots that can be read
 *   or must be written for each visited tuple (call chain, instruction index).
 * - If several states happen to visit the same instruction with the same
 *   call chain, stack usage information for the corresponding tuple is joined:
 *   - "may_read" set represents a union of all possibly read slots
 *     (any slot in "may_read" set might be read at or after the instruction);
 *   - "must_write" set represents an intersection of all possibly written slots
 *     (any slot in "must_write" set is guaranteed to be written by the instruction).
 * - The analysis is split into two phases:
 *   - read and write marks accumulation;
 *   - read and write marks propagation.
 * - The propagation phase is a textbook live variable data flow analysis:
 *
 *   state[cc, i].live_after = U [state[cc, s].live_before for s in insn_successors(i)]
 *   state[cc, i].live_before =
 *     (state[cc, i].live_after / state[cc, i].must_write) U state[cc, i].may_read
 *
 *   Where:
 *   - `U` stands for set union;
 *   - `/` stands for set difference;
 *   - `cc` stands for a call chain;
 *   - `i` and `s` are instruction indexes.
 *
 *   The above equations are computed for each call chain and instruction
 *   index until the state stops changing.
 * - Additionally, in order to transfer "must_write" information from a
 *   subprogram to call instructions invoking this subprogram,
 *   the "must_write_acc" set is tracked for each (cc, i) tuple:
 *   a set of stack slots that are guaranteed to be written by this
 *   instruction or any of its successors (within the subprogram).
 *   The equation for "must_write_acc" propagation looks as follows:
 *
 *     state[cc, i].must_write_acc =
 *       ∩ [state[cc, s].must_write_acc for s in insn_successors(i)]
 *       U state[cc, i].must_write
 *
 *   (An intersection of all "must_write_acc" for instruction successors
 *    plus all "must_write" slots for the instruction itself.)
 * - After the propagation phase completes for a subprogram, information from
 *   the (cc, 0) tuple (subprogram entry) is transferred to the caller's call chain:
 *   - "must_write_acc" set is intersected with the call site's "must_write" set;
 *   - "may_read" set is added to the call site's "may_read" set.
 * - Any live stack queries must be taken after the propagation phase.
 * - Accumulation and propagation phases can be entered multiple times,
 *   at any point in time:
 *   - "may_read" set only grows;
 *   - "must_write" set only shrinks;
 *   - for each visited verifier state with zero branches, all relevant
 *     read and write marks are already recorded by the analysis instance.
 *
 * Technically, the analysis is facilitated by the following data structures:
 * - Call chain: for a given verifier state, the call chain is a tuple of call
 *   instruction indexes leading to the current subprogram plus the subprogram
 *   entry point index.
 * - Function instance: for a given call chain, for each instruction in
 *   the current subprogram, a mapping between instruction index and a
 *   set of "may_read", "must_write" and other marks accumulated for this
 *   instruction.
 * - A hash table mapping call chains to function instances.
 */
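
/*
 * A small worked example of the equations above (purely illustrative,
 * not taken from any particular program): consider a straight-line
 * subprogram of three instructions in a single frame, followed by an
 * exit that touches no stack slots, with slots encoded as bit masks:
 *
 *   insn | may_read | must_write
 *   -----+----------+-----------
 *     0  |   0b00   |   0b10     (writes slot 1)
 *     1  |   0b01   |   0b00     (reads slot 0)
 *     2  |   0b10   |   0b00     (reads slot 1)
 *
 * The propagation converges to:
 *
 *   live_before[2] = 0b10
 *   live_before[1] = (0b10 / 0b00) U 0b01 = 0b11
 *   live_before[0] = (0b11 / 0b10) U 0b00 = 0b01
 *
 * I.e. only slot 0 is live before insn 0: slot 1 is overwritten before
 * any read, so its contents are irrelevant when comparing states cached
 * at insn 0.
 */
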
struct callchain {
	u32 callsites[MAX_CALL_FRAMES]; /* instruction pointer for each frame */
	/* cached subprog_info[*].start for functions owning the frames:
	 * - sp_starts[curframe] is used to get the insn's relative index within
	 *   the current function;
	 * - sp_starts[0..curframe-1] are used for fast get_outer_instance().
	 */
	u32 sp_starts[MAX_CALL_FRAMES];
	u32 curframe; /* depth of callsites and sp_starts arrays */
};

struct per_frame_masks {
	u64 may_read;		/* stack slots that may be read by this instruction */
	u64 must_write;		/* stack slots written by this instruction */
	u64 must_write_acc;	/* stack slots written by this instruction and its successors */
	u64 live_before;	/* stack slots that may be read by this insn and its successors */
};

/*
 * A function instance created for a specific callchain.
 * Encapsulates read and write marks for each instruction in the function.
 * Marks are tracked for each frame in the callchain.
 */
struct func_instance {
	struct hlist_node hl_node;
	struct callchain callchain;
	u32 insn_cnt;	/* cached number of insns in the function */
	bool updated;
	bool must_write_dropped;
	/* Per frame, per instruction masks, frames are allocated lazily. */
	struct per_frame_masks *frames[MAX_CALL_FRAMES];
	/* For each instruction, a flag telling whether "must_write" has been initialized for it. */
	bool *must_write_set;
};

struct live_stack_query {
	struct func_instance *instances[MAX_CALL_FRAMES]; /* valid in range [0..curframe] */
	u32 curframe;
	u32 insn_idx;
};

struct bpf_liveness {
	DECLARE_HASHTABLE(func_instances, 8);	  /* maps callchain to func_instance */
	struct live_stack_query live_stack_query; /* cache to avoid repetitive ht lookups */
	/* Cached instance corresponding to env->cur_state, avoids per-instruction ht lookup */
	struct func_instance *cur_instance;
	/*
	 * Below fields are used to accumulate stack write marks for the instruction at
	 * @write_insn_idx before submitting the marks to @cur_instance.
	 */
	u64 write_masks_acc[MAX_CALL_FRAMES];
	u32 write_insn_idx;
};
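
/*
 * Callchain example (hypothetical instruction indexes): suppose main()
 * starts at insn 0 and calls subprogram A at insn 5, A starts at insn 10
 * and calls subprogram B at insn 14, and B starts at insn 20. For a
 * verifier state currently executing inside B, compute_callchain() below
 * produces:
 *
 *   callsites = { 5, 14, 20 }  (the last entry is B's own start)
 *   sp_starts = { 0, 10, 20 }
 *   curframe  = 2
 *
 * Two states share a func_instance only if these callsites match, which
 * is what same_callsites() checks.
 */
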
/* Compute the callchain corresponding to state @st at depth @frameno */
static void compute_callchain(struct bpf_verifier_env *env, struct bpf_verifier_state *st,
			      struct callchain *callchain, u32 frameno)
{
	struct bpf_subprog_info *subprog_info = env->subprog_info;
	u32 i;

	memset(callchain, 0, sizeof(*callchain));
	for (i = 0; i <= frameno; i++) {
		callchain->sp_starts[i] = subprog_info[st->frame[i]->subprogno].start;
		if (i < st->curframe)
			callchain->callsites[i] = st->frame[i + 1]->callsite;
	}
	callchain->curframe = frameno;
	callchain->callsites[callchain->curframe] = callchain->sp_starts[callchain->curframe];
}

static u32 hash_callchain(struct callchain *callchain)
{
	return jhash2(callchain->callsites, callchain->curframe, 0);
}

static bool same_callsites(struct callchain *a, struct callchain *b)
{
	int i;

	if (a->curframe != b->curframe)
		return false;
	for (i = a->curframe; i >= 0; i--)
		if (a->callsites[i] != b->callsites[i])
			return false;
	return true;
}

/*
 * Find an existing or allocate a new function instance corresponding to @callchain.
 * Instances are accumulated in env->liveness->func_instances and persist
 * until the end of the verification process.
 */
static struct func_instance *__lookup_instance(struct bpf_verifier_env *env,
					       struct callchain *callchain)
{
	struct bpf_liveness *liveness = env->liveness;
	struct bpf_subprog_info *subprog;
	struct func_instance *result;
	u32 subprog_sz, size, key;

	key = hash_callchain(callchain);
	hash_for_each_possible(liveness->func_instances, result, hl_node, key)
		if (same_callsites(&result->callchain, callchain))
			return result;

	subprog = bpf_find_containing_subprog(env, callchain->sp_starts[callchain->curframe]);
	subprog_sz = (subprog + 1)->start - subprog->start;
	size = sizeof(struct func_instance);
	result = kvzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!result)
		return ERR_PTR(-ENOMEM);
	result->must_write_set = kvcalloc(subprog_sz, sizeof(*result->must_write_set),
					  GFP_KERNEL_ACCOUNT);
	if (!result->must_write_set) {
		kvfree(result);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&result->callchain, callchain, sizeof(*callchain));
	result->insn_cnt = subprog_sz;
	hash_add(liveness->func_instances, &result->hl_node, key);
	return result;
}

static struct func_instance *lookup_instance(struct bpf_verifier_env *env,
					     struct bpf_verifier_state *st,
					     u32 frameno)
{
	struct callchain callchain;

	compute_callchain(env, st, &callchain, frameno);
	return __lookup_instance(env, &callchain);
}

int bpf_stack_liveness_init(struct bpf_verifier_env *env)
{
	env->liveness = kvzalloc(sizeof(*env->liveness), GFP_KERNEL_ACCOUNT);
	if (!env->liveness)
		return -ENOMEM;
	hash_init(env->liveness->func_instances);
	return 0;
}

void bpf_stack_liveness_free(struct bpf_verifier_env *env)
{
	struct func_instance *instance;
	struct hlist_node *tmp;
	int bkt, i;

	if (!env->liveness)
		return;
	hash_for_each_safe(env->liveness->func_instances, bkt, tmp, instance, hl_node) {
		for (i = 0; i <= instance->callchain.curframe; i++)
			kvfree(instance->frames[i]);
		kvfree(instance->must_write_set);
		kvfree(instance);
	}
	kvfree(env->liveness);
}
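
/*
 * Per-instance data is indexed by instruction position relative to the
 * owning subprogram. E.g. (hypothetical numbers), if the subprogram
 * starts at insn 10, then marks for insn 14 in frame @frame live at
 * instance->frames[frame][4], as computed by relative_idx() below.
 */
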
/*
 * Convert absolute instruction index @insn_idx to an index relative
 * to start of the function corresponding to @instance.
 */
static int relative_idx(struct func_instance *instance, u32 insn_idx)
{
	return insn_idx - instance->callchain.sp_starts[instance->callchain.curframe];
}

static struct per_frame_masks *get_frame_masks(struct func_instance *instance,
					       u32 frame, u32 insn_idx)
{
	if (!instance->frames[frame])
		return NULL;

	return &instance->frames[frame][relative_idx(instance, insn_idx)];
}

static struct per_frame_masks *alloc_frame_masks(struct bpf_verifier_env *env,
						 struct func_instance *instance,
						 u32 frame, u32 insn_idx)
{
	struct per_frame_masks *arr;

	if (!instance->frames[frame]) {
		arr = kvcalloc(instance->insn_cnt, sizeof(*arr), GFP_KERNEL_ACCOUNT);
		instance->frames[frame] = arr;
		if (!arr)
			return ERR_PTR(-ENOMEM);
	}
	return get_frame_masks(instance, frame, insn_idx);
}

void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env)
{
	env->liveness->cur_instance = NULL;
}

/* If @env->liveness->cur_instance is null, set it to instance corresponding to @env->cur_state. */
static int ensure_cur_instance(struct bpf_verifier_env *env)
{
	struct bpf_liveness *liveness = env->liveness;
	struct func_instance *instance;

	if (liveness->cur_instance)
		return 0;

	instance = lookup_instance(env, env->cur_state, env->cur_state->curframe);
	if (IS_ERR(instance))
		return PTR_ERR(instance);

	liveness->cur_instance = instance;
	return 0;
}

/* Accumulate may_read masks for @frame at @insn_idx */
static int mark_stack_read(struct bpf_verifier_env *env,
			   struct func_instance *instance, u32 frame, u32 insn_idx, u64 mask)
{
	struct per_frame_masks *masks;
	u64 new_may_read;

	masks = alloc_frame_masks(env, instance, frame, insn_idx);
	if (IS_ERR(masks))
		return PTR_ERR(masks);
	new_may_read = masks->may_read | mask;
	if (new_may_read != masks->may_read &&
	    ((new_may_read | masks->live_before) != masks->live_before))
		instance->updated = true;
	masks->may_read |= mask;
	return 0;
}

int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frame, u32 insn_idx, u64 mask)
{
	int err;

	err = ensure_cur_instance(env);
	err = err ?: mark_stack_read(env, env->liveness->cur_instance, frame, insn_idx, mask);
	return err;
}

static void reset_stack_write_marks(struct bpf_verifier_env *env,
				    struct func_instance *instance, u32 insn_idx)
{
	struct bpf_liveness *liveness = env->liveness;
	int i;

	liveness->write_insn_idx = insn_idx;
	for (i = 0; i <= instance->callchain.curframe; i++)
		liveness->write_masks_acc[i] = 0;
}

int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx)
{
	struct bpf_liveness *liveness = env->liveness;
	int err;

	err = ensure_cur_instance(env);
	if (err)
		return err;

	reset_stack_write_marks(env, liveness->cur_instance, insn_idx);
	return 0;
}

void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frame, u64 mask)
{
	env->liveness->write_masks_acc[frame] |= mask;
}
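
/*
 * Expected usage from the verifier (a sketch; the exact call sites live
 * in verifier.c):
 *
 *	bpf_reset_stack_write_marks(env, insn_idx);
 *	bpf_mark_stack_write(env, frame, mask);    (zero or more times)
 *	bpf_commit_stack_write_marks(env);
 *
 * Writes are first gathered in write_masks_acc[] so that a single
 * "must_write" update per instruction visit is committed, even when the
 * instruction writes several slots or several frames.
 */
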
static int commit_stack_write_marks(struct bpf_verifier_env *env,
				    struct func_instance *instance)
{
	struct bpf_liveness *liveness = env->liveness;
	u32 idx, frame, curframe, old_must_write;
	struct per_frame_masks *masks;
	u64 mask;

	if (!instance)
		return 0;

	curframe = instance->callchain.curframe;
	idx = relative_idx(instance, liveness->write_insn_idx);
	for (frame = 0; frame <= curframe; frame++) {
		mask = liveness->write_masks_acc[frame];
		/* avoid allocating frames for zero masks */
		if (mask == 0 && !instance->must_write_set[idx])
			continue;
		masks = alloc_frame_masks(env, instance, frame, liveness->write_insn_idx);
		if (IS_ERR(masks))
			return PTR_ERR(masks);
		old_must_write = masks->must_write;
		/*
		 * If the instruction at this callchain is seen for the first time, set
		 * must_write equal to @mask. Otherwise take an intersection with the
		 * previous value.
		 */
		if (instance->must_write_set[idx])
			mask &= old_must_write;
		if (old_must_write != mask) {
			masks->must_write = mask;
			instance->updated = true;
		}
		if (old_must_write & ~mask)
			instance->must_write_dropped = true;
	}
	instance->must_write_set[idx] = true;
	liveness->write_insn_idx = 0;
	return 0;
}

/*
 * Merge stack write marks in @env->liveness->write_masks_acc
 * with information already in @env->liveness->cur_instance.
 */
int bpf_commit_stack_write_marks(struct bpf_verifier_env *env)
{
	return commit_stack_write_marks(env, env->liveness->cur_instance);
}

static char *fmt_callchain(struct bpf_verifier_env *env, struct callchain *callchain)
{
	char *buf_end = env->tmp_str_buf + sizeof(env->tmp_str_buf);
	char *buf = env->tmp_str_buf;
	int i;

	buf += snprintf(buf, buf_end - buf, "(");
	for (i = 0; i <= callchain->curframe; i++)
		buf += snprintf(buf, buf_end - buf, "%s%d", i ? "," : "", callchain->callsites[i]);
	snprintf(buf, buf_end - buf, ")");
	return env->tmp_str_buf;
}

static void log_mask_change(struct bpf_verifier_env *env, struct callchain *callchain,
			    char *pfx, u32 frame, u32 insn_idx, u64 old, u64 new)
{
	u64 changed_bits = old ^ new;
	u64 new_ones = new & changed_bits;
	u64 new_zeros = ~new & changed_bits;

	if (!changed_bits)
		return;
	bpf_log(&env->log, "%s frame %d insn %d ", fmt_callchain(env, callchain), frame, insn_idx);
	if (new_ones) {
		bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_ones);
		bpf_log(&env->log, "+%s %s ", pfx, env->tmp_str_buf);
	}
	if (new_zeros) {
		bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_zeros);
		bpf_log(&env->log, "-%s %s", pfx, env->tmp_str_buf);
	}
	bpf_log(&env->log, "\n");
}

static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
						struct func_instance *instance)
{
	struct callchain callchain = instance->callchain;

	/* Adjust @callchain to represent the callchain one frame up */
	callchain.callsites[callchain.curframe] = 0;
	callchain.sp_starts[callchain.curframe] = 0;
	callchain.curframe--;
	callchain.callsites[callchain.curframe] = callchain.sp_starts[callchain.curframe];
	return __lookup_instance(env, &callchain);
}

static u32 callchain_subprog_start(struct callchain *callchain)
{
	return callchain->sp_starts[callchain->curframe];
}
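
/*
 * Example of the "must_write only shrinks" rule (hypothetical masks):
 * the first time writes are committed for some (callchain, insn, frame),
 * the accumulated mask is stored as is, say 0b011; if a later path
 * through the same callchain commits 0b110, commit_stack_write_marks()
 * above intersects the two and keeps 0b010, setting must_write_dropped
 * so that stale must_write_acc values are recomputed by the next
 * update_instance() pass.
 */
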
/*
 * Transfer @may_read and @must_write_acc marks from the first instruction of @instance
 * to the call instruction in the function instance calling @instance.
 */
static int propagate_to_outer_instance(struct bpf_verifier_env *env,
				       struct func_instance *instance)
{
	struct callchain *callchain = &instance->callchain;
	u32 this_subprog_start, callsite, frame;
	struct func_instance *outer_instance;
	struct per_frame_masks *insn;
	int err;

	this_subprog_start = callchain_subprog_start(callchain);
	outer_instance = get_outer_instance(env, instance);
	callsite = callchain->callsites[callchain->curframe - 1];

	reset_stack_write_marks(env, outer_instance, callsite);
	for (frame = 0; frame < callchain->curframe; frame++) {
		insn = get_frame_masks(instance, frame, this_subprog_start);
		if (!insn)
			continue;
		bpf_mark_stack_write(env, frame, insn->must_write_acc);
		err = mark_stack_read(env, outer_instance, frame, callsite, insn->live_before);
		if (err)
			return err;
	}
	return commit_stack_write_marks(env, outer_instance);
}

static inline bool update_insn(struct bpf_verifier_env *env,
			       struct func_instance *instance, u32 frame, u32 insn_idx)
{
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	u64 new_before, new_after, must_write_acc;
	struct per_frame_masks *insn, *succ_insn;
	u32 succ_num, s, succ[2];
	bool changed;

	succ_num = bpf_insn_successors(env->prog, insn_idx, succ);
	if (unlikely(succ_num == 0))
		return false;

	changed = false;
	insn = get_frame_masks(instance, frame, insn_idx);
	new_before = 0;
	new_after = 0;
	/*
	 * New "must_write_acc" is an intersection of all "must_write_acc"
	 * of successors plus all "must_write" slots of the instruction itself.
	 */
	must_write_acc = U64_MAX;
	for (s = 0; s < succ_num; ++s) {
		succ_insn = get_frame_masks(instance, frame, succ[s]);
		new_after |= succ_insn->live_before;
		must_write_acc &= succ_insn->must_write_acc;
	}
	must_write_acc |= insn->must_write;
	/*
	 * New "live_before" is a union of all "live_before" of successors
	 * minus slots written by the instruction plus slots read by the instruction.
	 */
	new_before = (new_after & ~insn->must_write) | insn->may_read;
	changed |= new_before != insn->live_before;
	changed |= must_write_acc != insn->must_write_acc;
	if (unlikely(env->log.level & BPF_LOG_LEVEL2) &&
	    (insn->may_read || insn->must_write ||
	     insn_idx == callchain_subprog_start(&instance->callchain) ||
	     aux[insn_idx].prune_point)) {
		log_mask_change(env, &instance->callchain, "live",
				frame, insn_idx, insn->live_before, new_before);
		log_mask_change(env, &instance->callchain, "written",
				frame, insn_idx, insn->must_write_acc, must_write_acc);
	}
	insn->live_before = new_before;
	insn->must_write_acc = must_write_acc;
	return changed;
}
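
/*
 * Once a subprogram instance reaches a fixed point, the marks of its
 * entry instruction summarize the whole call: e.g. (hypothetically) if
 * every path through the subprogram writes slot 0 of its caller's frame,
 * that slot ends up in the entry's must_write_acc for the caller's frame
 * and propagate_to_outer_instance() records it as "must_write" of the
 * call instruction, while the entry's live_before bits become "may_read"
 * of the call instruction.
 */
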
/* Fixed-point computation of @live_before and @must_write_acc marks */
static int update_instance(struct bpf_verifier_env *env, struct func_instance *instance)
{
	u32 i, frame, po_start, po_end, cnt, this_subprog_start;
	struct callchain *callchain = &instance->callchain;
	int *insn_postorder = env->cfg.insn_postorder;
	struct bpf_subprog_info *subprog;
	struct per_frame_masks *insn;
	bool changed;
	int err;

	this_subprog_start = callchain_subprog_start(callchain);
	/*
	 * If must_write marks were updated, must_write_acc needs to be reset
	 * (to account for the case when new must_write sets became smaller).
	 */
	if (instance->must_write_dropped) {
		for (frame = 0; frame <= callchain->curframe; frame++) {
			if (!instance->frames[frame])
				continue;

			for (i = 0; i < instance->insn_cnt; i++) {
				insn = get_frame_masks(instance, frame, this_subprog_start + i);
				insn->must_write_acc = 0;
			}
		}
	}

	subprog = bpf_find_containing_subprog(env, this_subprog_start);
	po_start = subprog->postorder_start;
	po_end = (subprog + 1)->postorder_start;
	cnt = 0;
	/* repeat until a fixed point is reached */
	do {
		cnt++;
		changed = false;
		for (frame = 0; frame <= instance->callchain.curframe; frame++) {
			if (!instance->frames[frame])
				continue;

			for (i = po_start; i < po_end; i++)
				changed |= update_insn(env, instance, frame, insn_postorder[i]);
		}
	} while (changed);

	if (env->log.level & BPF_LOG_LEVEL2)
		bpf_log(&env->log, "%s live stack update done in %d iterations\n",
			fmt_callchain(env, callchain), cnt);

	/* transfer marks accumulated for outer frames to the outer func instance (caller) */
	if (callchain->curframe > 0) {
		err = propagate_to_outer_instance(env, instance);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Prepare all callchains within @env->cur_state for querying.
 * This function should be called after each verifier.c:pop_stack()
 * and whenever verifier.c:do_check_insn() processes a subprogram exit.
 * This guarantees that visited verifier states with zero branches
 * have their bpf_mark_stack_{read,write}() effects propagated in
 * @env->liveness.
 */
int bpf_update_live_stack(struct bpf_verifier_env *env)
{
	struct func_instance *instance;
	int err, frame;

	bpf_reset_live_stack_callchain(env);
	for (frame = env->cur_state->curframe; frame >= 0; --frame) {
		instance = lookup_instance(env, env->cur_state, frame);
		if (IS_ERR(instance))
			return PTR_ERR(instance);

		if (instance->updated) {
			err = update_instance(env, instance);
			if (err)
				return err;
			instance->updated = false;
			instance->must_write_dropped = false;
		}
	}
	return 0;
}

static bool is_live_before(struct func_instance *instance, u32 insn_idx, u32 frameno, u32 spi)
{
	struct per_frame_masks *masks;

	masks = get_frame_masks(instance, frameno, insn_idx);
	return masks && (masks->live_before & BIT(spi));
}

int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct live_stack_query *q = &env->liveness->live_stack_query;
	struct func_instance *instance;
	u32 frame;

	memset(q, 0, sizeof(*q));
	for (frame = 0; frame <= st->curframe; frame++) {
		instance = lookup_instance(env, st, frame);
		if (IS_ERR(instance))
			return PTR_ERR(instance);
		q->instances[frame] = instance;
	}
	q->curframe = st->curframe;
	q->insn_idx = st->insn_idx;
	return 0;
}
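
/*
 * Query protocol (a sketch; the caller is the verifier's state caching
 * code): after bpf_update_live_stack() has run for the relevant states,
 * a comparison against a visited state @st is preceded by
 * bpf_live_stack_query_init(env, st), after which bpf_stack_slot_alive()
 * is consulted per (frameno, spi) to decide whether that slot matters
 * when matching states.
 */
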
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi)
{
	/*
	 * A slot is alive if it is read before q->insn_idx in the current func
	 * instance, or if for some outer func instance it is:
	 * - alive before the callsite, if the callsite calls a callback;
	 * - alive after the callsite, otherwise.
	 */
	struct live_stack_query *q = &env->liveness->live_stack_query;
	struct func_instance *instance, *curframe_instance;
	u32 i, callsite;
	bool alive;

	curframe_instance = q->instances[q->curframe];
	if (is_live_before(curframe_instance, q->insn_idx, frameno, spi))
		return true;

	for (i = frameno; i < q->curframe; i++) {
		callsite = curframe_instance->callchain.callsites[i];
		instance = q->instances[i];
		alive = bpf_calls_callback(env, callsite)
			? is_live_before(instance, callsite, frameno, spi)
			: is_live_before(instance, callsite + 1, frameno, spi);
		if (alive)
			return true;
	}

	return false;
}