Lines Matching +full:dont +full:- +full:validate

1 // SPDX-License-Identifier: GPL-2.0-only
15 * - lock inversion scenarios
16 * - circular lock dependencies
17 * - hardirq/softirq safe/unsafe locking bugs
124 if (current->lockdep_recursion) in lockdep_enabled()
135 * to use a raw spinlock - we really don't want the spinlock
178 * walking the graph we don't change it (while the other in graph_lock()
217 #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
229 unsigned int class_idx = hlock->class_idx; in hlock_class()
231 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */ in hlock_class()
243 * At this point, if the passed hlock->class_idx is still garbage, in hlock_class()
275 if (time > lt->max) in lock_time_inc()
276 lt->max = time; in lock_time_inc()
278 if (time < lt->min || !lt->nr) in lock_time_inc()
279 lt->min = time; in lock_time_inc()
281 lt->total += time; in lock_time_inc()
282 lt->nr++; in lock_time_inc()
287 if (!src->nr) in lock_time_add()
290 if (src->max > dst->max) in lock_time_add()
291 dst->max = src->max; in lock_time_add()
293 if (src->min < dst->min || !dst->nr) in lock_time_add()
294 dst->min = src->min; in lock_time_add()
296 dst->total += src->total; in lock_time_add()
297 dst->nr += src->nr; in lock_time_add()
307 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; in lock_stats()
309 for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++) in lock_stats()
310 stats->contention_point[i] += pcs->contention_point[i]; in lock_stats()
312 for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++) in lock_stats()
313 stats->contending_point[i] += pcs->contending_point[i]; in lock_stats()
315 lock_time_add(&pcs->read_waittime, &stats->read_waittime); in lock_stats()
316 lock_time_add(&pcs->write_waittime, &stats->write_waittime); in lock_stats()
318 lock_time_add(&pcs->read_holdtime, &stats->read_holdtime); in lock_stats()
319 lock_time_add(&pcs->write_holdtime, &stats->write_holdtime); in lock_stats()
321 for (i = 0; i < ARRAY_SIZE(stats->bounces); i++) in lock_stats()
322 stats->bounces[i] += pcs->bounces[i]; in lock_stats()
332 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; in clear_lock_stats()
336 memset(class->contention_point, 0, sizeof(class->contention_point)); in clear_lock_stats()
337 memset(class->contending_point, 0, sizeof(class->contending_point)); in clear_lock_stats()
342 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes]; in get_lock_stats()
353 holdtime = lockstat_clock() - hlock->holdtime_stamp; in lock_release_holdtime()
356 if (hlock->read) in lock_release_holdtime()
357 lock_time_inc(&stats->read_holdtime, holdtime); in lock_release_holdtime()
359 lock_time_inc(&stats->write_holdtime, holdtime); in lock_release_holdtime()
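
The lockstat lines above accumulate per-class timing: lock_time_inc() folds one sample into a min/max/total/count record (the !lt->nr test seeds min on the first sample), and lock_time_add() merges a per-CPU record into the global view built by lock_stats(). A minimal user-space sketch of that accumulation, with field names mirroring the listing but otherwise standalone and illustrative only:

#include <stdio.h>

struct lock_time {
        long long min;
        long long max;
        long long total;
        unsigned long nr;
};

static void lock_time_inc(struct lock_time *lt, long long time)
{
        if (time > lt->max)
                lt->max = time;
        if (time < lt->min || !lt->nr)  /* first sample initializes min */
                lt->min = time;
        lt->total += time;
        lt->nr++;
}

static void lock_time_add(const struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)                   /* nothing to fold in */
                return;
        if (src->max > dst->max)
                dst->max = src->max;
        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;
        dst->total += src->total;
        dst->nr += src->nr;
}

int main(void)
{
        struct lock_time percpu = {0}, global = {0};

        lock_time_inc(&percpu, 120);    /* e.g. two hold times, in ns */
        lock_time_inc(&percpu, 80);
        lock_time_add(&percpu, &global);
        printf("nr=%lu min=%lld max=%lld avg=%lld\n",
               global.nr, global.min, global.max, global.total / global.nr);
        return 0;
}
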
377 * struct pending_free - information about data structures about to be freed
388 * struct delayed_free - data structures used for delayed freeing
406 * The lockdep classes are in a hash-table as well, for fast lookup:
408 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
416 * We put the lock dependency chains into a hash-table as well, to cache
419 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
433 return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS)); in hlock_id()
438 return hlock_id & (MAX_LOCKDEP_KEYS - 1); in chain_hlock_class_idx()
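
hlock_id() above packs a held lock's class index and its read state into one value small enough for a u16 chain_hlocks[] entry, and chain_hlock_class_idx() masks the class index back out. A standalone sketch of that packing; the width of 13 bits for MAX_LOCKDEP_KEYS_BITS is an assumption used only for illustration:

#include <assert.h>
#include <stdio.h>

#define KEYS_BITS       13              /* assumed MAX_LOCKDEP_KEYS_BITS */
#define KEYS_MASK       ((1u << KEYS_BITS) - 1)

static unsigned short hlock_id(unsigned int class_idx, unsigned int read)
{
        /* class index in the low bits, read state (0, 1 or 2) above it */
        return (unsigned short)(class_idx | (read << KEYS_BITS));
}

static unsigned int id_to_class_idx(unsigned short id)
{
        return id & KEYS_MASK;
}

static unsigned int id_to_read(unsigned short id)
{
        return id >> KEYS_BITS;
}

int main(void)
{
        unsigned short id = hlock_id(42, 2);    /* recursive-read lock of class 42 */

        assert(id_to_class_idx(id) == 42);
        assert(id_to_read(id) == 2);
        printf("id=0x%x class=%u read=%u\n", id, id_to_class_idx(id), id_to_read(id));
        return 0;
}
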
444 * It's a 64-bit hash, because it's important for the keys to be
458 task->lockdep_depth = 0; /* no locks held yet */ in lockdep_init_task()
459 task->curr_chain_key = INITIAL_CHAIN_KEY; in lockdep_init_task()
460 task->lockdep_recursion = 0; in lockdep_init_task()
502 if (class->name_version == 1 && in class_filter()
503 !strcmp(class->name, "lockname")) in class_filter()
505 if (class->name_version == 1 && in class_filter()
506 !strcmp(class->name, "&struct->lockfield")) in class_filter()
535 * struct lock_trace - single stack backtrace
550 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
557 return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries && in traces_identical()
558 memcmp(t1->entries, t2->entries, in traces_identical()
559 t1->nr_entries * sizeof(t1->entries[0])) == 0; in traces_identical()
573 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - in save_trace()
587 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); in save_trace()
589 hash = jhash(trace->entries, trace->nr_entries * in save_trace()
590 sizeof(trace->entries[0]), 0); in save_trace()
591 trace->hash = hash; in save_trace()
592 hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1)); in save_trace()
597 nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries; in save_trace()
598 hlist_add_head(&trace->hash_entry, hash_head); in save_trace()
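
save_trace() above deduplicates stack traces: it hashes the captured return addresses, indexes a hash table with the result, and only stores a new trace when no identical one exists (traces_identical() compares hash, length and entries). A standalone sketch of the same idea; it substitutes a trivial FNV-1a hash and a fixed-size trace for the kernel's jhash() and hash list, purely for illustration:

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 8

struct lock_trace {
        unsigned int hash;
        unsigned int nr_entries;
        unsigned long entries[MAX_ENTRIES];
};

static unsigned int hash_trace(const unsigned long *e, unsigned int n)
{
        unsigned int h = 2166136261u;           /* FNV-1a, stand-in for jhash() */
        const unsigned char *p = (const unsigned char *)e;
        size_t i;

        for (i = 0; i < n * sizeof(*e); i++)
                h = (h ^ p[i]) * 16777619u;
        return h;
}

static int traces_identical(const struct lock_trace *t1, const struct lock_trace *t2)
{
        return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
               !memcmp(t1->entries, t2->entries,
                       t1->nr_entries * sizeof(t1->entries[0]));
}

int main(void)
{
        struct lock_trace a = { .nr_entries = 2, .entries = { 0x1000, 0x2000 } };
        struct lock_trace b = a;

        a.hash = hash_trace(a.entries, a.nr_entries);
        b.hash = hash_trace(b.entries, b.nr_entries);
        printf("identical: %d\n", traces_identical(&a, &b));   /* prints 1 */
        return 0;
}
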
651 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
652 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
653 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
654 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
664 [LOCK_USAGE_STATES] = "IN-NMI",
690 * - '+': irq is enabled and not in irq context in get_usage_char()
691 * - '-': in irq context and irq is disabled in get_usage_char()
692 * - '?': in irq context and irq is enabled in get_usage_char()
694 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) { in get_usage_char()
696 if (class->usage_mask & lock_flag(bit)) in get_usage_char()
698 } else if (class->usage_mask & lock_flag(bit)) in get_usage_char()
699 c = '-'; in get_usage_char()
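
get_usage_char() above condenses two usage bits of a class (ever used while in this irq context, ever taken with this irq enabled) into the single character printed in lockdep splats: '.', '-', '+' or '?'. A simplified standalone rendering of that decision table; the boolean parameters are an illustrative stand-in for the usage_mask tests:

#include <stdio.h>

static char usage_char(int used_in, int enabled)
{
        char c = '.';                   /* never involved with this irq state */

        if (enabled) {
                c = '+';                /* taken with the irq enabled ... */
                if (used_in)
                        c = '?';        /* ... and also used in irq context */
        } else if (used_in) {
                c = '-';                /* only ever used in irq context */
        }
        return c;
}

int main(void)
{
        printf("%c %c %c %c\n",
               usage_char(0, 0), usage_char(1, 0),
               usage_char(0, 1), usage_char(1, 1));     /* . - + ? */
        return 0;
}
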
722 name = class->name; in __print_lock_name()
724 name = __get_key_name(class->key, str); in __print_lock_name()
728 if (class->name_version > 1) in __print_lock_name()
729 printk(KERN_CONT "#%d", class->name_version); in __print_lock_name()
730 if (class->subclass) in __print_lock_name()
731 printk(KERN_CONT "/%d", class->subclass); in __print_lock_name()
732 if (hlock && class->print_fn) in __print_lock_name()
733 class->print_fn(hlock->instance); in __print_lock_name()
745 printk(KERN_CONT "){%s}-{%d:%d}", usage, in print_lock_name()
746 class->wait_type_outer ?: class->wait_type_inner, in print_lock_name()
747 class->wait_type_inner); in print_lock_name()
755 name = lock->name; in print_lockdep_cache()
757 name = __get_key_name(lock->key->subkeys, str); in print_lockdep_cache()
781 printk(KERN_CONT "%px", hlock->instance); in print_lock()
783 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); in print_lock()
788 int i, depth = READ_ONCE(p->lockdep_depth); in lockdep_print_held_locks()
791 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p)); in lockdep_print_held_locks()
794 str_plural(depth), p->comm, task_pid_nr(p)); in lockdep_print_held_locks()
803 print_lock(p->held_locks + i); in lockdep_print_held_locks()
809 printk("%s %.*s %s\n", init_utsname()->release, in print_kernel_ident()
810 (int)strcspn(init_utsname()->version, " "), in print_kernel_ident()
811 init_utsname()->version, in print_kernel_ident()
850 * in-kernel percpu var? in static_obj()
864 * class->name_version generation counter. The caller must hold the graph
872 if (!new_class->name) in count_matching_names()
876 if (new_class->key - new_class->subclass == class->key) in count_matching_names()
877 return class->name_version; in count_matching_names()
878 if (class->name && !strcmp(class->name, new_class->name)) in count_matching_names()
879 count = max(count, class->name_version); in count_matching_names()
885 /* used from NMI context -- must be lockless */
911 if (unlikely(!lock->key)) in look_up_lock_class()
915 * NOTE: the class-key must be unique. For dynamic locks, a static in look_up_lock_class()
917 * (or spin_lock_init()) call - which acts as the key. For static in look_up_lock_class()
923 key = lock->key->subkeys + subclass; in look_up_lock_class()
934 if (class->key == key) { in look_up_lock_class()
939 WARN_ONCE(class->name != lock->name && in look_up_lock_class()
940 lock->key != &__lockdep_no_validate__, in look_up_lock_class()
942 lock->name, lock->key, class->name); in look_up_lock_class()
951 * Static locks do not have their class-keys yet - for them the key is
971 lock->key = (void *)can_addr; in assign_lock_key()
973 lock->key = (void *)can_addr; in assign_lock_key()
975 lock->key = (void *)lock; in assign_lock_key()
977 /* Debug-check: all keys must be persistent! */ in assign_lock_key()
980 pr_err("INFO: trying to register non-static key.\n"); in assign_lock_key()
1018 if (in_list(e, &class->locks_after) || in in_any_class_list()
1019 in_list(e, &class->locks_before)) in in_any_class_list()
1030 if (e->links_to != c) { in class_lock_list_valid()
1032 c->name ? : "(?)", in class_lock_list_valid()
1033 (unsigned long)(e - list_entries), in class_lock_list_valid()
1034 e->links_to && e->links_to->name ? in class_lock_list_valid()
1035 e->links_to->name : "(?)", in class_lock_list_valid()
1036 e->class && e->class->name ? e->class->name : in class_lock_list_valid()
1054 for (i = chain->base; i < chain->base + chain->depth; i++) in check_lock_chain_key()
1060 if (chain->chain_key != chain_key) { in check_lock_chain_key()
1062 (unsigned long long)(chain - lock_chains), in check_lock_chain_key()
1063 (unsigned long long)chain->chain_key, in check_lock_chain_key()
1077 if (in_list(&class->lock_entry, &pf->zapped)) in in_any_zapped_class_list()
1095 if (!in_list(&class->lock_entry, &all_lock_classes) && in __check_data_structures()
1096 !in_list(&class->lock_entry, &free_lock_classes) && in __check_data_structures()
1099 class, class->name ? : "(?)"); in __check_data_structures()
1107 if (!class_lock_list_valid(class, &class->locks_before)) in __check_data_structures()
1109 if (!class_lock_list_valid(class, &class->locks_after)) in __check_data_structures()
1128 if (!in_any_class_list(&e->entry)) { in __check_data_structures()
1130 (unsigned int)(e - list_entries), in __check_data_structures()
1131 e->class->name ? : "(?)", in __check_data_structures()
1132 e->links_to->name ? : "(?)"); in __check_data_structures()
1143 if (in_any_class_list(&e->entry)) { in __check_data_structures()
1145 (unsigned int)(e - list_entries), in __check_data_structures()
1146 e->class && e->class->name ? e->class->name : in __check_data_structures()
1148 e->links_to && e->links_to->name ? in __check_data_structures()
1149 e->links_to->name : "(?)"); in __check_data_structures()
1238 hlist_add_head_rcu(&key->hash_entry, hash_head); in lockdep_register_key()
1260 * a use-after-free in that case by returning early. in is_dynamic_key()
1280 * Register a lock's class in the hash-table, if the class is not present
1298 if (!lock->key) { in register_lock_class()
1301 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { in register_lock_class()
1305 key = lock->key->subkeys + subclass; in register_lock_class()
1312 * We have to do the hash-walk again, to avoid races in register_lock_class()
1316 if (class->key == key) in register_lock_class()
1337 __set_bit(class - lock_classes, lock_classes_in_use); in register_lock_class()
1339 class->key = key; in register_lock_class()
1340 class->name = lock->name; in register_lock_class()
1341 class->subclass = subclass; in register_lock_class()
1342 WARN_ON_ONCE(!list_empty(&class->locks_before)); in register_lock_class()
1343 WARN_ON_ONCE(!list_empty(&class->locks_after)); in register_lock_class()
1344 class->name_version = count_matching_names(class); in register_lock_class()
1345 class->wait_type_inner = lock->wait_type_inner; in register_lock_class()
1346 class->wait_type_outer = lock->wait_type_outer; in register_lock_class()
1347 class->lock_type = lock->lock_type; in register_lock_class()
1349 * We use RCU's safe list-add method to make in register_lock_class()
1350 * parallel walking of the hash-list safe: in register_lock_class()
1352 hlist_add_head_rcu(&class->hash_entry, hash_head); in register_lock_class()
1357 list_move_tail(&class->lock_entry, &all_lock_classes); in register_lock_class()
1358 idx = class - lock_classes; in register_lock_class()
1366 printk("\nnew class %px: %s", class->key, class->name); in register_lock_class()
1367 if (class->name_version > 1) in register_lock_class()
1368 printk(KERN_CONT "#%d", class->name_version); in register_lock_class()
1382 lock->class_cache[0] = class; in register_lock_class()
1384 lock->class_cache[subclass] = class; in register_lock_class()
1388 * hash but the subclass -- which is hashed in -- didn't match. in register_lock_class()
1390 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) in register_lock_class()
1431 * Lock not present yet - get a new dependency struct and in add_lock_to_list()
1438 entry->class = this; in add_lock_to_list()
1439 entry->links_to = links_to; in add_lock_to_list()
1440 entry->dep = dep; in add_lock_to_list()
1441 entry->distance = distance; in add_lock_to_list()
1442 entry->trace = trace; in add_lock_to_list()
1445 * iteration is under RCU-sched; see look_up_lock_class() and in add_lock_to_list()
1448 list_add_tail_rcu(&entry->entry, head); in add_lock_to_list()
1457 #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1461 * breadth-first search (BFS) algorithm, by which we can determine
1464 * indicates that adding the <prev> -> <next> lock dependency will
1465 * produce a circle in the graph. Breadth-first search instead of
1466 * depth-first search is used in order to find the shortest (circular)
1482 cq->front = cq->rear = 0; in __cq_init()
1488 return (cq->front == cq->rear); in __cq_empty()
1493 return ((cq->rear + 1) & CQ_MASK) == cq->front; in __cq_full()
1499 return -1; in __cq_enqueue()
1501 cq->element[cq->rear] = elem; in __cq_enqueue()
1502 cq->rear = (cq->rear + 1) & CQ_MASK; in __cq_enqueue()
1517 lock = cq->element[cq->front]; in __cq_dequeue()
1518 cq->front = (cq->front + 1) & CQ_MASK; in __cq_dequeue()
1525 return (cq->rear - cq->front) & CQ_MASK; in __cq_get_elem_count()
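
The __cq_*() lines above implement the FIFO used by lockdep's breadth-first search: a power-of-two ring buffer whose indices wrap with "& CQ_MASK" and which reports full when advancing the rear would meet the front. A standalone sketch with int elements instead of struct lock_list pointers:

#include <stdio.h>

#define CQ_SIZE 8                       /* must be a power of two */
#define CQ_MASK (CQ_SIZE - 1)

struct cq {
        int element[CQ_SIZE];
        unsigned int front, rear;
};

static void cq_init(struct cq *q)       { q->front = q->rear = 0; }
static int cq_empty(const struct cq *q) { return q->front == q->rear; }
static int cq_full(const struct cq *q)  { return ((q->rear + 1) & CQ_MASK) == q->front; }

static int cq_enqueue(struct cq *q, int elem)
{
        if (cq_full(q))
                return -1;
        q->element[q->rear] = elem;
        q->rear = (q->rear + 1) & CQ_MASK;
        return 0;
}

static int cq_dequeue(struct cq *q, int *elem)
{
        if (cq_empty(q))
                return -1;
        *elem = q->element[q->front];
        q->front = (q->front + 1) & CQ_MASK;
        return 0;
}

int main(void)
{
        struct cq q;
        int v;

        cq_init(&q);
        cq_enqueue(&q, 1);
        cq_enqueue(&q, 2);
        while (!cq_dequeue(&q, &v))
                printf("%d\n", v);      /* prints 1 then 2: FIFO order for BFS */
        return 0;
}
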
1530 lock->class->dep_gen_id = lockdep_dependency_gen_id; in mark_lock_accessed()
1536 lock->parent = parent; in visit_lock_entry()
1541 return lock->class->dep_gen_id == lockdep_dependency_gen_id; in lock_accessed()
1546 return child->parent; in get_lock_parent()
1570 void *lock_class = lock->class; in get_dep_list()
1591 BFS_EINVALIDNODE = -2,
1592 BFS_EQUEUEFULL = -1,
1608 * For dependency @prev -> @next:
1610 * SR: @prev is shared reader (->read != 0) and @next is recursive reader
1611 * (->read == 2)
1612 * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
1613 * SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
1614 * EN: @prev is exclusive locker and @next is non-recursive locker
1617 * bit0 is prev->read == 0
1618 * bit1 is next->read != 2
1633 return (prev->read == 0) + ((next->read != 2) << 1); in __calc_dep_bit()
1648 return (next->read != 2) + ((prev->read == 0) << 1); in __calc_dep_bitb()
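
__calc_dep_bit() above encodes which of the four dependency kinds (SR/ER/SN/EN) a prev -> next pair creates: bit0 records that prev is an exclusive locker and bit1 that next is not a recursive reader, and the resulting bit number is turned into a mask so a single u8 can remember every kind seen between two classes. A standalone sketch of that encoding, with names mirroring the scheme described above:

#include <stdio.h>

enum { DEP_SR_BIT = 0, DEP_ER_BIT = 1, DEP_SN_BIT = 2, DEP_EN_BIT = 3 };

static unsigned int calc_dep_bit(int prev_read, int next_read)
{
        /* bit0: prev is exclusive (read == 0), bit1: next is not recursive (read != 2) */
        return (prev_read == 0) + ((next_read != 2) << 1);
}

static unsigned char calc_dep(int prev_read, int next_read)
{
        return 1u << calc_dep_bit(prev_read, next_read);
}

int main(void)
{
        /* shared reader -> recursive reader: SR, bit 0 */
        printf("SR mask = 0x%x\n", calc_dep(2, 2));
        /* exclusive locker -> non-recursive locker: EN, bit 3 */
        printf("EN mask = 0x%x\n", calc_dep(0, 0));
        return 0;
}
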
1663 lock->class = class; in __bfs_init_root()
1664 lock->parent = NULL; in __bfs_init_root()
1665 lock->only_xr = 0; in __bfs_init_root()
1672 * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
1673 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
1674 * and -(S*)->.
1680 lock->only_xr = (hlock->read == 2); in bfs_init_root()
1686 * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
1687 * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
1688 * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
1694 lock->only_xr = (hlock->read != 0); in bfs_init_rootb()
1699 if (!lock || !lock->parent) in __bfs_next()
1702 return list_next_or_null_rcu(get_dep_list(lock->parent, offset), in __bfs_next()
1703 &lock->entry, struct lock_list, entry); in __bfs_next()
1707 * Breadth-First Search to find a strong path in the dependency graph.
1720 * dependencies as -(*R)-> -(S*)->, please see:
1722 * Documentation/locking/lockdep-design.rst
1729 * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
1730 * filter out any -(S*)-> in the current dependency and after that, the
1731 * ->only_xr is set according to whether we only have -(*R)-> left.
1753 if (!lock->class) in __bfs()
1761 * @lock->class->locks_{after,before}) we skip, otherwise go in __bfs()
1774 if (lock->parent) { /* Parent exists, check prev dependency */ in __bfs()
1775 u8 dep = lock->dep; in __bfs()
1776 bool prev_only_xr = lock->parent->only_xr; in __bfs()
1779 * Mask out all -(S*)-> if we only have *R in previous in __bfs()
1780 * step, because -(*R)-> -(S*)-> don't make up a strong in __bfs()
1790 /* If there are only -(*R)-> left, set that for the next step */ in __bfs()
1791 lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK)); in __bfs()
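
The __bfs() lines above apply the strong-path rule at every step: if the previous node could only be reached through -(*R)-> dependencies, all -(S*)-> edges leaving it are masked off, and whether only -(*R)-> edges survive is recorded for the next step. A standalone sketch of that filter using the same SR/ER/SN/EN bit layout; the mask values are illustrative:

#include <stdio.h>

#define DEP_SR_MASK     (1u << 0)
#define DEP_ER_MASK     (1u << 1)
#define DEP_SN_MASK     (1u << 2)
#define DEP_EN_MASK     (1u << 3)

/* Returns the surviving dependency mask, or 0 if the edge must be skipped. */
static unsigned char filter_strong(unsigned char dep, int prev_only_xr,
                                   int *only_xr_out)
{
        if (prev_only_xr)
                dep &= ~(DEP_SR_MASK | DEP_SN_MASK);    /* drop shared-prev edges */
        /* only -(*R)-> left?  Then the next step is again "only_xr". */
        *only_xr_out = dep && !(dep & (DEP_SN_MASK | DEP_EN_MASK));
        return dep;
}

int main(void)
{
        int only_xr;
        /* an edge that is both -(SR)-> and -(SN)->, reached via -(*R)-> only: */
        unsigned char dep = filter_strong(DEP_SR_MASK | DEP_SN_MASK, 1, &only_xr);

        printf("surviving mask = 0x%x (0 means skip this edge)\n", dep);
        return 0;
}
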
1868 stack_trace_print(trace->entries, trace->nr_entries, spaces); in print_lock_trace()
1880 printk("\n-> #%u", depth); in print_circular_bug_entry()
1881 print_lock_name(NULL, target->class); in print_circular_bug_entry()
1883 print_lock_trace(target->trace, 6); in print_circular_bug_entry()
1893 struct lock_class *parent = prt->class; in print_circular_lock_scenario()
1894 int src_read = src->read; in print_circular_lock_scenario()
1895 int tgt_read = tgt->read; in print_circular_lock_scenario()
1913 printk(KERN_CONT " --> "); in print_circular_lock_scenario()
1915 printk(KERN_CONT " --> "); in print_circular_lock_scenario()
1922 printk(" ---- ----\n"); in print_circular_lock_scenario()
1937 else if (src->sync) in print_circular_lock_scenario()
1964 pr_warn("------------------------------------------------------\n"); in print_circular_bug_header()
1966 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1979 * We are about to add B -> A into the dependency graph, and in __bfs() a
1980 * strong dependency path A -> .. -> B is found: hlock_class equals
1981 * entry->class.
1983 * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
1988 * a) B -> A is -(E*)->
1992 * b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
1994 * as then we don't have -(*R)-> -(S*)-> in the cycle.
2000 return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ in hlock_conflict()
2001 (hlock->read == 0 || /* B -> A is -(E*)-> */ in hlock_conflict()
2002 !entry->only_xr); /* A -> .. -> B is -(*N)-> */ in hlock_conflict()
2018 this->trace = save_trace(); in print_circular_bug()
2019 if (!this->trace) in print_circular_bug()
2032 print_circular_bug_entry(parent, --depth); in print_circular_bug()
2054 * Breadth-first-search failed, graph got corrupted? in print_bfs_bug()
2144 * <target> -> <src> dependency.
2172 if (src->class_idx == target->class_idx) in check_noncircular()
2186 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
2188 * An irq safe->unsafe deadlock happens with the following conditions:
2190 * 1) We have a strong dependency path A -> ... -> B
2193 * irq can create a new dependency B -> A (consider the case that a holder
2196 * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
2200 * a) if A -> B is -(*N)->, then B -> A could be any type, so any
2202 * b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
2206 * c) if A -> B is -(E*)->, then B -> A could be any type, so any
2208 * d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
2213 * There is a strong dependency path in the dependency graph: A -> B, and now
2215 * safe->unsafe bugs.
2217 * Note that usage_accumulate() is used in backwards search, so ->only_xr
2218 * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2220 * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
2226 if (!entry->only_xr) in usage_accumulate()
2227 *(unsigned long *)mask |= entry->class->usage_mask; in usage_accumulate()
2229 *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ); in usage_accumulate()
2235 * There is a strong dependency path in the dependency graph: A -> B, and now
2237 * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
2239 * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
2245 if (!entry->only_xr) in usage_match()
2246 return !!(entry->class->usage_mask & *(unsigned long *)mask); in usage_match()
2248 return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask); in usage_match()
2253 if (entry->class->lock_type == LD_LOCK_NORMAL) in usage_skip()
2269 * where lock(B) cannot sleep, and we have a dependency B -> ... -> A. in usage_skip()
2272 * have the observation for any lock chain L1 -> ... -> Ln, for any in usage_skip()
2277 * way the local_lock() exists in the dependency B -> ... -> A. in usage_skip()
2282 if (entry->class->lock_type == LD_LOCK_PERCPU && in usage_skip()
2283 DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) in usage_skip()
2287 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually in usage_skip()
2295 * Find a node in the forwards-direction dependency sub-graph starting
2296 * at @root->class that matches @bit.
2315 * Find a node in the backwards-direction dependency sub-graph starting
2316 * at @root->class that matches @bit.
2335 printk("%*s->", depth, ""); in print_lock_class_header()
2343 if (class->usage_mask & (1 << bit)) { in print_lock_class_header()
2348 print_lock_trace(class->usage_traces[bit], len); in print_lock_class_header()
2354 depth, "", class->key, class->key); in print_lock_class_header()
2360 * After BFS we get a lock dependency path (linked via ->parent of lock_list),
2366 * for a lock dependency A -> B, there are two lock_lists:
2368 * a) lock_list in the ->locks_after list of A, whose ->class is B and
2369 * ->links_to is A. In this case, we can say the lock_list is
2370 * "A -> B" (forwards case).
2372 * b) lock_list in the ->locks_before list of B, whose ->class is A
2373 * and ->links_to is B. In this case, we can say the lock_list is
2374 * "B <- A" (bacwards case).
2376 * The ->trace of both a) and b) point to the call trace where B was
2382 * ->class is A, as a result BFS will search all dependencies starting with
2383 * A, e.g. A -> B or A -> C.
2385 * The notation of a forwards helper lock_list is like "-> A", which means
2386 * we should search the forwards dependencies starting with "A", e.g A -> B
2387 * or A -> C.
2389 * The notation of a backwards helper lock_list is like "<- B", which means
2391 * B <- A or B <- C.
2402 * ->parent ->parent
2403 * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
2404 * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
2406 * , so it's natural that we start from @leaf and print every ->class and
2407 * ->trace until we reach the @root.
2420 print_lock_class_header(entry->class, depth); in print_shortest_lock_dependencies()
2422 print_lock_trace(entry->trace, 2); in print_shortest_lock_dependencies()
2431 depth--; in print_shortest_lock_dependencies()
2443 * ->parent ->parent
2444 * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
2445 * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
2448 * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
2450 * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
2451 * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
2467 print_lock_class_header(entry->class, depth); in print_shortest_lock_dependencies_backwards()
2478 trace = entry->trace; in print_shortest_lock_dependencies_backwards()
2486 depth--; in print_shortest_lock_dependencies_backwards()
2496 struct lock_class *safe_class = safe_entry->class; in print_irq_lock_scenario()
2497 struct lock_class *unsafe_class = unsafe_entry->class; in print_irq_lock_scenario()
2519 printk(KERN_CONT " --> "); in print_irq_lock_scenario()
2521 printk(KERN_CONT " --> "); in print_irq_lock_scenario()
2528 printk(" ---- ----\n"); in print_irq_lock_scenario()
2565 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n", in print_bad_irq_dependency()
2568 pr_warn("-----------------------------------------------------\n"); in print_bad_irq_dependency()
2570 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2572 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2574 curr->softirqs_enabled); in print_bad_irq_dependency()
2581 pr_cont(" ->"); in print_bad_irq_dependency()
2585 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", in print_bad_irq_dependency()
2587 print_lock_name(NULL, backwards_entry->class); in print_bad_irq_dependency()
2588 pr_warn("\n... which became %s-irq-safe at:\n", irqclass); in print_bad_irq_dependency()
2590 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1); in print_bad_irq_dependency()
2592 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); in print_bad_irq_dependency()
2593 print_lock_name(NULL, forwards_entry->class); in print_bad_irq_dependency()
2594 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass); in print_bad_irq_dependency()
2597 print_lock_trace(forwards_entry->class->usage_traces[bit2], 1); in print_bad_irq_dependency()
2605 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); in print_bad_irq_dependency()
2609 pr_warn(" and %s-irq-unsafe lock:\n", irqclass); in print_bad_irq_dependency()
2610 next_root->trace = save_trace(); in print_bad_irq_dependency()
2611 if (!next_root->trace) in print_bad_irq_dependency()
2630 __stringify(__STATE)"-READ",
2648 * bit2-n: state
2663 * right shift of the mask transforms the individual bitnrs as -1 and
2704 * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
2705 * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
2706 * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
2707 * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
2771 return -1; in find_exclusive_match()
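
find_exclusive_match() above relies on the pairing listed in the preceding lines: every LOCK_USED_IN_* bit conflicts with the LOCK_ENABLED_* bit of the same irq state. A standalone sketch of the bit manipulation that produces such a pairing, assuming the usage-bit layout described earlier in this listing (bit0 read, bit1 direction, remaining bits the state); the helper below is illustrative, not a quote of the kernel's code:

#include <stdio.h>

#define LOCK_USAGE_READ_MASK    1
#define LOCK_USAGE_DIR_MASK     2
#define LOCK_USAGE_STATE_MASK   (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

static int exclusive_bit(int new_bit)
{
        int state = new_bit & LOCK_USAGE_STATE_MASK;
        int dir = new_bit & LOCK_USAGE_DIR_MASK;

        /* keep the state, flip the direction, strip the read bit */
        return state | (dir ^ LOCK_USAGE_DIR_MASK);
}

int main(void)
{
        /* a USED_IN_*_READ bit for state index 1: (1 << 2) | dir 0 | read 1 = 5 */
        int used_in_read = (1 << 2) | LOCK_USAGE_READ_MASK;

        /* pairs with the write ENABLED_* bit of the same state: (1 << 2) | 2 = 6 */
        printf("exclusive counterpart of bit %d is bit %d\n",
               used_in_read, exclusive_bit(used_in_read));
        return 0;
}
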
2775 * Prove that the new dependency does not connect a hardirq-safe(-read)
2776 * lock with a hardirq-unsafe lock - to achieve this we search
2777 * the backwards-subgraph starting at <prev>, and the
2778 * forwards-subgraph starting at <next>:
2830 * When trying to add A -> B to the graph, we find that there is a in check_irq_usage()
2831 * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M, in check_irq_usage()
2832 * that B -> ... -> M. However M is **softirq-safe**, if we use exact in check_irq_usage()
2834 * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not in check_irq_usage()
2837 backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL); in check_irq_usage()
2851 ret = find_exclusive_match(target_entry->class->usage_mask, in check_irq_usage()
2852 target_entry1->class->usage_mask, in check_irq_usage()
2854 if (DEBUG_LOCKS_WARN_ON(ret == -1)) in check_irq_usage()
2883 * We are about to add A -> B into the dependency graph, and in __bfs() a
2884 * strong dependency path A -> .. -> B is found: hlock_class equals
2885 * entry->class.
2887 * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
2888 * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
2889 * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
2890 * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
2891 * dependency graph, as any strong path ..-> A -> B ->.. we can get with
2892 * having dependency A -> B, we could already get an equivalent path ..-> A ->
2893 * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
2895 * We need to make sure both the start and the end of A -> .. -> B is not
2896 * weaker than A -> B. For the start part, please see the comment in
2901 * a) A -> B is -(*R)-> (everything is not weaker than that)
2905 * b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
2912 return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ in hlock_equal()
2913 (hlock->read == 2 || /* A -> B is -(*R)-> */ in hlock_equal()
2914 !entry->only_xr); /* A -> .. -> B is -(*N)-> */ in hlock_equal()
2919 * <target> or not. If it can, <src> -> <target> dependency is already
2937 * is equal to or stronger than <src> -> <target>. So if <src> is E, in check_redundant()
2938 * we need to let __bfs() only search for a path starting at a -(E*)->, in check_redundant()
2939 * we achieve this by setting the initial node's ->only_xr to true in in check_redundant()
2940 * that case. And if <prev> is S, we set initial ->only_xr to false in check_redundant()
2941 * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant. in check_redundant()
2943 src_entry.only_xr = src->read == 0; in check_redundant()
2949 * comment in usage_skip(), A -> local_lock() -> B and A -> B are not in check_redundant()
2983 nr_hardirq_chains--; in dec_chains()
2985 nr_softirq_chains--; in dec_chains()
2987 nr_process_chains--; in dec_chains()
2998 printk(" ----\n"); in print_deadlock_scenario()
3024 pr_warn("--------------------------------------------\n"); in print_deadlock_bug()
3026 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
3031 if (class->cmp_fn) { in print_deadlock_bug()
3033 class->cmp_fn(prev->instance, next->instance)); in print_deadlock_bug()
3064 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
3065 prev = curr->held_locks + i; in check_deadlock()
3067 if (prev->instance == next->nest_lock) in check_deadlock()
3074 * Allow read-after-read recursion of the same in check_deadlock()
3077 if ((next->read == 2) && prev->read) in check_deadlock()
3082 if (class->cmp_fn && in check_deadlock()
3083 class->cmp_fn(prev->instance, next->instance) < 0) in check_deadlock()
3100 * There was a chain-cache miss, and we are about to add a new dependency
3101 * to a previous lock. We validate the following rules:
3103 * - would the adding of the <prev> -> <next> dependency create a
3106 * - does the new prev->next dependency connect any hardirq-safe lock
3107 * (in the full backwards-subgraph starting at <prev>) with any
3108 * hardirq-unsafe lock (in the full forwards-subgraph starting at
3111 * - does the new prev->next dependency connect any softirq-safe lock
3112 * (in the full backwards-subgraph starting at <prev>) with any
3113 * softirq-unsafe lock (in the full forwards-subgraph starting at
3129 if (!hlock_class(prev)->key || !hlock_class(next)->key) { in check_prev_add()
3131 * The warning statements below may trigger a use-after-free in check_prev_add()
3132 * of the class name. It is better to trigger a use-after-free in check_prev_add()
3136 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, in check_prev_add()
3137 "Detected use-after-free of lock class %px/%s\n", in check_prev_add()
3139 hlock_class(prev)->name); in check_prev_add()
3140 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, in check_prev_add()
3141 "Detected use-after-free of lock class %px/%s\n", in check_prev_add()
3143 hlock_class(next)->name); in check_prev_add()
3147 if (prev->class_idx == next->class_idx) { in check_prev_add()
3150 if (class->cmp_fn && in check_prev_add()
3151 class->cmp_fn(prev->instance, next->instance) < 0) in check_prev_add()
3156 * Prove that the new <prev> -> <next> dependency would not in check_prev_add()
3158 * a breadth-first search into the graph starting at <next>, in check_prev_add()
3173 * Is the <prev> -> <next> dependency already present? in check_prev_add()
3176 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 in check_prev_add()
3177 * chains - the second one will be new, but L1 already has in check_prev_add()
3180 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { in check_prev_add()
3181 if (entry->class == hlock_class(next)) { in check_prev_add()
3183 entry->distance = 1; in check_prev_add()
3184 entry->dep |= calc_dep(prev, next); in check_prev_add()
3188 * ->locks_before list. in check_prev_add()
3202 list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) { in check_prev_add()
3203 if (entry->class == hlock_class(prev)) { in check_prev_add()
3205 entry->distance = 1; in check_prev_add()
3206 entry->dep |= calc_depb(prev, next); in check_prev_add()
3217 * Is the <prev> -> <next> link redundant? in check_prev_add()
3236 &hlock_class(prev)->locks_after, distance, in check_prev_add()
3243 &hlock_class(next)->locks_before, distance, in check_prev_add()
3252 * Add the dependency to all directly-previous locks that are 'relevant'.
3254 * all consecutive trylock entries and the final non-trylock entry - or
3255 * the end of this context's lock-chain - whichever comes first.
3261 int depth = curr->lockdep_depth; in check_prevs_add()
3267 * Depth must not be zero for a non-head lock: in check_prevs_add()
3275 if (curr->held_locks[depth].irq_context != in check_prevs_add()
3276 curr->held_locks[depth-1].irq_context) in check_prevs_add()
3280 u16 distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
3281 hlock = curr->held_locks + depth - 1; in check_prevs_add()
3283 if (hlock->check) { in check_prevs_add()
3289 * Stop after the first non-trylock entry, in check_prevs_add()
3290 * as non-trylock entries have added their in check_prevs_add()
3294 if (!hlock->trylock) in check_prevs_add()
3298 depth--; in check_prevs_add()
3300 * End of lock-stack? in check_prevs_add()
3307 if (curr->held_locks[depth].irq_context != in check_prevs_add()
3308 curr->held_locks[depth-1].irq_context) in check_prevs_add()
3339 * Bit 15 - always set to 1 (it is not a class index)
3340 * Bits 0-14 - upper 15 bits of the next block index
3341 * entry[1] - lower 16 bits of next block index
3345 * On the unsized bucket (bucket-0), the 3rd and 4th entries contain
3348 * entry[2] - upper 16 bits of the chain block size
3349 * entry[3] - lower 16 bits of the chain block size
3362 return size - 1; in size_to_bucket()
3369 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3374 * next block or -1
3383 return -1; in chain_block_next()
3393 * bucket-0 only
3423 * the block before it is re-added. in add_chain_block()
3468 nr_free_chain_hlocks -= size; in del_chain_block()
3472 nr_large_chain_blocks--; in del_chain_block()
3480 chain_block_buckets[i] = -1; in init_chain_block_buckets()
3486 * Return offset of a chain block of the right size or -1 if not found.
3488 * Fairly simple worst-fit allocator with the addition of a number of size
3499 BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG); in alloc_chain_hlocks()
3504 return -1; in alloc_chain_hlocks()
3532 add_chain_block(curr + req, size - req); in alloc_chain_hlocks()
3540 for (size = MAX_CHAIN_BUCKETS; size > req; size--) { in alloc_chain_hlocks()
3547 add_chain_block(curr + req, size - req); in alloc_chain_hlocks()
3551 return -1; in alloc_chain_hlocks()
3561 u16 chain_hlock = chain_hlocks[chain->base + i]; in lock_chain_get_class()
3576 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in get_first_held_lock()
3577 hlock_curr = curr->held_locks + i; in get_first_held_lock()
3578 if (hlock_curr->irq_context != hlock->irq_context) in get_first_held_lock()
3594 printk(" hlock_id:%d -> chain_key:%016Lx", in print_chain_key_iteration()
3605 int depth = curr->lockdep_depth; in print_chain_keys_held_locks()
3608 printk("depth: %u (irq_context %u)\n", depth - i + 1, in print_chain_keys_held_locks()
3609 hlock_next->irq_context); in print_chain_keys_held_locks()
3611 hlock = curr->held_locks + i; in print_chain_keys_held_locks()
3627 printk("depth: %u\n", chain->depth); in print_chain_keys_chain()
3628 for (i = 0; i < chain->depth; i++) { in print_chain_keys_chain()
3629 hlock_id = chain_hlocks[chain->base + i]; in print_chain_keys_chain()
3647 pr_warn("----------------------------\n"); in print_collision()
3648 pr_warn("%s/%d: ", current->comm, task_pid_nr(current)); in print_collision()
3679 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { in check_no_collision()
3684 for (j = 0; j < chain->depth - 1; j++, i++) { in check_no_collision()
3685 id = hlock_id(&curr->held_locks[i]); in check_no_collision()
3687 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { in check_no_collision()
3697 * Given an index that is >= -1, return the index of the next lock chain.
3698 * Return -2 if there is no next lock chain.
3703 return i < ARRAY_SIZE(lock_chains) ? i : -2; in lockdep_next_lockchain()
3740 * disabled to make this an IRQ-safe lock.. for recursion reasons in add_chain_cache()
3757 chain->chain_key = chain_key; in add_chain_cache()
3758 chain->irq_context = hlock->irq_context; in add_chain_cache()
3760 chain->depth = curr->lockdep_depth + 1 - i; in add_chain_cache()
3763 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); in add_chain_cache()
3766 j = alloc_chain_hlocks(chain->depth); in add_chain_cache()
3778 chain->base = j; in add_chain_cache()
3779 for (j = 0; j < chain->depth - 1; j++, i++) { in add_chain_cache()
3780 int lock_id = hlock_id(curr->held_locks + i); in add_chain_cache()
3782 chain_hlocks[chain->base + j] = lock_id; in add_chain_cache()
3784 chain_hlocks[chain->base + j] = hlock_id(hlock); in add_chain_cache()
3785 hlist_add_head_rcu(&chain->entry, hash_head); in add_chain_cache()
3787 inc_chains(chain->irq_context); in add_chain_cache()
3802 if (READ_ONCE(chain->chain_key) == chain_key) { in lookup_chain_cache()
3812 * add it and return 1 - in this case the new dependency chain is
3832 class->key, class->name); in lookup_chain_cache_add()
3840 (unsigned long long)chain_key, class->key, class->name); in lookup_chain_cache_add()
3847 * We have to walk the chain again locked - to avoid duplicates: in lookup_chain_cache_add()
3875 if (!hlock->trylock && hlock->check && in validate_chain()
3880 * - is irq-safe, if this lock is irq-unsafe in validate_chain()
3881 * - is softirq-safe, if this lock is hardirq-unsafe in validate_chain()
3886 * - within the current held-lock stack in validate_chain()
3887 * - across our accumulated lock dependency records in validate_chain()
3933 * We are building curr_chain_key incrementally, so double-check
3943 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
3944 hlock = curr->held_locks + i; in check_chain_key()
3945 if (chain_key != hlock->prev_chain_key) { in check_chain_key()
3952 curr->lockdep_depth, i, in check_chain_key()
3954 (unsigned long long)hlock->prev_chain_key); in check_chain_key()
3959 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is in check_chain_key()
3962 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use))) in check_chain_key()
3965 if (prev_hlock && (prev_hlock->irq_context != in check_chain_key()
3966 hlock->irq_context)) in check_chain_key()
3971 if (chain_key != curr->curr_chain_key) { in check_chain_key()
3978 curr->lockdep_depth, i, in check_chain_key()
3980 (unsigned long long)curr->curr_chain_key); in check_chain_key()
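
check_chain_key() above re-derives the current chain key by folding each held lock's hlock_id() into the running key and comparing against the prev_chain_key snapshot stored in every held_lock. A conceptual standalone sketch of that incremental keying; the mixing step and the all-ones initial value are placeholders, not the kernel's iterate_chain_key() or INITIAL_CHAIN_KEY definition:

#include <stdio.h>

#define INITIAL_CHAIN_KEY       (~0ULL)         /* placeholder initial value */

static unsigned long long fold_id(unsigned long long key, unsigned short hlock_id)
{
        key ^= hlock_id;
        key *= 0x9E3779B97F4A7C15ULL;           /* placeholder mixing step */
        return key;
}

int main(void)
{
        unsigned short held[] = { 3, 17, 42 };  /* hlock_id()s of the held locks */
        unsigned long long key = INITIAL_CHAIN_KEY;
        unsigned int i;

        for (i = 0; i < sizeof(held) / sizeof(held[0]); i++) {
                unsigned long long prev_chain_key = key; /* snapshot kept per held_lock */

                key = fold_id(key, held[i]);
                printf("depth %u: prev=%016llx curr=%016llx\n", i, prev_chain_key, key);
        }
        return 0;
}
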
3995 printk(" ----\n"); in print_usage_bug_scenario()
4019 pr_warn("--------------------------------\n"); in print_usage_bug()
4021 pr_warn("inconsistent {%s} -> {%s} usage.\n", in print_usage_bug()
4025 curr->comm, task_pid_nr(curr), in print_usage_bug()
4033 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1); in print_usage_bug()
4054 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) { in valid_state()
4085 pr_warn("--------------------------------------------------------\n"); in print_irq_inversion_bug()
4087 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
4090 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); in print_irq_inversion_bug()
4092 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); in print_irq_inversion_bug()
4093 print_lock_name(NULL, other->class); in print_irq_inversion_bug()
4107 depth--; in print_irq_inversion_bug()
4111 middle ? middle->class : root->class, other->class); in print_irq_inversion_bug()
4114 middle ? middle->class : other->class, root->class); in print_irq_inversion_bug()
4119 root->trace = save_trace(); in print_irq_inversion_bug()
4120 if (!root->trace) in print_irq_inversion_bug()
4131 * Prove that in the forwards-direction subgraph starting at <this>
4154 if (target_entry->class->usage_mask & lock_flag(bit)) { in check_usage_forwards()
4166 * Prove that in the backwards-direction subgraph starting at <this>
4189 if (target_entry->class->usage_mask & lock_flag(bit)) { in check_usage_backwards()
4202 const struct irqtrace_events *trace = &curr->irqtrace; in print_irqtrace_events()
4206 printk("irq event stamp: %u\n", trace->irq_events); in print_irqtrace_events()
4208 trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip, in print_irqtrace_events()
4209 (void *)trace->hardirq_enable_ip); in print_irqtrace_events()
4211 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip, in print_irqtrace_events()
4212 (void *)trace->hardirq_disable_ip); in print_irqtrace_events()
4214 trace->softirq_enable_event, (void *)trace->softirq_enable_ip, in print_irqtrace_events()
4215 (void *)trace->softirq_enable_ip); in print_irqtrace_events()
4217 trace->softirq_disable_event, (void *)trace->softirq_disable_ip, in print_irqtrace_events()
4218 (void *)trace->softirq_disable_ip); in print_irqtrace_events()
4264 * Validate that this particular lock does not have conflicting in mark_lock_irq()
4279 * Validate that the lock dependencies don't have conflicting usage in mark_lock_irq()
4284 * mark ENABLED has to look backwards -- to ensure no dependee in mark_lock_irq()
4291 * mark USED_IN has to look forwards -- to ensure no dependency in mark_lock_irq()
4313 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
4315 hlock = curr->held_locks + i; in mark_held_locks()
4317 if (hlock->read) in mark_held_locks()
4322 if (!hlock->check) in mark_held_locks()
4350 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
4355 * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4407 current->hardirq_chain_key = current->curr_chain_key; in lockdep_hardirqs_on_prepare()
4417 struct irqtrace_events *trace = &current->irqtrace; in lockdep_hardirqs_on()
4435 * - recursion check, because NMI can hit lockdep; in lockdep_hardirqs_on()
4436 * - hardware state check, because above; in lockdep_hardirqs_on()
4437 * - chain_key check, see lockdep_hardirqs_on_prepare(). in lockdep_hardirqs_on()
4467 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key != in lockdep_hardirqs_on()
4468 current->curr_chain_key); in lockdep_hardirqs_on()
4471 /* we'll do an OFF -> ON transition: */ in lockdep_hardirqs_on()
4473 trace->hardirq_enable_ip = ip; in lockdep_hardirqs_on()
4474 trace->hardirq_enable_event = ++trace->irq_events; in lockdep_hardirqs_on()
4506 struct irqtrace_events *trace = &current->irqtrace; in lockdep_hardirqs_off()
4509 * We have done an ON -> OFF transition: in lockdep_hardirqs_off()
4512 trace->hardirq_disable_ip = ip; in lockdep_hardirqs_off()
4513 trace->hardirq_disable_event = ++trace->irq_events; in lockdep_hardirqs_off()
4526 struct irqtrace_events *trace = &current->irqtrace; in lockdep_softirqs_on()
4538 if (current->softirqs_enabled) { in lockdep_softirqs_on()
4545 * We'll do an OFF -> ON transition: in lockdep_softirqs_on()
4547 current->softirqs_enabled = 1; in lockdep_softirqs_on()
4548 trace->softirq_enable_ip = ip; in lockdep_softirqs_on()
4549 trace->softirq_enable_event = ++trace->irq_events; in lockdep_softirqs_on()
4575 if (current->softirqs_enabled) { in lockdep_softirqs_off()
4576 struct irqtrace_events *trace = &current->irqtrace; in lockdep_softirqs_off()
4579 * We have done an ON -> OFF transition: in lockdep_softirqs_off()
4581 current->softirqs_enabled = 0; in lockdep_softirqs_off()
4582 trace->softirq_disable_ip = ip; in lockdep_softirqs_off()
4583 trace->softirq_disable_event = ++trace->irq_events; in lockdep_softirqs_off()
4594 * lockdep_cleanup_dead_cpu - Ensure CPU lockdep state is cleanly stopped
4624 * If non-trylock use in a hardirq or softirq context, then in mark_usage()
4627 if (!hlock->trylock) { in mark_usage()
4628 if (hlock->read) { in mark_usage()
4633 if (curr->softirq_context) in mark_usage()
4641 if (curr->softirq_context) in mark_usage()
4652 if (!hlock->hardirqs_off && !hlock->sync) { in mark_usage()
4653 if (hlock->read) { in mark_usage()
4657 if (curr->softirqs_enabled) in mark_usage()
4665 if (curr->softirqs_enabled) in mark_usage()
4683 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; in task_irq_context()
4689 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
4697 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
4703 if (prev_hlock->irq_context != hlock->irq_context) in separate_irq_context()
4710 * Mark a lock with a usage bit, and validate the state transition:
4722 if (new_bit == LOCK_USED && this->read) in mark_lock()
4731 if (likely(hlock_class(this)->usage_mask & new_mask)) in mark_lock()
4739 if (unlikely(hlock_class(this)->usage_mask & new_mask)) in mark_lock()
4742 if (!hlock_class(this)->usage_mask) in mark_lock()
4745 hlock_class(this)->usage_mask |= new_mask; in mark_lock()
4748 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) in mark_lock()
4786 if (curr->hardirq_threaded || curr->irq_config) in task_wait_context()
4790 } else if (curr->softirq_context) { in task_wait_context()
4817 pr_warn("-----------------------------\n"); in print_lock_invalid_wait_context()
4819 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_invalid_wait_context()
4825 pr_warn("context-{%d:%d}\n", curr_inner, curr_inner); in print_lock_invalid_wait_context()
4840 * This check validates we take locks in the right wait-type order; that is it
4854 u8 next_inner = hlock_class(next)->wait_type_inner; in check_wait_context()
4855 u8 next_outer = hlock_class(next)->wait_type_outer; in check_wait_context()
4859 if (!next_inner || next->trylock) in check_wait_context()
4868 for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) { in check_wait_context()
4869 struct held_lock *prev = curr->held_locks + depth; in check_wait_context()
4870 if (prev->irq_context != next->irq_context) in check_wait_context()
4877 for (; depth < curr->lockdep_depth; depth++) { in check_wait_context()
4878 struct held_lock *prev = curr->held_locks + depth; in check_wait_context()
4880 u8 prev_inner = class->wait_type_inner; in check_wait_context()
4892 * Allow override for annotations -- this is typically in check_wait_context()
4896 if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE)) in check_wait_context()
4935 * Initialize a lock instance's lock-class mapping info:
4944 lock->class_cache[i] = NULL; in lockdep_init_map_type()
4947 lock->cpu = raw_smp_processor_id(); in lockdep_init_map_type()
4954 lock->name = "NULL"; in lockdep_init_map_type()
4958 lock->name = name; in lockdep_init_map_type()
4960 lock->wait_type_outer = outer; in lockdep_init_map_type()
4961 lock->wait_type_inner = inner; in lockdep_init_map_type()
4962 lock->lock_type = lock_type; in lockdep_init_map_type()
4970 * Sanity check, the lock-class key must either have been allocated in lockdep_init_map_type()
4979 lock->key = key; in lockdep_init_map_type()
5009 struct lock_class *class = lock->class_cache[0]; in lockdep_set_lock_cmp_fn()
5019 WARN_ON(class->cmp_fn && class->cmp_fn != cmp_fn); in lockdep_set_lock_cmp_fn()
5020 WARN_ON(class->print_fn && class->print_fn != print_fn); in lockdep_set_lock_cmp_fn()
5022 class->cmp_fn = cmp_fn; in lockdep_set_lock_cmp_fn()
5023 class->print_fn = print_fn; in lockdep_set_lock_cmp_fn()
5047 pr_warn("----------------------------------\n"); in print_lock_nested_lock_not_held()
5049 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
5053 pr_warn("%s\n", hlock->nest_lock->name); in print_lock_nested_lock_not_held()
5071 * We maintain the dependency maps and validate the locking attempt:
5093 if (unlikely(lock->key == &__lockdep_no_track__)) in __lock_acquire()
5098 if (!prove_locking || lock->key == &__lockdep_no_validate__) { in __lock_acquire()
5107 class = lock->class_cache[subclass]; in __lock_acquire()
5121 printk("\nacquire class [%px] %s", class->key, class->name); in __lock_acquire()
5122 if (class->name_version > 1) in __lock_acquire()
5123 printk(KERN_CONT "#%d", class->name_version); in __lock_acquire()
5131 * (we don't increase the depth just yet, up until the in __lock_acquire()
5134 depth = curr->lockdep_depth; in __lock_acquire()
5136 * Ran out of static storage for our per-task lock stack again have we? in __lock_acquire()
5141 class_idx = class - lock_classes; in __lock_acquire()
5145 hlock = curr->held_locks + depth - 1; in __lock_acquire()
5146 if (hlock->class_idx == class_idx && nest_lock) { in __lock_acquire()
5150 if (!hlock->references) in __lock_acquire()
5151 hlock->references++; in __lock_acquire()
5153 hlock->references += references; in __lock_acquire()
5156 if (DEBUG_LOCKS_WARN_ON(hlock->references < references)) in __lock_acquire()
5163 hlock = curr->held_locks + depth; in __lock_acquire()
5170 hlock->class_idx = class_idx; in __lock_acquire()
5171 hlock->acquire_ip = ip; in __lock_acquire()
5172 hlock->instance = lock; in __lock_acquire()
5173 hlock->nest_lock = nest_lock; in __lock_acquire()
5174 hlock->irq_context = task_irq_context(curr); in __lock_acquire()
5175 hlock->trylock = trylock; in __lock_acquire()
5176 hlock->read = read; in __lock_acquire()
5177 hlock->check = check; in __lock_acquire()
5178 hlock->sync = !!sync; in __lock_acquire()
5179 hlock->hardirqs_off = !!hardirqs_off; in __lock_acquire()
5180 hlock->references = references; in __lock_acquire()
5182 hlock->waittime_stamp = 0; in __lock_acquire()
5183 hlock->holdtime_stamp = lockstat_clock(); in __lock_acquire()
5185 hlock->pin_count = pin_count; in __lock_acquire()
5202 * the hash, not class->key. in __lock_acquire()
5210 chain_key = curr->curr_chain_key; in __lock_acquire()
5220 hlock->prev_chain_key = chain_key; in __lock_acquire()
5227 if (nest_lock && !__lock_is_held(nest_lock, -1)) { in __lock_acquire()
5233 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); in __lock_acquire()
5234 WARN_ON_ONCE(!hlock_class(hlock)->key); in __lock_acquire()
5241 if (hlock->sync) in __lock_acquire()
5244 curr->curr_chain_key = chain_key; in __lock_acquire()
5245 curr->lockdep_depth++; in __lock_acquire()
5251 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
5256 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
5266 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
5267 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
5287 pr_warn("-------------------------------------\n"); in print_unlock_imbalance_bug()
5289 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
5306 if (hlock->instance == lock) in match_held_lock()
5309 if (hlock->references) { in match_held_lock()
5310 const struct lock_class *class = lock->class_cache[0]; in match_held_lock()
5325 * References, but not a lock we're actually ref-counting? in match_held_lock()
5326 * State got messed up, follow the sites that change ->references in match_held_lock()
5329 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) in match_held_lock()
5332 if (hlock->class_idx == class - lock_classes) in match_held_lock()
5347 i = depth - 1; in find_held_lock()
5348 hlock = curr->held_locks + i; in find_held_lock()
5354 for (i--, prev_hlock = hlock--; in find_held_lock()
5356 i--, prev_hlock = hlock--) { in find_held_lock()
5360 if (prev_hlock->irq_context != hlock->irq_context) { in find_held_lock()
5384 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { in reacquire_held_locks()
5385 switch (__lock_acquire(hlock->instance, in reacquire_held_locks()
5386 hlock_class(hlock)->subclass, in reacquire_held_locks()
5387 hlock->trylock, in reacquire_held_locks()
5388 hlock->read, hlock->check, in reacquire_held_locks()
5389 hlock->hardirqs_off, in reacquire_held_locks()
5390 hlock->nest_lock, hlock->acquire_ip, in reacquire_held_locks()
5391 hlock->references, hlock->pin_count, 0)) { in reacquire_held_locks()
5421 depth = curr->lockdep_depth; in __lock_set_class()
5436 lock->wait_type_inner, in __lock_set_class()
5437 lock->wait_type_outer, in __lock_set_class()
5438 lock->lock_type); in __lock_set_class()
5440 hlock->class_idx = class - lock_classes; in __lock_set_class()
5442 curr->lockdep_depth = i; in __lock_set_class()
5443 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
5452 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) in __lock_set_class()
5467 depth = curr->lockdep_depth; in __lock_downgrade()
5481 curr->lockdep_depth = i; in __lock_downgrade()
5482 curr->curr_chain_key = hlock->prev_chain_key; in __lock_downgrade()
5484 WARN(hlock->read, "downgrading a read lock"); in __lock_downgrade()
5485 hlock->read = 1; in __lock_downgrade()
5486 hlock->acquire_ip = ip; in __lock_downgrade()
5499 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_downgrade()
5506 * Remove the lock from the list of currently held locks - this gets
5521 depth = curr->lockdep_depth; in __lock_release()
5541 if (hlock->instance == lock) in __lock_release()
5544 WARN(hlock->pin_count, "releasing a pinned lock\n"); in __lock_release()
5546 if (hlock->references) { in __lock_release()
5547 hlock->references--; in __lock_release()
5548 if (hlock->references) { in __lock_release()
5564 curr->lockdep_depth = i; in __lock_release()
5565 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
5571 if (i == depth-1) in __lock_release()
5579 * there's not N-1 bottles of beer left on the wall... in __lock_release()
5582 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); in __lock_release()
5598 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
5599 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
5602 if (read == -1 || !!hlock->read == read) in __lock_is_held()
5621 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
5622 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
5631 hlock->pin_count += cookie.val; in __lock_pin_lock()
5648 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_repin_lock()
5649 struct held_lock *hlock = curr->held_locks + i; in __lock_repin_lock()
5652 hlock->pin_count += cookie.val; in __lock_repin_lock()
5668 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
5669 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
5672 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) in __lock_unpin_lock()
5675 hlock->pin_count -= cookie.val; in __lock_unpin_lock()
5677 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n")) in __lock_unpin_lock()
5678 hlock->pin_count = 0; in __lock_unpin_lock()
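
The pin/unpin lines above show how lock pinning is verified: pinning adds a cookie value to the held lock's pin_count and unpinning subtracts the same cookie, so a missing, doubled or mismatched unpin leaves a count that is non-zero or goes negative and triggers a warning. A standalone sketch of that scheme; the cookie here comes from rand(), which is only an illustrative stand-in:

#include <stdio.h>
#include <stdlib.h>

struct pin_cookie { unsigned int val; };

static unsigned int pin_count;

static struct pin_cookie lock_pin(void)
{
        struct pin_cookie cookie = { .val = 1 + (rand() & 0xffff) };

        pin_count += cookie.val;
        return cookie;
}

static void lock_unpin(struct pin_cookie cookie)
{
        if (!pin_count) {
                fprintf(stderr, "unpinning an unpinned lock\n");
                return;
        }
        pin_count -= cookie.val;
        if ((int)pin_count < 0) {               /* mismatched cookie went negative */
                fprintf(stderr, "pin count corrupted\n");
                pin_count = 0;
        }
}

int main(void)
{
        struct pin_cookie c = lock_pin();

        lock_unpin(c);
        printf("pin_count after balanced pin/unpin: %u\n", pin_count);
        return 0;
}
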
5688 * Check whether we follow the irq-flags state precisely:
5701 printk("possible reason: unannotated irqs-off.\n"); in check_flags()
5705 printk("possible reason: unannotated irqs-on.\n"); in check_flags()
5711 * We don't accurately track softirq state in e.g. in check_flags()
5718 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); in check_flags()
5721 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); in check_flags()
5782 * READ locks, there is no deadlock possible -- RCU. in verify_lock_unused()
5784 if (!hlock->read) in verify_lock_unused()
5787 if (!(class->usage_mask & mask)) in verify_lock_unused()
5790 hlock->class_idx = class - lock_classes; in verify_lock_unused()
5822 * We are not always called with irqs disabled - do that here,
5839 * kasan_check_byte() here to check for use-after-free and other in lock_acquire()
5882 lock->key == &__lockdep_no_track__)) in lock_release()
5897 * lock_sync() - A special annotation for synchronize_{s,}rcu()-like API.
6019 pr_warn("---------------------------------\n"); in print_lock_contention_bug()
6021 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
6044 depth = curr->lockdep_depth; in __lock_contended()
6052 if (unlikely(lock->key == &__lockdep_no_track__)) in __lock_contended()
6061 if (hlock->instance != lock) in __lock_contended()
6064 hlock->waittime_stamp = lockstat_clock(); in __lock_contended()
6066 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); in __lock_contended()
6067 contending_point = lock_point(hlock_class(hlock)->contending_point, in __lock_contended()
6068 lock->ip); in __lock_contended()
6072 stats->contention_point[contention_point]++; in __lock_contended()
6074 stats->contending_point[contending_point]++; in __lock_contended()
6075 if (lock->cpu != smp_processor_id()) in __lock_contended()
6076 stats->bounces[bounce_contended + !!hlock->read]++; in __lock_contended()
6089 depth = curr->lockdep_depth; in __lock_acquired()
6097 if (unlikely(lock->key == &__lockdep_no_track__)) in __lock_acquired()
6106 if (hlock->instance != lock) in __lock_acquired()
6110 if (hlock->waittime_stamp) { in __lock_acquired()
6112 waittime = now - hlock->waittime_stamp; in __lock_acquired()
6113 hlock->holdtime_stamp = now; in __lock_acquired()
6118 if (hlock->read) in __lock_acquired()
6119 lock_time_inc(&stats->read_waittime, waittime); in __lock_acquired()
6121 lock_time_inc(&stats->write_waittime, waittime); in __lock_acquired()
6123 if (lock->cpu != cpu) in __lock_acquired()
6124 stats->bounces[bounce_acquired + !!hlock->read]++; in __lock_acquired()
6126 lock->cpu = cpu; in __lock_acquired()
6127 lock->ip = ip; in __lock_acquired()
6179 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); in lockdep_reset()
6197 for (i = chain->base; i < chain->base + chain->depth; i++) { in remove_class_from_lock_chain()
6198 if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes) in remove_class_from_lock_chain()
6210 free_chain_hlocks(chain->base, chain->depth); in remove_class_from_lock_chain()
6212 WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY); in remove_class_from_lock_chain()
6213 dec_chains(chain->irq_context); in remove_class_from_lock_chain()
6219 hlist_del_rcu(&chain->entry); in remove_class_from_lock_chain()
6220 __set_bit(chain - lock_chains, pf->lock_chains_being_freed); in remove_class_from_lock_chain()
6249 WARN_ON_ONCE(!class->key); in zap_class()
6257 if (entry->class != class && entry->links_to != class) in zap_class()
6260 nr_list_entries--; in zap_class()
6261 list_del_rcu(&entry->entry); in zap_class()
6263 if (list_empty(&class->locks_after) && in zap_class()
6264 list_empty(&class->locks_before)) { in zap_class()
6265 list_move_tail(&class->lock_entry, &pf->zapped); in zap_class()
6266 hlist_del_rcu(&class->hash_entry); in zap_class()
6267 WRITE_ONCE(class->key, NULL); in zap_class()
6268 WRITE_ONCE(class->name, NULL); in zap_class()
6269 /* Class allocated but not used, -1 in nr_unused_locks */ in zap_class()
6270 if (class->usage_mask == 0) in zap_class()
6272 nr_lock_classes--; in zap_class()
6273 __clear_bit(class - lock_classes, lock_classes_in_use); in zap_class()
6274 if (class - lock_classes == max_lock_class_idx) in zap_class()
6275 max_lock_class_idx--; in zap_class()
6278 class->name); in zap_class()
6287 WARN_ON_ONCE(!class->lock_entry.next); in reinit_class()
6288 WARN_ON_ONCE(!list_empty(&class->locks_after)); in reinit_class()
6289 WARN_ON_ONCE(!list_empty(&class->locks_before)); in reinit_class()
6291 WARN_ON_ONCE(!class->lock_entry.next); in reinit_class()
6292 WARN_ON_ONCE(!list_empty(&class->locks_after)); in reinit_class()
6293 WARN_ON_ONCE(!list_empty(&class->locks_before)); in reinit_class()
6324 if (list_empty(&pf->zapped)) in prepare_call_rcu_zapped()
6345 list_for_each_entry(class, &pf->zapped, lock_entry) in __free_zapped_classes()
6348 list_splice_init(&pf->zapped, &free_lock_classes); in __free_zapped_classes()
6352 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); in __free_zapped_classes()
6353 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); in __free_zapped_classes()
6404 if (!within(class->key, start, size) && in __lockdep_free_key_range()
6405 !within(class->name, start, size)) in __lockdep_free_key_range()
6414 * freed; and possibly re-used by other modules.
6417 * guaranteed nobody will look up these exact classes -- they're properly dead
6474 * Check whether any element of the @lock->class_cache[] array refers to a
6488 if (lock->class_cache[j] == class) in lock_class_cache_is_registered()
6601 hlist_del_rcu(&k->hash_entry); in lockdep_unregister_key()
6611 nr_dynamic_keys--; in lockdep_unregister_key()
6620 * Wait until is_dynamic_key() has finished accessing k->hash_entry. in lockdep_unregister_key()
6667 pr_info(" per task-struct memory footprint: %zu bytes\n", in lockdep_init()
6668 sizeof(((struct task_struct *)NULL)->held_locks)); in lockdep_init()
6686 pr_warn("-------------------------\n"); in print_freed_lock_bug()
6687 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n", in print_freed_lock_bug()
6688 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
6707 * is destroyed or reinitialized - this code checks whether there is
6721 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
6722 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
6724 if (not_in_range(mem_from, mem_len, hlock->instance, in debug_check_no_locks_freed()
6725 sizeof(*hlock->instance))) in debug_check_no_locks_freed()
6747 current->comm, task_pid_nr(current)); in print_held_locks_bug()
6749 pr_warn("------------------------------------\n"); in print_held_locks_bug()
6759 if (unlikely(current->lockdep_depth > 0)) in debug_check_no_locks_held()
6777 if (!p->lockdep_depth) in debug_show_all_locks()
6809 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
6817 pr_warn("------------------------------------------------\n"); in lockdep_sys_exit()
6819 curr->comm, curr->pid); in lockdep_sys_exit()
6843 pr_warn("-----------------------------\n"); in lockdep_rcu_suspicious()
6854 * If a CPU is in the RCU-free window in idle (ie: in the section in lockdep_rcu_suspicious()
6860 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well in lockdep_rcu_suspicious()
6862 * choice here: we need to keep an RCU-free window in idle where in lockdep_rcu_suspicious()