// SPDX-License-Identifier: GPL-2.0
#include "threads.h"
#include "machine.h"
#include "thread.h"

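/* Pick the shard that holds @tid: tids map to table slots by modulus. */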
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
	/* Cast it to handle tid == -1 */
	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}

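/* Shard hashmap callbacks: a tid is used directly as both hash and key. */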
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	/* The table lookup removes low bit entropy, but this is just ignored here. */
	return key;
}

static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}

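/* Set up each shard's hashmap, rwsem and last-match cache. */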
void threads__init(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		hashmap__init(&table->shard, key_hash, key_equal, NULL);
		init_rwsem(&table->lock);
		table->last_match = NULL;
	}
}

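/* Drop all threads, then tear down each shard's hashmap and rwsem. */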
void threads__exit(struct threads *threads)
{
	threads__remove_all_threads(threads);
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		hashmap__clear(&table->shard);
		exit_rwsem(&table->lock);
	}
}

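/* Total number of threads, summed over the shards under their read locks. */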
size_t threads__nr(struct threads *threads)
{
	size_t nr = 0;

	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		down_read(&table->lock);
		nr += hashmap__size(&table->shard);
		up_read(&table->lock);
	}
	return nr;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full hashmap:
 */
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
							    pid_t tid)
{
	struct thread *th, *res = NULL;

	th = table->last_match;
	if (th != NULL && thread__tid(th) == tid)
		res = thread__get(th);
	return res;
}

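/* Swap the cached last match; the shard lock must be held for writing. */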
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
						  struct thread *th)
{
	thread__put(table->last_match);
	table->last_match = thread__get(th);
}

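/* Locked wrapper that takes the shard lock around the last-match update. */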
static void threads_table_entry__set_last_match(struct threads_table_entry *table,
						struct thread *th)
{
	down_write(&table->lock);
	__threads_table_entry__set_last_match(table, th);
	up_write(&table->lock);
}

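/*
 * Look up @tid, trying the last-match cache before the shard hashmap.
 * Returns a new reference that the caller must thread__put().
 */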
struct thread *threads__find(struct threads *threads, pid_t tid)
{
	struct threads_table_entry *table = threads__table(threads, tid);
	struct thread *res;

	down_read(&table->lock);
	res = __threads_table_entry__get_last_match(table, tid);
	if (!res) {
		if (hashmap__find(&table->shard, tid, &res))
			res = thread__get(res);
	}
	up_read(&table->lock);
	if (res)
		threads_table_entry__set_last_match(table, res);
	return res;
}

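/*
 * Find the thread for @tid, creating it if it doesn't exist yet; @created
 * reports which case occurred. Returns a reference, or NULL on failure.
 */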
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
	struct threads_table_entry *table = threads__table(threads, tid);
	struct thread *res = NULL;

	*created = false;
	down_write(&table->lock);
	res = thread__new(pid, tid);
	if (res) {
		if (hashmap__add(&table->shard, tid, res)) {
			/* Add failed. Assume a race so find other entry. */
			thread__put(res);
			res = NULL;
			if (hashmap__find(&table->shard, tid, &res))
				res = thread__get(res);
		} else {
			res = thread__get(res);
			*created = true;
		}
		if (res)
			__threads_table_entry__set_last_match(table, res);
	}
	up_write(&table->lock);
	return res;
}

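/* Empty every shard, dropping the table's reference on each thread. */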
void threads__remove_all_threads(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
		struct hashmap_entry *cur, *tmp;
		size_t bkt;

		down_write(&table->lock);
		__threads_table_entry__set_last_match(table, NULL);
		hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
			struct thread *old_value;

			hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
			thread__put(old_value);
		}
		up_write(&table->lock);
	}
}

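/* Remove @thread from its shard, invalidating the last-match cache if it matches. */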
void threads__remove(struct threads *threads, struct thread *thread)
{
	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
	struct thread *old_value;

	down_write(&table->lock);
	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
		__threads_table_entry__set_last_match(table, NULL);

	hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
	thread__put(old_value);
	up_write(&table->lock);
}

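/*
 * Call @fn on every thread under the shard read locks; a non-zero return
 * value stops the walk and is propagated back to the caller.
 */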
int threads__for_each_thread(struct threads *threads,
			     int (*fn)(struct thread *thread, void *data),
			     void *data)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
		struct hashmap_entry *cur;
		size_t bkt;

		down_read(&table->lock);
		hashmap__for_each_entry((&table->shard), cur, bkt) {
			int rc = fn((struct thread *)cur->pvalue, data);

			if (rc != 0) {
				up_read(&table->lock);
				return rc;
			}
		}
		up_read(&table->lock);
	}
	return 0;
}