Selected lines from net/unix/garbage.c (the Linux AF_UNIX garbage collector), shown with their original line numbers

1 // SPDX-License-Identifier: GPL-2.0-or-later
13 * - object w/ a bit
14 * - free list
18 * - explicit stack instead of recursion
19 * - tail recurse on first born instead of immediate push/pop
20 * - we gather the stuff that should not be killed into tree
25 * - don't just push entire root set; process in place
38 * upon the beginning and unmark non-junk ones.
56 * parents (->gc_tree).
58 * Damn. Added missing check for ->dead in listen queues scanning.
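
For orientation, the vertex and edge objects that the fragments below manipulate can be sketched as follows. The field list is inferred purely from the uses visible in this excerpt; the authoritative definitions live in the kernel headers (include/net/af_unix.h) and may differ in ordering or detail.

struct unix_vertex {
	struct list_head edges;      /* outgoing unix_edge list (linked via vertex_entry) */
	struct list_head entry;      /* on unix_unvisited_vertices / unix_visited_vertices / fpl->vertices */
	struct list_head scc_entry;  /* membership in the SCC currently being built */
	unsigned long out_degree;    /* number of outgoing edges, i.e. in-flight fds of this socket */
	unsigned long index;         /* Tarjan visit index (unix_vertex_unvisited_index until visited) */
	unsigned long scc_index;     /* smallest index reachable from here ("lowlink") */
};

struct unix_edge {
	struct unix_sock *predecessor;  /* socket whose fd is in flight */
	struct unix_sock *successor;    /* socket the fd was sent to */
	struct list_head vertex_entry;  /* on the predecessor vertex's ->edges list */
	struct list_head stack_entry;   /* on edge_stack while the DFS is suspended */
};
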
98 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
101 struct sock *sk = sock->sk;
103 ops = READ_ONCE(sock->ops);
106 if (sk && ops && ops->family == PF_UNIX)
113 static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
118 if (edge->successor->listener)
119 return unix_sk(edge->successor->listener)->vertex;
121 return edge->successor->vertex;
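/* Annotation, not in the source: if the receiving socket is still an embryo
 * sitting in a listener's accept queue, ->listener is set and the edge is
 * attributed to the listener's vertex, so fds queued on not-yet-accept()ed
 * children are tracked through the listening socket.
 */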
149 static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
151 struct unix_vertex *vertex = edge->predecessor->vertex;
154 vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
155 vertex->index = unix_vertex_unvisited_index;
156 vertex->out_degree = 0;
157 INIT_LIST_HEAD(&vertex->edges);
158 INIT_LIST_HEAD(&vertex->scc_entry);
160 list_move_tail(&vertex->entry, &unix_unvisited_vertices);
161 edge->predecessor->vertex = vertex;
164 vertex->out_degree++;
165 list_add_tail(&edge->vertex_entry, &vertex->edges);
167 unix_update_graph(unix_edge_successor(edge));
170 static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
172 struct unix_vertex *vertex = edge->predecessor->vertex;
174 if (!fpl->dead)
175 unix_update_graph(unix_edge_successor(edge));
177 list_del(&edge->vertex_entry);
178 vertex->out_degree--;
180 if (!vertex->out_degree) {
181 edge->predecessor->vertex = NULL;
182 list_move_tail(&vertex->entry, &fpl->vertices);
190 list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
191 list_del(&vertex->entry);
205 if (!fpl->count_unix)
209 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
210 struct unix_edge *edge;
215 edge = fpl->edges + i++;
216 edge->predecessor = inflight;
217 edge->successor = receiver;
219 unix_add_edge(fpl, edge);
220 } while (i < fpl->count_unix);
222 receiver->scm_stat.nr_unix_fds += fpl->count_unix;
223 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
225 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
229 fpl->inflight = true;
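/* Annotation, not in the source: unix_add_edges() above links one edge per
 * AF_UNIX fd in the message into the sender's vertex, then bumps the
 * receiver's scm_stat.nr_unix_fds, the global unix_tot_inflight counter and
 * the sending user's ->unix_inflight before marking the fpl as in flight;
 * unix_del_edges() below reverses each of those steps.
 */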
241 if (!fpl->count_unix)
245 struct unix_edge *edge = fpl->edges + i++;
247 unix_del_edge(fpl, edge);
248 } while (i < fpl->count_unix);
250 if (!fpl->dead) {
251 receiver = fpl->edges[0].successor;
252 receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
254 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
256 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
260 fpl->inflight = false;
269 if (!receiver->scm_stat.nr_unix_fds) {
270 receiver->listener = NULL;
273 unix_update_graph(unix_sk(receiver->listener)->vertex);
274 receiver->listener = NULL;
284 if (!fpl->count_unix)
287 for (i = 0; i < fpl->count_unix; i++) {
292 list_add(&vertex->entry, &fpl->vertices);
295 fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
297 if (!fpl->edges)
304 return -ENOMEM;
309 if (fpl->inflight)
312 kvfree(fpl->edges);
318 struct unix_edge *edge;
322 list_for_each_entry(edge, &vertex->edges, vertex_entry) {
323 struct unix_vertex *next_vertex = unix_edge_successor(edge);
325 /* The vertex's fd can be received by a non-inflight socket. */
332 if (next_vertex->scc_index != vertex->scc_index)
338 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
339 u = edge->predecessor;
340 total_ref = file_count(u->sk.sk_socket->file);
343 if (total_ref != vertex->out_degree)
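/* Annotation, not in the source: every edge on a vertex's ->edges list has
 * the same predecessor (the socket that owns the vertex), so the first edge
 * is enough to reach it.  If that file's refcount equals out_degree, all
 * remaining references to the socket are themselves in-flight fds received
 * only inside this SCC, i.e. user space can no longer reach it and the SCC
 * is garbage.
 */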
355 struct unix_edge *edge;
358 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
359 u = edge->predecessor;
360 queue = &u->sk.sk_receive_queue;
362 spin_lock(&queue->lock);
364 if (u->sk.sk_state == TCP_LISTEN) {
368 struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
370 spin_lock(&embryo_queue->lock);
372 spin_unlock(&embryo_queue->lock);
378 spin_unlock(&queue->lock);
385 struct unix_edge *edge;
393 /* Self-reference or an embryo-listener cycle? */
394 list_for_each_entry(edge, &vertex->edges, vertex_entry) {
395 if (unix_edge_successor(edge) == vertex)
409 struct unix_edge *edge;
413 /* Push vertex to vertex_stack and mark it as on-stack
417 list_add(&vertex->scc_entry, &vertex_stack);
419 vertex->index = *last_index;
420 vertex->scc_index = *last_index;
424 list_for_each_entry(edge, &vertex->edges, vertex_entry) {
425 struct unix_vertex *next_vertex = unix_edge_successor(edge);
430 if (next_vertex->index == unix_vertex_unvisited_index) {
433 * 1. Push a forward edge to edge_stack and set
436 list_add(&edge->stack_entry, &edge_stack);
441 /* 2. Pop the edge directed to the current vertex
445 edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
446 list_del_init(&edge->stack_entry);
449 vertex = edge->predecessor->vertex;
455 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
456 } else if (next_vertex->index != unix_vertex_grouped_index) {
457 /* Loop detected by a back/cross edge.
463 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
469 if (vertex->index == vertex->scc_index) {
479 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
483 list_move_tail(&v->entry, &unix_visited_vertices);
485 /* Mark vertex as off-stack. */
486 v->index = unix_vertex_grouped_index;
537 list_add(&scc, &vertex->scc_entry);
540 list_move_tail(&vertex->entry, &unix_visited_vertices);
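
The fragments above implement an iterative Tarjan strongly-connected-components walk, with an explicit edge_stack standing in for recursion. For comparison, here is a minimal, self-contained sketch of the textbook recursive form; the toy graph, array names and output are purely illustrative, but idx[] and low[] play the same roles as ->index and ->scc_index above.

#include <stdio.h>

#define NV 5

static const int adj[NV][NV] = {
	/* toy graph: 0 -> 1 -> 2 -> 0 (one SCC), plus 2 -> 3 -> 4 (two singletons) */
	{ 0, 1, 0, 0, 0 },
	{ 0, 0, 1, 0, 0 },
	{ 1, 0, 0, 1, 0 },
	{ 0, 0, 0, 0, 1 },
	{ 0, 0, 0, 0, 0 },
};

static int idx[NV], low[NV], on_stack[NV];
static int stack[NV], top = -1, next_index = 1;

static void strongconnect(int v)
{
	/* like vertex->index / vertex->scc_index in the kernel code */
	idx[v] = low[v] = next_index++;
	stack[++top] = v;
	on_stack[v] = 1;

	for (int w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;

		if (!idx[w]) {
			/* forward edge: visit w first, then inherit its lowlink */
			strongconnect(w);
			if (low[w] < low[v])
				low[v] = low[w];
		} else if (on_stack[w] && idx[w] < low[v]) {
			/* back/cross edge into the SCC being built */
			low[v] = idx[w];
		}
	}

	if (idx[v] == low[v]) {
		/* v is the root of an SCC: pop its members off the stack */
		int w;

		printf("SCC:");
		do {
			w = stack[top--];
			on_stack[w] = 0;
			printf(" %d", w);
		} while (w != v);
		printf("\n");
	}
}

int main(void)
{
	for (int v = 0; v < NV; v++)
		if (!idx[v])
			strongconnect(v);
	return 0;
}
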
582 UNIXCB(skb).fp->dead = true;
616 if (!fpl || !fpl->count_unix ||
617 READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
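/* Annotation, not in the source: the last fragments come from the teardown
 * and throttling paths.  fpl->dead is set on file lists queued to sockets in
 * an unreachable SCC just before they are purged, so that the resulting
 * unix_del_edges() skips graph updates and receiver accounting, and a sender
 * is only made to wait for an in-progress collection once its user's
 * in-flight fd count reaches UNIX_INFLIGHT_SANE_USER.
 */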