xref: /linux/drivers/net/wireguard/queueing.c (revision bbcd53c960713507ae764bf81970651b5577b95a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"

struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

void wg_packet_queue_free(struct crypt_queue *queue)
{
	free_percpu(queue->worker);
	WARN_ON(!__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, NULL);
}

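/*
 * What follows is the peer's "prev queue": an intrusive multi-producer,
 * single-consumer (MPSC) linked list, seemingly patterned after Dmitry
 * Vyukov's MPSC node queue. No nodes are allocated: each sk_buff's ->prev
 * pointer is borrowed as the queue's forward link (the NEXT() macro below),
 * and a stub element embedded in struct prev_queue (->empty) stands in for
 * an sk_buff when the queue would otherwise be empty (the STUB() macro).
 * Enqueue may run concurrently on any number of producers; dequeue must
 * only ever run on the single consumer.
 */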
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	/* The STUB() cast is only valid if the embedded stub's next/prev
	 * members line up with those of a real sk_buff.
	 */
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							 offsetof(struct prev_queue, empty));
}

static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	/* Publish skb as the new head, then link the previous head to it. A
	 * consumer running between these two stores sees a NULL next link and
	 * treats the queue as momentarily empty.
	 */
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	/* Refuse to grow past MAX_QUEUED_PACKETS, so that a slow consumer
	 * cannot pile up unbounded memory.
	 */
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

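/*
 * Single-consumer dequeue. Returns the oldest queued sk_buff, or NULL when
 * the queue is empty or when a producer has published a new head but not
 * yet linked it, in which case the caller simply tries again later. The
 * stub element itself is never returned.
 */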
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	/* Skip over the stub if it sits at the tail; an empty queue contains
	 * only the stub.
	 */
	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	/* No next link: either tail is the last linked element, or a producer
	 * is mid-enqueue. In the latter case, bail out and let the caller
	 * retry.
	 */
	if (tail != READ_ONCE(queue->head))
		return NULL;
	/* Re-enqueue the stub so the final real element can be handed back
	 * without leaving the tail dangling.
	 */
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB