xref: /linux/drivers/net/can/dev/rx-offload.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014      Protonic Holland,
 *                         David Jander
 * Copyright (C) 2014-2021 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

/* Per-skb control buffer: holds the hardware RX timestamp used to sort
 * frames before they are handed to the networking stack.
 */
struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

/* Mailbox iteration helpers: depending on offload->inc the mailboxes are
 * walked in ascending (mb_first < mb_last) or descending order.
 */
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

/* NAPI poll handler: dequeue up to @quota skbs from offload->skb_queue,
 * account RX statistics and pass the frames to the networking stack.
 */
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		if (!(cf->can_id & CAN_ERR_FLAG)) {
			stats->rx_packets++;
			if (!(cf->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += cf->len;
		}
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

/* Insert @new into @head so that the queue stays sorted in ascending
 * order according to @compare (oldest timestamp at the head).
 */
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

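/* Worked example (hypothetical timestamp values, not taken from hardware):
 * with cb_a->timestamp = 0xffffff00 and cb_b->timestamp = 0x00000100, the
 * u32 subtraction yields 0x200, i.e. +512 when interpreted as int, so the
 * frame whose counter already wrapped past zero still compares as newer;
 * swapping the operands yields -512. This only works as long as the frames
 * being sorted are less than half the counter range (2^31 ticks) apart.
 */
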
/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If the queue is full, drop the frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (IS_ERR(skb)) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

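/* For illustration only: a driver's struct can_rx_offload::mailbox_read
 * callback has to match the call above, i.e. return the frame from
 * mailbox @n as an skb, NULL if the mailbox is empty, or an ERR_PTR().
 * A minimal sketch (the foo_* register helpers are hypothetical, not part
 * of this file) might look like:
 *
 *	static struct sk_buff *
 *	foo_mailbox_read(struct can_rx_offload *offload, unsigned int n,
 *			 u32 *timestamp, bool drop)
 *	{
 *		struct foo_priv *priv =
 *			container_of(offload, struct foo_priv, offload);
 *		struct can_frame *cf;
 *		struct sk_buff *skb;
 *
 *		if (!foo_mb_has_data(priv, n))
 *			return NULL;
 *
 *		if (drop) {
 *			foo_mb_discard(priv, n);
 *			return ERR_PTR(-ENOBUFS);
 *		}
 *
 *		skb = alloc_can_skb(offload->dev, &cf);
 *		if (!skb) {
 *			foo_mb_discard(priv, n);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		*timestamp = foo_mb_read_frame(priv, n, cf);
 *		return skb;
 *	}
 */
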
/* Read all mailboxes flagged in @pending (one bit per mailbox) and queue
 * the frames sorted by their timestamps. Returns the number of frames
 * queued.
 */
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	unsigned int i;
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

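/* Typical use from a driver's hard interrupt handler (sketch only; the
 * pending-mailbox register read is hypothetical and device specific):
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *		u64 pending = foo_read_pending_mailboxes(priv);
 *
 *		if (pending)
 *			can_rx_offload_irq_offload_timestamp(&priv->offload,
 *							     pending);
 *
 *		can_rx_offload_irq_finish(&priv->offload);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * can_rx_offload_irq_finish() then splices the collected frames onto the
 * NAPI queue and schedules the poll routine defined above.
 */
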
/* Drain a hardware RX FIFO by repeatedly reading mailbox 0 until it is
 * empty and queue the frames in FIFO order. Returns the number of frames
 * queued.
 */
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

/* Queue a locally generated skb (e.g. an error frame) for delivery,
 * sorted into the pending frames by @timestamp. Consumes @skb; returns
 * -ENOBUFS and frees it if the queue is already over its limit.
 */
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

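/* Sketch of how a driver might feed a timestamped error frame through the
 * sorted queue (the timestamp source is hypothetical):
 *
 *	struct can_frame *cf;
 *	struct sk_buff *skb;
 *	u32 timestamp = foo_read_timestamp(priv);
 *
 *	skb = alloc_can_err_skb(priv->offload.dev, &cf);
 *	if (skb) {
 *		cf->can_id |= CAN_ERR_BUSERROR;
 *		can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 *	}
 *
 * Keeping error frames on the same timestamp-sorted path preserves their
 * ordering relative to the offloaded data frames.
 */
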
/* TX completion helper: fetch the echo skb for index @idx, queue it
 * sorted by @timestamp and return the frame's data length (0 if there is
 * no echo skb). On queueing failure the error counters are incremented.
 */
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp,
					 unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

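/* Sketch of a TX-complete path using the helper above, where stats is
 * &dev->stats and the mailbox index and timestamp are hypothetical,
 * device-specific values:
 *
 *	stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
 *						       mb_idx, timestamp,
 *						       NULL);
 *	stats->tx_packets++;
 *
 * Passing NULL as frame_len_ptr is fine when the driver does not track
 * the frame length of the echoed skb.
 */
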
/* Queue an skb at the tail, without timestamp sorting (FIFO mode).
 * Consumes @skb; returns -ENOBUFS and frees it if the queue is full.
 */
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

/* To be called at the end of a hard interrupt handler: splice the frames
 * collected in skb_irq_queue onto the NAPI queue and schedule the poll
 * routine.
 */
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

/* Same as can_rx_offload_irq_finish(), but for threaded interrupt
 * handlers: napi_schedule() is wrapped in local_bh_disable()/enable() so
 * that NAPI can be kicked from process context.
 */
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	local_bh_disable();
	napi_schedule(&offload->napi);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);
	__skb_queue_head_init(&offload->skb_irq_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

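/* Numeric example for the sizing above (weight chosen for illustration):
 * with weight = 64, fls(64) = 7, so skb_queue_len_max = (2 << 7) * 4 =
 * 1024 queued skbs at most.
 */
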
/* Register rx-offload in timestamp mode: the driver must have set
 * mb_first, mb_last and mailbox_read; the NAPI weight is derived from
 * the number of mailboxes.
 */
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

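/* Probe-time setup sketch (the field values and foo_mailbox_read are
 * hypothetical examples):
 *
 *	priv->offload.mailbox_read = foo_mailbox_read;
 *	priv->offload.mb_first = 8;
 *	priv->offload.mb_last = 63;
 *	err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *	if (err)
 *		return err;
 *
 * can_rx_offload_enable() is then called when the interface is brought
 * up, and can_rx_offload_del() on removal.
 */
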
int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

/* Manual mode: the driver feeds skbs itself via
 * can_rx_offload_queue_sorted() or can_rx_offload_queue_tail(), so no
 * mailbox_read callback may be set.
 */
int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
	__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);