xref: /linux/drivers/net/can/peak_canfd/peak_canfd.c (revision cbac924200b838cfb8d8b1415113d788089dc50b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
3  * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
4  *
5  * Copyright (C) 2016  PEAK System-Technik GmbH
6  */
7 
8 #include <linux/can.h>
9 #include <linux/can/dev.h>
10 
11 #include "peak_canfd_user.h"
12 
13 /* internal IP core cache size (used as the default max number of echo skbs) */
14 #define PCANFD_ECHO_SKB_MAX		24
15 
16 /* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
17 static const struct can_bittiming_const peak_canfd_nominal_const = {
18 	.name = "peak_canfd",
19 	.tseg1_min = 1,
20 	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
21 	.tseg2_min = 1,
22 	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
23 	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
24 	.brp_min = 1,
25 	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
26 	.brp_inc = 1,
27 };
28 
29 static const struct can_bittiming_const peak_canfd_data_const = {
30 	.name = "peak_canfd",
31 	.tseg1_min = 1,
32 	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
33 	.tseg2_min = 1,
34 	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
35 	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
36 	.brp_min = 1,
37 	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
38 	.brp_inc = 1,
39 };
40 
41 static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
42 {
43 	priv->cmd_len = 0;
44 	return priv;
45 }
46 
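/* append one command to the channel command buffer (all unused fields are
 * zeroed); returns NULL when there is no room left for another command.
 */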
47 static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
48 {
49 	struct pucan_command *cmd;
50 
51 	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
52 		return NULL;
53 
54 	cmd = priv->cmd_buffer + priv->cmd_len;
55 
56 	/* reset all unused bits to default */
57 	memset(cmd, 0, sizeof(*cmd));
58 
59 	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
60 	priv->cmd_len += sizeof(*cmd);
61 
62 	return cmd;
63 }
64 
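/* flush the command buffer to the IP core, running the optional bus-specific
 * pre_cmd()/post_cmd() hooks around the actual write_cmd() call.
 */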
65 static int pucan_write_cmd(struct peak_canfd_priv *priv)
66 {
67 	int err;
68 
69 	if (priv->pre_cmd) {
70 		err = priv->pre_cmd(priv);
71 		if (err)
72 			return err;
73 	}
74 
75 	err = priv->write_cmd(priv);
76 	if (err)
77 		return err;
78 
79 	if (priv->post_cmd)
80 		err = priv->post_cmd(priv);
81 
82 	return err;
83 }
84 
85 /* uCAN command interface functions */
86 static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
87 {
88 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
89 	return pucan_write_cmd(priv);
90 }
91 
92 static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
93 {
94 	int err;
95 
96 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
97 	err = pucan_write_cmd(priv);
98 	if (!err)
99 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
100 
101 	return err;
102 }
103 
104 static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
105 {
106 	int err;
107 
108 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
109 	err = pucan_write_cmd(priv);
110 	if (!err)
111 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
112 
113 	return err;
114 }
115 
116 static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
117 				 const struct can_bittiming *pbt)
118 {
119 	struct pucan_timing_slow *cmd;
120 
121 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
122 
123 	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
124 				       priv->can.ctrlmode &
125 				       CAN_CTRLMODE_3_SAMPLES);
126 	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
127 	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
128 	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
129 
130 	cmd->ewl = 96;	/* default */
131 
132 	netdev_dbg(priv->ndev,
133 		   "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
134 		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
135 
136 	return pucan_write_cmd(priv);
137 }
138 
139 static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
140 				 const struct can_bittiming *pbt)
141 {
142 	struct pucan_timing_fast *cmd;
143 
144 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);
145 
146 	cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
147 	cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
148 	cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
149 	cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));
150 
151 	netdev_dbg(priv->ndev,
152 		   "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
153 		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);
154 
155 	return pucan_write_cmd(priv);
156 }
157 
158 static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
159 {
160 	struct pucan_std_filter *cmd;
161 
162 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
163 
164 	/* each 11-bit CAN ID value is represented by one bit in an array of
165 	 * 64 rows of 32 bits: the upper 6 bits of the CAN ID select the
166 	 * row while the lowest 5 bits select the bit in that row.
167 	 *
168 	 * bit	filter
169 	 * 1	passed
170 	 * 0	discarded
171 	 */
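	/* for instance (illustrative value only): standard CAN ID 0x123 maps
	 * to row 0x123 >> 5 = 9, bit 0x123 & 0x1f = 3 of that row.
	 */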
172 
173 	/* select the row */
174 	cmd->idx = row;
175 
176 	/* set/unset bits in the row */
177 	cmd->mask = cpu_to_le32(mask);
178 
179 	return pucan_write_cmd(priv);
180 }
181 
182 static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
183 {
184 	struct pucan_tx_abort *cmd;
185 
186 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
187 
188 	cmd->flags = cpu_to_le16(flags);
189 
190 	return pucan_write_cmd(priv);
191 }
192 
193 static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
194 {
195 	struct pucan_wr_err_cnt *cmd;
196 
197 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
198 
199 	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
200 	cmd->tx_counter = 0;
201 	cmd->rx_counter = 0;
202 
203 	return pucan_write_cmd(priv);
204 }
205 
206 static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
207 {
208 	struct pucan_options *cmd;
209 
210 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
211 
212 	cmd->options = cpu_to_le16(opt_mask);
213 
214 	return pucan_write_cmd(priv);
215 }
216 
217 static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
218 {
219 	struct pucan_options *cmd;
220 
221 	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);
222 
223 	cmd->options = cpu_to_le16(opt_mask);
224 
225 	return pucan_write_cmd(priv);
226 }
227 
228 static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
229 {
230 	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
231 
232 	return pucan_write_cmd(priv);
233 }
234 
235 static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
236 {
237 	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
238 	u64 ts_us;
239 
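	/* rebuild the 64-bit timestamp from its two little-endian 32-bit halves */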
240 	ts_us = (u64)le32_to_cpu(ts_high) << 32;
241 	ts_us |= le32_to_cpu(ts_low);
242 
243 	/* IP core timestamps are µs. */
244 	hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
245 
246 	return netif_rx(skb);
247 }
248 
249 /* handle the reception of one CAN frame */
250 static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
251 			       struct pucan_rx_msg *msg)
252 {
253 	struct net_device_stats *stats = &priv->ndev->stats;
254 	struct canfd_frame *cf;
255 	struct sk_buff *skb;
256 	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
257 	u8 cf_len;
258 
259 	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
260 		cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg));
261 	else
262 		cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg));
263 
264 	/* if this frame is an echo, */
265 	if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
266 		unsigned long flags;
267 
268 		spin_lock_irqsave(&priv->echo_lock, flags);
269 
270 		/* count bytes of the echo instead of skb */
271 		stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
272 		stats->tx_packets++;
273 
274 		/* restart tx queue (a slot is free) */
275 		netif_wake_queue(priv->ndev);
276 
277 		spin_unlock_irqrestore(&priv->echo_lock, flags);
278 
279 		/* if this frame is only an echo, stop here. Otherwise,
280 		 * continue to push this application self-received frame into
281 		 * its own rx queue.
282 		 */
283 		if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
284 			return 0;
285 	}
286 
287 	/* otherwise, it should be pushed into rx fifo */
288 	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
289 		/* CANFD frame case */
290 		skb = alloc_canfd_skb(priv->ndev, &cf);
291 		if (!skb)
292 			return -ENOMEM;
293 
294 		if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
295 			cf->flags |= CANFD_BRS;
296 
297 		if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
298 			cf->flags |= CANFD_ESI;
299 	} else {
300 		/* CAN 2.0 frame case */
301 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
302 		if (!skb)
303 			return -ENOMEM;
304 	}
305 
306 	cf->can_id = le32_to_cpu(msg->can_id);
307 	cf->len = cf_len;
308 
309 	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
310 		cf->can_id |= CAN_EFF_FLAG;
311 
312 	if (rx_msg_flags & PUCAN_MSG_RTR) {
313 		cf->can_id |= CAN_RTR_FLAG;
314 	} else {
315 		memcpy(cf->data, msg->d, cf->len);
316 
317 		stats->rx_bytes += cf->len;
318 	}
319 	stats->rx_packets++;
320 
321 	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
322 
323 	return 0;
324 }
325 
326 /* handle rx/tx error counters notification */
327 static int pucan_handle_error(struct peak_canfd_priv *priv,
328 			      struct pucan_error_msg *msg)
329 {
330 	priv->bec.txerr = msg->tx_err_cnt;
331 	priv->bec.rxerr = msg->rx_err_cnt;
332 
333 	return 0;
334 }
335 
336 /* handle status notification */
337 static int pucan_handle_status(struct peak_canfd_priv *priv,
338 			       struct pucan_status_msg *msg)
339 {
340 	struct net_device *ndev = priv->ndev;
341 	struct net_device_stats *stats = &ndev->stats;
342 	struct can_frame *cf;
343 	struct sk_buff *skb;
344 
345 	/* this STATUS is the CNF of the RX_BARRIER: the Tx path can be set up */
346 	if (pucan_status_is_rx_barrier(msg)) {
347 		if (priv->enable_tx_path) {
348 			int err = priv->enable_tx_path(priv);
349 
350 			if (err)
351 				return err;
352 		}
353 
354 		/* wake network queue up (echo_skb array is empty) */
355 		netif_wake_queue(ndev);
356 
357 		return 0;
358 	}
359 
360 	skb = alloc_can_err_skb(ndev, &cf);
361 
362 	/* test state error bits according to their priority */
363 	if (pucan_status_is_busoff(msg)) {
364 		netdev_dbg(ndev, "Bus-off entry status\n");
365 		priv->can.state = CAN_STATE_BUS_OFF;
366 		priv->can.can_stats.bus_off++;
367 		can_bus_off(ndev);
368 		if (skb)
369 			cf->can_id |= CAN_ERR_BUSOFF;
370 
371 	} else if (pucan_status_is_passive(msg)) {
372 		netdev_dbg(ndev, "Error passive status\n");
373 		priv->can.state = CAN_STATE_ERROR_PASSIVE;
374 		priv->can.can_stats.error_passive++;
375 		if (skb) {
376 			cf->can_id |= CAN_ERR_CRTL;
377 			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
378 					CAN_ERR_CRTL_TX_PASSIVE :
379 					CAN_ERR_CRTL_RX_PASSIVE;
380 			cf->data[6] = priv->bec.txerr;
381 			cf->data[7] = priv->bec.rxerr;
382 		}
383 
384 	} else if (pucan_status_is_warning(msg)) {
385 		netdev_dbg(ndev, "Error warning status\n");
386 		priv->can.state = CAN_STATE_ERROR_WARNING;
387 		priv->can.can_stats.error_warning++;
388 		if (skb) {
389 			cf->can_id |= CAN_ERR_CRTL;
390 			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
391 					CAN_ERR_CRTL_TX_WARNING :
392 					CAN_ERR_CRTL_RX_WARNING;
393 			cf->data[6] = priv->bec.txerr;
394 			cf->data[7] = priv->bec.rxerr;
395 		}
396 
397 	} else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
398 		/* back to ERROR_ACTIVE */
399 		netdev_dbg(ndev, "Error active status\n");
400 		can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
401 				 CAN_STATE_ERROR_ACTIVE);
402 	} else {
403 		dev_kfree_skb(skb);
404 		return 0;
405 	}
406 
407 	if (!skb) {
408 		stats->rx_dropped++;
409 		return -ENOMEM;
410 	}
411 
412 	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
413 
414 	return 0;
415 }
416 
417 /* handle uCAN Rx overflow notification */
418 static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
419 {
420 	struct net_device_stats *stats = &priv->ndev->stats;
421 	struct can_frame *cf;
422 	struct sk_buff *skb;
423 
424 	stats->rx_over_errors++;
425 	stats->rx_errors++;
426 
427 	skb = alloc_can_err_skb(priv->ndev, &cf);
428 	if (!skb) {
429 		stats->rx_dropped++;
430 		return -ENOMEM;
431 	}
432 
433 	cf->can_id |= CAN_ERR_CRTL;
434 	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
435 
436 	cf->data[6] = priv->bec.txerr;
437 	cf->data[7] = priv->bec.rxerr;
438 
439 	netif_rx(skb);
440 
441 	return 0;
442 }
443 
444 /* handle a single uCAN message */
445 int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
446 			  struct pucan_rx_msg *msg)
447 {
448 	u16 msg_type = le16_to_cpu(msg->type);
449 	int msg_size = le16_to_cpu(msg->size);
450 	int err;
451 
452 	if (!msg_size || !msg_type) {
453 		/* null packet found: end of list */
454 		goto exit;
455 	}
456 
457 	switch (msg_type) {
458 	case PUCAN_MSG_CAN_RX:
459 		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
460 		break;
461 	case PUCAN_MSG_ERROR:
462 		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
463 		break;
464 	case PUCAN_MSG_STATUS:
465 		err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
466 		break;
467 	case PUCAN_MSG_CACHE_CRITICAL:
468 		err = pucan_handle_cache_critical(priv);
469 		break;
470 	default:
471 		err = 0;
472 	}
473 
474 	if (err < 0)
475 		return err;
476 
477 exit:
478 	return msg_size;
479 }
480 
481 /* handle a list of msg_count messages starting at the msg_list address */
482 int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
483 				struct pucan_rx_msg *msg_list, int msg_count)
484 {
485 	void *msg_ptr = msg_list;
486 	int i, msg_size = 0;
487 
488 	for (i = 0; i < msg_count; i++) {
489 		msg_size = peak_canfd_handle_msg(priv, msg_ptr);
490 
491 		/* a null packet can be found at the end of a list */
492 		if (msg_size <= 0)
493 			break;
494 
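		/* uCAN messages are laid out on 32-bit boundaries in the rx area */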
495 		msg_ptr += ALIGN(msg_size, 4);
496 	}
497 
498 	if (msg_size < 0)
499 		return msg_size;
500 
501 	return i;
502 }
503 
504 static int peak_canfd_start(struct peak_canfd_priv *priv)
505 {
506 	int err;
507 
508 	err = pucan_clr_err_counters(priv);
509 	if (err)
510 		goto err_exit;
511 
512 	priv->echo_idx = 0;
513 
514 	priv->bec.txerr = 0;
515 	priv->bec.rxerr = 0;
516 
517 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
518 		err = pucan_set_listen_only_mode(priv);
519 	else
520 		err = pucan_set_normal_mode(priv);
521 
522 err_exit:
523 	return err;
524 }
525 
526 static void peak_canfd_stop(struct peak_canfd_priv *priv)
527 {
528 	int err;
529 
530 	/* go back to RESET mode */
531 	err = pucan_set_reset_mode(priv);
532 	if (err) {
533 		netdev_err(priv->ndev, "channel %u reset failed\n",
534 			   priv->index);
535 	} else {
536 		/* abort last Tx (MUST be done in RESET mode only!) */
537 		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
538 	}
539 }
540 
541 static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
542 {
543 	struct peak_canfd_priv *priv = netdev_priv(ndev);
544 
545 	switch (mode) {
546 	case CAN_MODE_START:
547 		peak_canfd_start(priv);
548 		netif_wake_queue(ndev);
549 		break;
550 	default:
551 		return -EOPNOTSUPP;
552 	}
553 
554 	return 0;
555 }
556 
557 static int peak_canfd_get_berr_counter(const struct net_device *ndev,
558 				       struct can_berr_counter *bec)
559 {
560 	struct peak_canfd_priv *priv = netdev_priv(ndev);
561 
562 	*bec = priv->bec;
563 	return 0;
564 }
565 
566 static int peak_canfd_open(struct net_device *ndev)
567 {
568 	struct peak_canfd_priv *priv = netdev_priv(ndev);
569 	int i, err = 0;
570 
571 	err = open_candev(ndev);
572 	if (err) {
573 		netdev_err(ndev, "open_candev() failed, error %d\n", err);
574 		goto err_exit;
575 	}
576 
577 	err = pucan_set_reset_mode(priv);
578 	if (err)
579 		goto err_close;
580 
581 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
582 		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
583 			err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
584 		else
585 			err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);
586 
587 		if (err)
588 			goto err_close;
589 	}
590 
591 	/* set option: get rx/tx error counters */
592 	err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
593 	if (err)
594 		goto err_close;
595 
596 	/* accept all standard CAN IDs */
597 	for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
598 		pucan_set_std_filter(priv, i, 0xffffffff);
599 
600 	err = peak_canfd_start(priv);
601 	if (err)
602 		goto err_close;
603 
604 	/* receiving the RX_BARRIER status tells us when the Tx path is ready */
605 	err = pucan_setup_rx_barrier(priv);
606 	if (!err)
607 		goto err_exit;
608 
609 err_close:
610 	close_candev(ndev);
611 err_exit:
612 	return err;
613 }
614 
615 static int peak_canfd_set_bittiming(struct net_device *ndev)
616 {
617 	struct peak_canfd_priv *priv = netdev_priv(ndev);
618 
619 	return pucan_set_timing_slow(priv, &priv->can.bittiming);
620 }
621 
622 static int peak_canfd_set_data_bittiming(struct net_device *ndev)
623 {
624 	struct peak_canfd_priv *priv = netdev_priv(ndev);
625 
626 	return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
627 }
628 
629 static int peak_canfd_close(struct net_device *ndev)
630 {
631 	struct peak_canfd_priv *priv = netdev_priv(ndev);
632 
633 	netif_stop_queue(ndev);
634 	peak_canfd_stop(priv);
635 	close_candev(ndev);
636 
637 	return 0;
638 }
639 
640 static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
641 					 struct net_device *ndev)
642 {
643 	struct peak_canfd_priv *priv = netdev_priv(ndev);
644 	struct net_device_stats *stats = &ndev->stats;
645 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
646 	struct pucan_tx_msg *msg;
647 	u16 msg_size, msg_flags;
648 	unsigned long flags;
649 	bool should_stop_tx_queue;
650 	int room_left;
651 	u8 len;
652 
653 	if (can_dropped_invalid_skb(ndev, skb))
654 		return NETDEV_TX_OK;
655 
656 	msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
657 	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);
658 
659 	/* should never happen except during a bus-off condition and the
660 	 * (auto-)restart mechanism that follows it
661 	 */
662 	if (!msg) {
663 		stats->tx_dropped++;
664 		netif_stop_queue(ndev);
665 		return NETDEV_TX_BUSY;
666 	}
667 
668 	msg->size = cpu_to_le16(msg_size);
669 	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
670 	msg_flags = 0;
671 
672 	if (cf->can_id & CAN_EFF_FLAG) {
673 		msg_flags |= PUCAN_MSG_EXT_ID;
674 		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
675 	} else {
676 		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
677 	}
678 
679 	if (can_is_canfd_skb(skb)) {
680 		/* CAN FD frame format */
681 		len = can_fd_len2dlc(cf->len);
682 
683 		msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
684 
685 		if (cf->flags & CANFD_BRS)
686 			msg_flags |= PUCAN_MSG_BITRATE_SWITCH;
687 
688 		if (cf->flags & CANFD_ESI)
689 			msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
690 	} else {
691 		/* CAN 2.0 frame format */
692 		len = cf->len;
693 
694 		if (cf->can_id & CAN_RTR_FLAG)
695 			msg_flags |= PUCAN_MSG_RTR;
696 	}
697 
698 	/* always ask loopback for echo management */
699 	msg_flags |= PUCAN_MSG_LOOPED_BACK;
700 
701 	/* set driver-specific bit to differentiate from application loopback */
702 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
703 		msg_flags |= PUCAN_MSG_SELF_RECEIVE;
704 
705 	msg->flags = cpu_to_le16(msg_flags);
706 	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len);
707 	memcpy(msg->d, cf->data, cf->len);
708 
709 	/* the client field of the msg is used as an index into the echo skbs ring */
710 	msg->client = priv->echo_idx;
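	/* on loopback, pucan_handle_can_rx() uses this index to complete the
	 * echo with can_get_echo_skb()
	 */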
711 
712 	spin_lock_irqsave(&priv->echo_lock, flags);
713 
714 	/* prepare and save echo skb in internal slot */
715 	can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
716 
717 	/* move echo index to the next slot */
718 	priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
719 
720 	/* if the next slot is not free, stop the network queue (no free slot in
721 	 * the echo skb ring means that the controller has not yet written those
722 	 * frames to the bus: no need to continue).
723 	 */
724 	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
725 
726 	/* also stop the network tx queue if there is not enough room for one more msg */
727 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
728 		should_stop_tx_queue |= (room_left <
729 					(sizeof(*msg) + CANFD_MAX_DLEN));
730 	else
731 		should_stop_tx_queue |= (room_left <
732 					(sizeof(*msg) + CAN_MAX_DLEN));
733 
734 	if (should_stop_tx_queue)
735 		netif_stop_queue(ndev);
736 
737 	spin_unlock_irqrestore(&priv->echo_lock, flags);
738 
739 	/* write the message to the interface */
740 	priv->write_tx_msg(priv, msg);
741 
742 	return NETDEV_TX_OK;
743 }
744 
745 static const struct net_device_ops peak_canfd_netdev_ops = {
746 	.ndo_open = peak_canfd_open,
747 	.ndo_stop = peak_canfd_close,
748 	.ndo_start_xmit = peak_canfd_start_xmit,
749 	.ndo_change_mtu = can_change_mtu,
750 };
751 
752 struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
753 					int echo_skb_max)
754 {
755 	struct net_device *ndev;
756 	struct peak_canfd_priv *priv;
757 
758 	/* we DO support local echo */
759 	if (echo_skb_max < 0)
760 		echo_skb_max = PCANFD_ECHO_SKB_MAX;
761 
762 	/* allocate the candev object */
763 	ndev = alloc_candev(sizeof_priv, echo_skb_max);
764 	if (!ndev)
765 		return NULL;
766 
767 	priv = netdev_priv(ndev);
768 
769 	/* now complete the socket-can side of the initialization */
770 	priv->can.state = CAN_STATE_STOPPED;
771 	priv->can.bittiming_const = &peak_canfd_nominal_const;
772 	priv->can.data_bittiming_const = &peak_canfd_data_const;
773 
774 	priv->can.do_set_mode = peak_canfd_set_mode;
775 	priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
776 	priv->can.do_set_bittiming = peak_canfd_set_bittiming;
777 	priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
778 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
779 				       CAN_CTRLMODE_LISTENONLY |
780 				       CAN_CTRLMODE_3_SAMPLES |
781 				       CAN_CTRLMODE_FD |
782 				       CAN_CTRLMODE_FD_NON_ISO |
783 				       CAN_CTRLMODE_BERR_REPORTING;
784 
785 	priv->ndev = ndev;
786 	priv->index = index;
787 	priv->cmd_len = 0;
788 	spin_lock_init(&priv->echo_lock);
789 
790 	ndev->flags |= IFF_ECHO;
791 	ndev->netdev_ops = &peak_canfd_netdev_ops;
792 	ndev->dev_id = index;
793 
794 	return ndev;
795 }
796