xref: /linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020, 2021 Pengutronix,
6 //               Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <asm/unaligned.h>
16 #include <linux/bitfield.h>
17 
18 #include "mcp251xfd.h"
19 
20 static inline struct
21 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
22 {
23 	u8 tx_head;
24 
25 	tx_head = mcp251xfd_get_tx_head(tx_ring);
26 
27 	return &tx_ring->obj[tx_head];
28 }
29 
/* Convert a CAN / CAN-FD skb into the controller's hardware TX object
 * layout inside tx_obj->buf and set the length of the SPI transfer
 * that will load it into the chip's RAM.
 *
 * @priv:   driver private data (devtype quirks, ctrlmode)
 * @tx_obj: destination TX object whose load buffer is filled in
 * @skb:    CAN frame to transmit (classic or FD)
 * @seq:    sequence number stored in the object, echoed back in the TEF
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		/* Split the 29-bit extended CAN ID into the separate
		 * SID/EID fields of the hardware object.
		 */
		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		/* Standard frame: 11-bit ID goes into the SID field only. */
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		/* RTR frames carry no data; otherwise round the length
		 * up to the next valid CAN-FD length for padding below.
		 */
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		/* Classic CAN: let the helper pick the DLC according to
		 * the configured ctrlmode (e.g. raw DLC pass-through).
		 */
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The load buffer layout differs depending on whether a CRC
	 * command header is prepended (MCP251XFD_QUIRK_CRC_TX).
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	/* Hardware object fields are little endian in RAM. */
	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		/* Record the RAM payload length in the CRC command
		 * header before computing the checksum over it.
		 */
		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		/* CRC covers the command header plus the payload and is
		 * appended big endian directly after them.
		 */
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
133 
134 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
135 				  struct mcp251xfd_tx_obj *tx_obj)
136 {
137 	return spi_async(priv->spi, &tx_obj->msg);
138 }
139 
/* Check whether the TX ring has room for another frame.
 *
 * Returns false (not busy) if there is free space. If the ring looks
 * full, the netif queue is stopped first and the free count is
 * re-checked after a full memory barrier: this closes the race with a
 * concurrent TX-completion on another CPU that frees a slot between
 * our first check and netif_stop_queue(). If space appeared in that
 * window the queue is restarted and false is returned.
 *
 * NOTE(review): smp_mb() presumably pairs with a barrier in the
 * TX-completion/TEF handling path that updates tx_ring->tail and
 * wakes the queue — confirm against that code (not visible here).
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}
164 
/* ndo_start_xmit callback: queue one CAN frame for transmission.
 *
 * Fills the TX object at the ring head from the skb, advances the
 * head, registers the skb for TX-echo and byte-queue-limit
 * accounting, then starts the asynchronous SPI load of the object.
 * Always returns NETDEV_TX_OK except when the ring is full, in which
 * case NETDEV_TX_BUSY asks the stack to requeue the skb.
 */
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
				 struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	/* The free-running ring head doubles as the sequence number
	 * stored in the hardware object (echoed back via the TEF).
	 */
	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	/* Register the skb under the FIFO index for later echo on
	 * TX-complete and account its length for BQL.
	 */
	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	/* NOTE(review): on spi_async() failure the ring head has
	 * already been advanced and the echo skb queued, yet no TEF
	 * completion will arrive for this slot — confirm how the
	 * IRQ/TEF path reconciles this, only the error is logged here.
	 */
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
206