// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//

#include <linux/unaligned.h>
#include <linux/bitfield.h>

#include "mcp251xfd.h"
19
20 static inline struct
mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring * tx_ring)21 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
22 {
23 u8 tx_head;
24
25 tx_head = mcp251xfd_get_tx_head(tx_ring);
26
27 return &tx_ring->obj[tx_head];
28 }
29
/* mcp251xfd_tx_obj_from_skb() - serialize an outgoing CAN(-FD) frame
 * into the controller's hardware TX object layout.
 *
 * Builds the ID and FLAGS words of the hardware TX object, copies the
 * payload (optionally zero-padding it to the sanitized CAN-FD length)
 * and stores in tx_obj->xfer[0].len the total number of bytes to be
 * shifted out over SPI, including the SPI command and — on devices
 * with MCP251XFD_QUIRK_CRC_TX — the trailing CRC.
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		/* Split the 29 bit extended ID into the separate
		 * SID/EID fields of the hardware TX object.
		 */
		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	/* RTR frames carry no payload, so only data frames get a
	 * sanitized (CAN-FD padded) length.
	 */
	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		/* Classic CAN: derive the DLC from the frame,
		 * honouring the configured ctrlmode.
		 */
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The load buffer layout differs depending on whether the SPI
	 * transfer is CRC protected.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		/* Patch the payload length into the SPI command, then
		 * compute the CRC over command + TX object and append
		 * it right after the covered bytes.
		 */
		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
133
mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv * priv,struct mcp251xfd_tx_ring * tx_ring,int err)134 static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
135 struct mcp251xfd_tx_ring *tx_ring,
136 int err)
137 {
138 struct net_device *ndev = priv->ndev;
139 struct net_device_stats *stats = &ndev->stats;
140 unsigned int frame_len = 0;
141 u8 tx_head;
142
143 tx_ring->head--;
144 stats->tx_dropped++;
145 tx_head = mcp251xfd_get_tx_head(tx_ring);
146 can_free_echo_skb(ndev, tx_head, &frame_len);
147 netdev_completed_queue(ndev, 1, frame_len);
148 netif_wake_queue(ndev);
149
150 if (net_ratelimit())
151 netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
152 }
153
mcp251xfd_tx_obj_write_sync(struct work_struct * work)154 void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
155 {
156 struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
157 tx_work);
158 struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
159 struct mcp251xfd_tx_ring *tx_ring = priv->tx;
160 int err;
161
162 err = spi_sync(priv->spi, &tx_obj->msg);
163 if (err)
164 mcp251xfd_tx_failure_drop(priv, tx_ring, err);
165 }
166
mcp251xfd_tx_obj_write(const struct mcp251xfd_priv * priv,struct mcp251xfd_tx_obj * tx_obj)167 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
168 struct mcp251xfd_tx_obj *tx_obj)
169 {
170 return spi_async(priv->spi, &tx_obj->msg);
171 }
172
/* mcp251xfd_tx_busy() - check for a free TX object, stopping the
 * netif queue when the ring is full.
 *
 * Returns true if the TX ring has no free object (caller should
 * return NETDEV_TX_BUSY), false otherwise. The queue is stopped
 * before re-checking tx_free under a full memory barrier to close the
 * race against the completion path that advances the ring tail and
 * wakes the queue — presumably the TEF handler; the barrier pairing
 * lives outside this file (NOTE(review): confirm against the TEF
 * code).
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	/* Fast path: a slot is free, nothing to do. */
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	/* A slot was freed between the first check and the queue
	 * stop: resume transmission.
	 */
	netif_start_queue(priv->ndev);

	return false;
}
197
mcp251xfd_work_busy(struct work_struct * work)198 static bool mcp251xfd_work_busy(struct work_struct *work)
199 {
200 return work_busy(work);
201 }
202
/* mcp251xfd_start_xmit() - ndo_start_xmit callback.
 *
 * Serializes the skb into the next free TX object, queues it as an
 * echo skb for later TX completion and starts the asynchronous SPI
 * transfer. Returns NETDEV_TX_BUSY when the TX ring is full or the
 * deferred TX worker is still busy; otherwise NETDEV_TX_OK.
 */
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
				 struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dev_dropped_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring) ||
	    mcp251xfd_work_busy(&priv->tx_work))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	/* The ring head doubles as the sequence number put into the
	 * TX object's FLAGS word, so completions can be matched to
	 * this frame.
	 */
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	/* Queue the echo skb at the pre-increment head index; BQL
	 * accounting only when the echo skb was accepted.
	 */
	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err == -EBUSY) {
		/* SPI controller temporarily busy: stop the queue and
		 * hand this object to the worker, which retries it
		 * with spi_sync().
		 */
		netif_stop_queue(ndev);
		priv->tx_work_obj = tx_obj;
		queue_work(priv->wq, &priv->tx_work);
	} else if (err) {
		mcp251xfd_tx_failure_drop(priv, tx_ring, err);
	}

	return NETDEV_TX_OK;
}
245