// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
//               Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//

#include <linux/unaligned.h>

#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"

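/* Build the SPI command that writes the bytes of @val selected by @mask to
 * register @reg into @write_reg_buf. Depending on MCP251XFD_QUIRK_CRC_REG
 * and the number of bytes, the plain, the "safe" or the CRC write command
 * format is used. Returns the total length of the prepared SPI transfer.
 *
 * Example (assuming mcp251xfd_first_byte_set()/mcp251xfd_last_byte_set()
 * return the lowest/highest byte index set in @mask): mask == 0x0000ff00
 * gives first_byte == last_byte == 1 and len == 1, so a single data byte
 * containing bits 15..8 of @val is written to @reg + 1.
 */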
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
		len += sizeof(write_reg_buf->nocrc.cmd);
	} else if (len == 1) {
		u16 crc;

		/* CRC */
		len += sizeof(write_reg_buf->safe.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->safe.crc);
	} else {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	}

	return len;
}

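/* Initialize the TX Event FIFO (TEF) ring state and pre-build the SPI
 * messages used from the IRQ path: one message that enables the TEF
 * interrupts and a chain of transfers that increment the TEF tail pointer
 * (UINC). If TX IRQ coalescing is configured, the last UINC transfer uses
 * uinc_irq_disable_buf, which also switches the TEF interrupt setup for
 * coalescing. @base is set to the first RAM address after the TEF objects.
 */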
static void
mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* TEF- and TX-FIFO have same number of objects */
	*base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);

	/* FIFO IRQ enable */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;

	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
					      addr, val, val);
	tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
	tef_ring->irq_enable_xfer.len = len;
	spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
					&tef_ring->irq_enable_xfer, 1);

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
		xfer = &tef_ring->uinc_xfer[i];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as
	 * data. Set "cs_change" of the last transfer to "0" to
	 * properly deactivate the chip select at the end of the
	 * message.
	 */
	xfer->cs_change = 0;

	if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
		val = MCP251XFD_REG_TEFCON_UINC |
			MCP251XFD_REG_TEFCON_TEFOVIE |
			MCP251XFD_REG_TEFCON_TEFHIE;

		len = mcp251xfd_cmd_prepare_write_reg(priv,
						      &tef_ring->uinc_irq_disable_buf,
						      addr, val, val);
		xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
		xfer->len = len;
	}
}

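/* Pre-build the two-transfer SPI message for one TX object: the first
 * transfer loads the TX object into the chip's FIFO RAM (its length is
 * assigned on the fly at transmit time), the second one writes the ring's
 * shared "request to send" register buffer.
 */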
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}

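/* Initialize the TX ring: set up head/tail, RAM base and FIFO number,
 * pre-build the shared "request to send" (TXREQ | UINC) register write
 * and the per-object SPI messages. @base and @fifo_nr are advanced
 * accordingly.
 */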
static void
mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	tx_ring->base = *base;
	tx_ring->nr = 0;
	tx_ring->fifo_nr = *fifo_nr;

	*base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
	*fifo_nr += 1;

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
}

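/* Initialize all RX rings: set up head/tail, RAM base and FIFO number per
 * ring and pre-build the SPI messages used from the IRQ path (FIFO IRQ
 * enable and tail pointer increment). The 1st RX FIFO optionally carries
 * the IRQ coalescing configuration on its last UINC transfer, see the
 * comment inside the loop.
 */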
static void
mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
	struct mcp251xfd_rx_ring *rx_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->last_valid = timecounter_read(&priv->tc);
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->base = *base;
		rx_ring->nr = i;
		rx_ring->fifo_nr = *fifo_nr;

		*base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
		*fifo_nr += 1;

		/* FIFO IRQ enable */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_RXOVIE |
			MCP251XFD_REG_FIFOCON_TFNRFNIE;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
						      addr, val, val);
		rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
		rx_ring->irq_enable_xfer.len = len;
		spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
						&rx_ring->irq_enable_xfer, 1);

		/* FIFO increment RX tail pointer */
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Set "cs_change"
		 * of the last transfer to "0" to properly deactivate
		 * the chip select at the end of the message.
		 */
		xfer->cs_change = 0;

		/* Use 1st RX-FIFO for IRQ coalescing. If enabled
		 * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
		 * is activated), use the last transfer to disable:
		 *
		 * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
		 *
		 * and enable:
		 *
		 * - TFHRFHIE (Receive FIFO Half Full Interrupt)
		 *   - or -
		 * - TFERFFIE (Receive FIFO Full Interrupt)
		 *
		 * depending on rx_max_coalesce_frames_irq.
		 *
		 * The RXOVIE (Overflow Interrupt) is always enabled.
		 */
		if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
					 priv->rx_obj_num_coalesce_irq)) {
			val = MCP251XFD_REG_FIFOCON_UINC |
				MCP251XFD_REG_FIFOCON_RXOVIE;

			if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
				val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
			else if (priv->rx_obj_num_coalesce_irq)
				val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;

			len = mcp251xfd_cmd_prepare_write_reg(priv,
							      &rx_ring->uinc_irq_disable_buf,
							      addr, val, val);
			xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
			xfer->len = len;
		}
	}
}

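/* Lay out the on-chip RAM: TEF first, then the RX FIFOs, then the TX FIFO.
 * Also checks that the configured rings fit into the available RAM and
 * that the TEF coalescing configuration is consistent.
 */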
int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_rx_ring *rx_ring;
	u16 base = 0, ram_used;
	u8 fifo_nr = 1;
	int err = 0, i;

	netdev_reset_queue(priv->ndev);

	mcp251xfd_ring_init_tef(priv, &base);
	mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
	mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);

	/* mcp251xfd_handle_rxif() will iterate over all RX rings.
	 * Rings with their corresponding bit set in
	 * priv->regs_status.rxif are read out.
	 *
	 * If the chip is configured for only 1 RX-FIFO, and if there
	 * is an RX interrupt pending (RXIF in INT register is set),
	 * it must be the 1st RX-FIFO.
	 *
	 * We mark the RXIF of the 1st FIFO as pending here, so that
	 * we can skip the read of the RXIF register in
	 * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
	 *
	 * If we use more than 1 RX-FIFO, this value gets overwritten
	 * in mcp251xfd_read_regs_status(), so set it unconditionally
	 * here.
	 */
	priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);

	if (priv->tx_obj_num_coalesce_irq) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx_obj_num_coalesce_irq *
			   sizeof(struct mcp251xfd_hw_tef_obj));

		netdev_dbg(priv->ndev,
			   "                         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
			   priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
			   sizeof(struct mcp251xfd_hw_tef_obj));
	} else {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
	}

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
				   priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);

			if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
				continue;

			netdev_dbg(priv->ndev,
				   "                         0x%03x: %2u*%u bytes = %4u bytes\n",
				   mcp251xfd_get_rx_obj_addr(rx_ring,
							     priv->rx_obj_num_coalesce_irq),
				   rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
				   rx_ring->obj_size,
				   (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
				   rx_ring->obj_size);
		} else {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   rx_ring->obj_num, rx_ring->obj_size,
				   rx_ring->obj_num * rx_ring->obj_size);
		}
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: TX:   FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
		   priv->tx->fifo_nr,
		   mcp251xfd_get_tx_obj_addr(priv->tx, 0),
		   priv->tx->obj_num, priv->tx->obj_size,
		   priv->tx->obj_num * priv->tx->obj_size);

	netdev_dbg(priv->ndev,
		   "FIFO setup: free:                             %4d bytes\n",
		   MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));

	ram_used = base - MCP251XFD_RAM_START;
	if (ram_used > MCP251XFD_RAM_SIZE) {
		netdev_err(priv->ndev,
			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
			   ram_used, MCP251XFD_RAM_SIZE);
		err = -ENOMEM;
	}

	if (priv->tx_obj_num_coalesce_irq &&
	    priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
		netdev_err(priv->ndev,
			   "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
			   priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
		err = -EINVAL;
	}

	return err;
}

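/* Free the RX rings allocated by mcp251xfd_ring_alloc(). */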
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
	int i;

	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
		kfree(priv->rx[i]);
		priv->rx[i] = NULL;
	}
}

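/* IRQ coalescing timers: once the RX/TX coalescing timeout has expired,
 * re-enable the FIFO interrupts via the pre-built irq_enable_msg, unless
 * the interface is going down.
 */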
static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
{
	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
						   rx_irq_timer);
	struct mcp251xfd_rx_ring *ring = priv->rx[0];

	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
		return HRTIMER_NORESTART;

	spi_async(priv->spi, &ring->irq_enable_msg);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
{
	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
						   tx_irq_timer);
	struct mcp251xfd_tef_ring *ring = priv->tef;

	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
		return HRTIMER_NORESTART;

	spi_async(priv->spi, &ring->irq_enable_msg);

	return HRTIMER_NORESTART;
}

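/* RAM layout constraints passed to the can_ram helpers: per-object sizes
 * in Classical CAN and CAN FD mode (a TX object includes its TEF element),
 * allowed object counts, number of FIFOs and FIFO depth limits.
 */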
const struct can_ram_config mcp251xfd_ram_config = {
	.rx = {
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
		.min = MCP251XFD_RX_OBJ_NUM_MIN,
		.max = MCP251XFD_RX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
		.def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
		.fifo_num = MCP251XFD_FIFO_RX_NUM,
		.fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
	},
	.tx = {
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_canfd),
		.min = MCP251XFD_TX_OBJ_NUM_MIN,
		.max = MCP251XFD_TX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
		.def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
		.fifo_num = MCP251XFD_FIFO_TX_NUM,
		.fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
	},
	.size = MCP251XFD_RAM_SIZE,
	.fifo_depth = MCP251XFD_FIFO_DEPTH,
};

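/* Allocate and size the rings. When switching between Classical CAN and
 * CAN FD mode, the RAM layout is recalculated from the current ring and
 * coalescing parameters. The requested number of RX objects is split into
 * up to ARRAY_SIZE(priv->rx) rings with power-of-two depths of at most
 * MCP251XFD_FIFO_DEPTH objects each, and the IRQ coalescing hrtimers are
 * set up.
 */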
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	const bool fd_mode = mcp251xfd_is_fd_mode(priv);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_rx_ring *rx_ring;
	u8 tx_obj_size, rx_obj_size;
	u8 rem, i;

	/* switching from CAN-2.0 to CAN-FD mode or vice versa */
	if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
		const struct ethtool_ringparam ring = {
			.rx_pending = priv->rx_obj_num,
			.tx_pending = priv->tx->obj_num,
		};
		const struct ethtool_coalesce ec = {
			.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
			.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
				1 : priv->rx_obj_num_coalesce_irq,
			.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
			.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
				1 : priv->tx_obj_num_coalesce_irq,
		};
		struct can_ram_layout layout;

		can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);

		priv->rx_obj_num = layout.cur_rx;
		priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;

		tx_ring->obj_num = layout.cur_tx;
		priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
	}

	if (fd_mode) {
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
		set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
	} else {
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
		clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
	}

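	/* obj_num_shift_to_u8 scales a ring index in the range [0, obj_num)
	 * up to the full u8 range, i.e. for a power-of-two obj_num:
	 * obj_num << obj_num_shift_to_u8 == 256. Presumably this lets
	 * head/tail differences wrap naturally in u8 arithmetic in the
	 * TEF/RX handling code.
	 */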
	tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
		ilog2(tx_ring->obj_num);
	tx_ring->obj_size = tx_obj_size;

	rem = priv->rx_obj_num;
	for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
		u8 rx_obj_num;

		if (i == 0 && priv->rx_obj_num_coalesce_irq)
			rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
					   MCP251XFD_FIFO_DEPTH);
		else
			rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
					   MCP251XFD_FIFO_DEPTH);
		rem -= rx_obj_num;

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}

		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
			ilog2(rx_obj_num);
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;
	}
	priv->rx_ring_num = i;

	hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;

	hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;

	return 0;
}