xref: /linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020, 2021 Pengutronix,
6 //               Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <asm/unaligned.h>
16 
17 #include "mcp251xfd.h"
18 #include "mcp251xfd-ram.h"
19 
/* Prepare a single register write command in @write_reg_buf.
 *
 * Only the bytes of @val covered by @mask are transferred: the SPI
 * command addresses the first byte set in @mask and carries all bytes
 * up to and including the last byte set.
 *
 * Depending on the MCP251XFD_QUIRK_CRC_REG quirk the command is
 * emitted as a plain write, as a single-byte "safe" write followed by
 * a CRC, or as a CRC write with the payload length encoded into the
 * command itself.
 *
 * Returns the total number of bytes (command + payload [+ CRC]) to
 * transfer over SPI.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Span of bytes within the 32 bit register that actually need
	 * to be written.
	 */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	/* Place the write command, then copy the payload (shifted so
	 * the first masked byte is at offset 0) right behind it.
	 */
	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
		/* Plain write: only account for the command bytes */
		len += sizeof(write_reg_buf->nocrc.cmd);
	} else if (len == 1) {
		u16 crc;

		/* CRC - computed over command + 1 byte payload */
		len += sizeof(write_reg_buf->safe.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->safe.crc);
	} else {
		u16 crc;

		/* Multi-byte CRC write: the payload length is part of
		 * the command and must be set before computing the CRC.
		 */
		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	}

	return len;
}
65 
/* Initialize the TEF (Transmit Event FIFO) ring: reset head and tail,
 * reserve controller RAM for the TEF objects and pre-build the SPI
 * messages used at runtime to enable the TEF IRQs and to increment
 * the TEF tail pointer (UINC).
 *
 * @base: out - set to the controller RAM address right behind the
 *	reserved TEF area.
 */
static void
mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* TEF- and TX-FIFO have same number of objects */
	*base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);

	/* FIFO IRQ enable */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;

	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
					      addr, val, val);
	tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
	tef_ring->irq_enable_xfer.len = len;
	spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
					&tef_ring->irq_enable_xfer, 1);

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	/* All UINC transfers share the same prepared register write;
	 * cs_change toggles the chip select between transfers so each
	 * one is seen as a separate command by the controller.
	 */
	for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
		xfer = &tef_ring->uinc_xfer[i];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as
	 * data. Set "cs_change" of the last transfer to "0" to
	 * properly deactivate the chip select at the end of the
	 * message.
	 * (Note: "xfer" still points at the last array element here.)
	 */
	xfer->cs_change = 0;

	/* TX IRQ coalescing enabled: make the last UINC transfer also
	 * replace the "TEF not empty" IRQ (TEFNEIE) with the "TEF half
	 * full" IRQ (TEFHIE); irq_enable_msg above re-enables the
	 * normal IRQs (used e.g. by the tx_irq_timer).
	 */
	if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
		val = MCP251XFD_REG_TEFCON_UINC |
			MCP251XFD_REG_TEFCON_TEFOVIE |
			MCP251XFD_REG_TEFCON_TEFHIE;

		len = mcp251xfd_cmd_prepare_write_reg(priv,
						      &tef_ring->uinc_irq_disable_buf,
						      addr, val, val);
		xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
		xfer->len = len;
	}
}
131 
132 static void
133 mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
134 			      const struct mcp251xfd_tx_ring *ring,
135 			      struct mcp251xfd_tx_obj *tx_obj,
136 			      const u8 rts_buf_len,
137 			      const u8 n)
138 {
139 	struct spi_transfer *xfer;
140 	u16 addr;
141 
142 	/* FIFO load */
143 	addr = mcp251xfd_get_tx_obj_addr(ring, n);
144 	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
145 		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
146 						     addr);
147 	else
148 		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
149 					      addr);
150 
151 	xfer = &tx_obj->xfer[0];
152 	xfer->tx_buf = &tx_obj->buf;
153 	xfer->len = 0;	/* actual len is assigned on the fly */
154 	xfer->cs_change = 1;
155 	xfer->cs_change_delay.value = 0;
156 	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
157 
158 	/* FIFO request to send */
159 	xfer = &tx_obj->xfer[1];
160 	xfer->tx_buf = &ring->rts_buf;
161 	xfer->len = rts_buf_len;
162 
163 	/* SPI message */
164 	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
165 					ARRAY_SIZE(tx_obj->xfer));
166 }
167 
168 static void
169 mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
170 {
171 	struct mcp251xfd_tx_ring *tx_ring;
172 	struct mcp251xfd_tx_obj *tx_obj;
173 	u32 val;
174 	u16 addr;
175 	u8 len;
176 	int i;
177 
178 	tx_ring = priv->tx;
179 	tx_ring->head = 0;
180 	tx_ring->tail = 0;
181 	tx_ring->base = *base;
182 	tx_ring->nr = 0;
183 	tx_ring->fifo_nr = *fifo_nr;
184 
185 	*base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
186 	*fifo_nr += 1;
187 
188 	/* FIFO request to send */
189 	addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
190 	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
191 	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
192 					      addr, val, val);
193 
194 	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
195 		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
196 }
197 
/* Initialize all RX rings: reset head/tail, reserve controller RAM
 * for each ring's objects, claim one hardware FIFO per ring and
 * pre-build the SPI messages used at runtime to enable the FIFO IRQs
 * and to increment the RX tail pointer (UINC).
 *
 * @base: in/out - controller RAM address; advanced past each ring's
 *	objects.
 * @fifo_nr: in/out - next free hardware FIFO number; one per ring.
 */
static void
mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
	struct mcp251xfd_rx_ring *rx_ring;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		/* Seed the "last valid" timestamp from the current
		 * timecounter value.
		 */
		rx_ring->last_valid = timecounter_read(&priv->tc);
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->base = *base;
		rx_ring->nr = i;
		rx_ring->fifo_nr = *fifo_nr;

		*base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
		*fifo_nr += 1;

		/* FIFO IRQ enable */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_RXOVIE |
			MCP251XFD_REG_FIFOCON_TFNRFNIE;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
						      addr, val, val);
		rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
		rx_ring->irq_enable_xfer.len = len;
		spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
						&rx_ring->irq_enable_xfer, 1);

		/* FIFO increment RX tail pointer
		 * (note: "addr" still points to this ring's FIFOCON
		 * register)
		 */
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Set "cs_change"
		 * of the last transfer to "0" to properly deactivate
		 * the chip select at the end of the message.
		 * ("xfer" still points at the last array element.)
		 */
		xfer->cs_change = 0;

		/* Use 1st RX-FIFO for IRQ coalescing. If enabled
		 * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
		 * is activated), use the last transfer to disable:
		 *
		 * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
		 *
		 * and enable:
		 *
		 * - TFHRFHIE (Receive FIFO Half Full Interrupt)
		 *   - or -
		 * - TFERFFIE (Receive FIFO Full Interrupt)
		 *
		 * depending on rx_max_coalesce_frames_irq.
		 *
		 * The RXOVIE (Overflow Interrupt) is always enabled.
		 */
		if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
					 priv->rx_obj_num_coalesce_irq)) {
			val = MCP251XFD_REG_FIFOCON_UINC |
				MCP251XFD_REG_FIFOCON_RXOVIE;

			if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
				val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
			else if (priv->rx_obj_num_coalesce_irq)
				val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;

			len = mcp251xfd_cmd_prepare_write_reg(priv,
							      &rx_ring->uinc_irq_disable_buf,
							      addr, val, val);
			xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
			xfer->len = len;
		}
	}
}
287 
/* Lay out the controller RAM (TEF, RX and TX rings), pre-build the
 * SPI messages and verify that the configuration fits into the
 * on-chip RAM.
 *
 * Returns 0 on success or -ENOMEM if the rings would need more RAM
 * than the chip provides.
 */
int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_rx_ring *rx_ring;
	u16 base = 0, ram_used;
	u8 fifo_nr = 1;	/* data FIFO numbering starts at 1 in this driver */
	int i;

	netdev_reset_queue(priv->ndev);

	/* RAM layout order: TEF, then RX rings, then TX ring */
	mcp251xfd_ring_init_tef(priv, &base);
	mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
	mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);

	/* mcp251xfd_handle_rxif() will iterate over all RX rings.
	 * Rings with their corresponding bit set in
	 * priv->regs_status.rxif are read out.
	 *
	 * If the chip is configured for only 1 RX-FIFO, and if there
	 * is an RX interrupt pending (RXIF in INT register is set),
	 * it must be the 1st RX-FIFO.
	 *
	 * We mark the RXIF of the 1st FIFO as pending here, so that
	 * we can skip the read of the RXIF register in
	 * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
	 *
	 * If we use more than 1 RX-FIFO, this value gets overwritten
	 * in mcp251xfd_read_regs_status(), so set it unconditionally
	 * here.
	 */
	priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);

	/* Debug output of the resulting RAM layout. With TX IRQ
	 * coalescing the TEF region is printed in two parts: the
	 * coalesced objects and the remainder.
	 */
	if (priv->tx_obj_num_coalesce_irq) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx_obj_num_coalesce_irq *
			   sizeof(struct mcp251xfd_hw_tef_obj));

		netdev_dbg(priv->ndev,
			   "                         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
			   priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
			   sizeof(struct mcp251xfd_hw_tef_obj),
			   (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
			   sizeof(struct mcp251xfd_hw_tef_obj));
	} else {
		netdev_dbg(priv->ndev,
			   "FIFO setup: TEF:         0x%03x: %2d*%zu bytes = %4zu bytes\n",
			   mcp251xfd_get_tef_obj_addr(0),
			   priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
			   priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
	}

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		/* The 1st RX ring may likewise be split into a
		 * coalesced part and a remainder.
		 */
		if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
				   priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);

			if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
				continue;

			netdev_dbg(priv->ndev,
				   "                         0x%03x: %2u*%u bytes = %4u bytes\n",
				   mcp251xfd_get_rx_obj_addr(rx_ring,
							     priv->rx_obj_num_coalesce_irq),
				   rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
				   rx_ring->obj_size,
				   (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
				   rx_ring->obj_size);
		} else {
			netdev_dbg(priv->ndev,
				   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
				   rx_ring->nr, rx_ring->fifo_nr,
				   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
				   rx_ring->obj_num, rx_ring->obj_size,
				   rx_ring->obj_num * rx_ring->obj_size);
		}
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: TX:   FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
		   priv->tx->fifo_nr,
		   mcp251xfd_get_tx_obj_addr(priv->tx, 0),
		   priv->tx->obj_num, priv->tx->obj_size,
		   priv->tx->obj_num * priv->tx->obj_size);

	netdev_dbg(priv->ndev,
		   "FIFO setup: free:                             %4d bytes\n",
		   MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));

	/* "base" now points behind the last reserved object; reject
	 * layouts that exceed the chip's RAM.
	 */
	ram_used = base - MCP251XFD_RAM_START;
	if (ram_used > MCP251XFD_RAM_SIZE) {
		netdev_err(priv->ndev,
			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
			   ram_used, MCP251XFD_RAM_SIZE);
		return -ENOMEM;
	}

	return 0;
}
394 
395 void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
396 {
397 	int i;
398 
399 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
400 		kfree(priv->rx[i]);
401 		priv->rx[i] = NULL;
402 	}
403 }
404 
405 static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
406 {
407 	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
408 						   rx_irq_timer);
409 	struct mcp251xfd_rx_ring *ring = priv->rx[0];
410 
411 	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
412 		return HRTIMER_NORESTART;
413 
414 	spi_async(priv->spi, &ring->irq_enable_msg);
415 
416 	return HRTIMER_NORESTART;
417 }
418 
419 static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
420 {
421 	struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
422 						   tx_irq_timer);
423 	struct mcp251xfd_tef_ring *ring = priv->tef;
424 
425 	if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
426 		return HRTIMER_NORESTART;
427 
428 	spi_async(priv->spi, &ring->irq_enable_msg);
429 
430 	return HRTIMER_NORESTART;
431 }
432 
/* RAM layout constraints of the MCP251xFD family, consumed by the
 * generic CAN RAM layout helpers (can_ram_get_layout()) to size the
 * RX and TX rings. A TX object is accounted together with its TEF
 * element, as TEF- and TX-FIFO have the same number of objects.
 */
const struct can_ram_config mcp251xfd_ram_config = {
	.rx = {
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
		.min = MCP251XFD_RX_OBJ_NUM_MIN,
		.max = MCP251XFD_RX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
		.def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
		.fifo_num = MCP251XFD_FIFO_RX_NUM,
		.fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
	},
	.tx = {
		/* TX object size includes the corresponding TEF object */
		.size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_can),
		.size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
			sizeof(struct mcp251xfd_hw_tx_obj_canfd),
		.min = MCP251XFD_TX_OBJ_NUM_MIN,
		.max = MCP251XFD_TX_OBJ_NUM_MAX,
		.def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
		.def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
		.fifo_num = MCP251XFD_FIFO_TX_NUM,
		.fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
		.fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
	},
	.size = MCP251XFD_RAM_SIZE,
	.fifo_depth = MCP251XFD_FIFO_DEPTH,
};
461 
462 int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
463 {
464 	const bool fd_mode = mcp251xfd_is_fd_mode(priv);
465 	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
466 	struct mcp251xfd_rx_ring *rx_ring;
467 	u8 tx_obj_size, rx_obj_size;
468 	u8 rem, i;
469 
470 	/* switching from CAN-2.0 to CAN-FD mode or vice versa */
471 	if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
472 		struct can_ram_layout layout;
473 
474 		can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
475 		priv->rx_obj_num = layout.default_rx;
476 		tx_ring->obj_num = layout.default_tx;
477 	}
478 
479 	if (fd_mode) {
480 		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
481 		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
482 		set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
483 	} else {
484 		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
485 		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
486 		clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
487 	}
488 
489 	tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
490 		ilog2(tx_ring->obj_num);
491 	tx_ring->obj_size = tx_obj_size;
492 
493 	rem = priv->rx_obj_num;
494 	for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
495 		u8 rx_obj_num;
496 
497 		if (i == 0 && priv->rx_obj_num_coalesce_irq)
498 			rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
499 					   MCP251XFD_FIFO_DEPTH);
500 		else
501 			rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
502 					   MCP251XFD_FIFO_DEPTH);
503 		rem -= rx_obj_num;
504 
505 		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
506 				  GFP_KERNEL);
507 		if (!rx_ring) {
508 			mcp251xfd_ring_free(priv);
509 			return -ENOMEM;
510 		}
511 
512 		rx_ring->obj_num = rx_obj_num;
513 		rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
514 			ilog2(rx_obj_num);
515 		rx_ring->obj_size = rx_obj_size;
516 		priv->rx[i] = rx_ring;
517 	}
518 	priv->rx_ring_num = i;
519 
520 	hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
521 	priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
522 
523 	hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
524 	priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
525 
526 	return 0;
527 }
528