// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale QMC HDLC Device Driver
 *
 * Copyright 2023 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/framer/framer.h>
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <soc/fsl/qe/qmc.h>

struct qmc_hdlc_desc {
	struct net_device *netdev;
	struct sk_buff *skb; /* NULL if the descriptor is not in use */
	dma_addr_t dma_addr;
	size_t dma_size;
};

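/*
 * Driver private data.
 *
 * tx_descs is used as a circular ring: tx_out is the index of the next
 * descriptor handed to the QMC channel, and a descriptor becomes free again
 * once the transmit completion callback clears its skb pointer. The transmit
 * queue is stopped as soon as the next descriptor is still busy.
 */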
struct qmc_hdlc {
	struct device *dev;
	struct qmc_chan *qmc_chan;
	struct net_device *netdev;
	struct framer *framer;
	struct mutex carrier_lock; /* Protect carrier detection */
	struct notifier_block nb;
	bool is_crc32;
	spinlock_t tx_lock; /* Protect tx descriptors */
	struct qmc_hdlc_desc tx_descs[8];
	unsigned int tx_out;
	struct qmc_hdlc_desc rx_descs[4];
	u32 slot_map;
};

static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
{
	return dev_to_hdlc(netdev)->priv;
}

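/*
 * Mirror the framer link state to the netdev carrier state. Serialized with
 * carrier_lock because it runs both from the framer notifier and from
 * ndo_open. Without a framer there is no carrier detection and this is a
 * no-op.
 */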
static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	guard(mutex)(&qmc_hdlc->carrier_lock);

	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		return ret;
	}
	if (framer_status.link_is_on)
		netif_carrier_on(qmc_hdlc->netdev);
	else
		netif_carrier_off(qmc_hdlc->netdev);

	return 0;
}

static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
				    void *data)
{
	struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
	int ret;

	if (action != FRAMER_EVENT_STATUS)
		return NOTIFY_DONE;

	ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
	return ret ? NOTIFY_DONE : NOTIFY_OK;
}

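/*
 * Power the framer on, check that status retrieval is supported, and
 * register the notifier used for carrier detection. Called from ndo_open.
 */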
static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_power_on(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
		return ret;
	}

	/* Be sure that get_status is supported */
	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		goto framer_power_off;
	}

	qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
	ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
		goto framer_power_off;
	}

	return 0;

framer_power_off:
	framer_power_off(qmc_hdlc->framer);
	return ret;
}

static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
	framer_power_off(qmc_hdlc->framer);
}

static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
				     const te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (if_iface) {
	case IF_IFACE_E1:
		config.iface = FRAMER_IFACE_E1;
		break;
	case IF_IFACE_T1:
		config.iface = FRAMER_IFACE_T1;
		break;
	default:
		return -EINVAL;
	}

	switch (te1->clock_type) {
	case CLOCK_DEFAULT:
		/* Keep current value */
		break;
	case CLOCK_EXT:
		config.clock_type = FRAMER_CLOCK_EXT;
		break;
	case CLOCK_INT:
		config.clock_type = FRAMER_CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	config.line_clock_rate = te1->clock_rate;

	return framer_set_config(qmc_hdlc->framer, &config);
}

static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer) {
		*if_iface = IF_IFACE_E1;
		return 0;
	}

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (config.iface) {
	case FRAMER_IFACE_E1:
		*if_iface = IF_IFACE_E1;
		break;
	case FRAMER_IFACE_T1:
		*if_iface = IF_IFACE_T1;
		break;
	default:
		return -EINVAL; /* Avoid returning an undefined iface type */
	}

	if (!te1)
		return 0; /* Only iface type requested */

	switch (config.clock_type) {
	case FRAMER_CLOCK_EXT:
		te1->clock_type = CLOCK_EXT;
		break;
	case FRAMER_CLOCK_INT:
		te1->clock_type = CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	te1->clock_rate = config.line_clock_rate;
	return 0;
}

static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
{
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_init(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_exit(qmc_hdlc->framer);
}

static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);

#define QMC_HDLC_RX_ERROR_FLAGS				\
	(QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |	\
	 QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)

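/*
 * Receive completion callback, invoked by the QMC channel when a read
 * transfer finishes. It unmaps the buffer, accounts any errors reported in
 * @flags, strips the hardware-appended CRC, hands the frame to the HDLC
 * stack, and finally re-queues the same descriptor for the next frame.
 */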
static void qmc_hdlc_recv_complete(void *context, size_t length, unsigned int flags)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	size_t crc_size;
	int ret;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);

	if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
		netdev->stats.rx_errors++;
		if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
			netdev->stats.rx_over_errors++;
		if (flags & QMC_RX_FLAG_HDLC_UNA) /* Received bits not a multiple of 8 */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
			netdev->stats.rx_crc_errors++;
		kfree_skb(desc->skb);
		goto re_queue;
	}

	/* Discard the CRC */
	crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
	if (length < crc_size) {
		netdev->stats.rx_length_errors++;
		kfree_skb(desc->skb);
		goto re_queue;
	}
	length -= crc_size;

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += length;

	skb_put(desc->skb, length);
	desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
	netif_rx(desc->skb);

re_queue:
	/* Re-queue a transfer using the same descriptor */
	ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
	if (ret) {
		dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
		netdev->stats.rx_errors++;
	}
}

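/*
 * Allocate a receive skb, map it for DMA, and submit a read transfer to the
 * QMC channel. On success the descriptor is owned by the hardware until the
 * completion callback runs; on failure the skb is freed and the descriptor
 * is released (skb set back to NULL).
 */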
static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
{
	int ret;

	desc->skb = dev_alloc_skb(size);
	if (!desc->skb)
		return -ENOMEM;

	desc->dma_size = size;
	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_FROM_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret)
		goto free_skb;

	ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				   qmc_hdlc_recv_complete, desc);
	if (ret)
		goto dma_unmap;

	return 0;

dma_unmap:
	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
free_skb:
	kfree_skb(desc->skb);
	desc->skb = NULL;
	return ret;
}

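/*
 * Transmit completion callback. Unmaps the buffer, releases the descriptor
 * under tx_lock, and restarts the transmit queue if it had been stopped
 * while waiting for a free descriptor.
 */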
static void qmc_hdlc_xmit_complete(void *context)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	struct sk_buff *skb;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		skb = desc->skb;
		desc->skb = NULL; /* Release the descriptor */
		if (netif_queue_stopped(netdev))
			netif_wake_queue(netdev);
	}

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);
}

static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
{
	int ret;

	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to map skb\n");
		return ret;
	}

	ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				    qmc_hdlc_xmit_complete, desc);
	if (ret) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
		return ret;
	}

	return 0;
}

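/*
 * hdlc_device xmit hook. Grabs the next descriptor of the tx ring, maps the
 * skb and submits it to the QMC channel. The queue is stopped as soon as the
 * following descriptor is still in use, so the ring can never overflow.
 */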
static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int err;

	guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);

	desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
	if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
		/* Should never happen.
		 * Previous xmit should have already stopped the queue.
		 */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	desc->netdev = netdev;
	desc->dma_size = skb->len;
	desc->skb = skb;
	err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
	if (err) {
		desc->skb = NULL; /* Release the descriptor */
		if (err == -EBUSY) {
			netif_stop_queue(netdev);
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);

	if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
		netif_stop_queue(netdev);

	return NETDEV_TX_OK;
}

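/*
 * Translate a logical slot map (bit i set means "use the i-th usable
 * timeslot") into the physical timeslot masks expected by the QMC channel.
 * bitmap_scatter() places the n-th bit of @slot_map at the position of the
 * n-th set bit of the available mask. For instance, with timeslots 4..7
 * available (avail = 0xf0), slot_map = 0x5 selects physical timeslots 4 and
 * 6 (ts_mask = 0x50). The translation fails if @slot_map requests more
 * timeslots than are available.
 */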
static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
				   u32 slot_map, struct qmc_chan_ts_info *ts_info)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);

	/* Tx and Rx available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(map, slot_map);
	bitmap_scatter(ts_mask, map, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
			map, ts_mask_avail, ts_mask);
		return -EINVAL;
	}

	bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
	ts_info->rx_ts_mask = ts_info->tx_ts_mask;
	return 0;
}

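/*
 * Reverse translation: compress the physical timeslot mask back into a
 * logical slot map using bitmap_gather(), the inverse of bitmap_scatter()
 * above. Fails if the result does not fit in the 32-bit slot_map exposed
 * through te1_settings.
 */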
static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
				  const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);
	u32 slot_array[2];

	/* Tx and Rx masks and available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}
	if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
		dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask, ts_info->tx_ts_mask);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
	bitmap_gather(map, ts_mask, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	bitmap_to_arr32(slot_array, map, 64);
	if (slot_array[1]) {
		dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	*slot_map = slot_array[0];
	return 0;
}

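/*
 * Apply new E1/T1 settings: translate the requested slot map, program it
 * into the QMC channel, then forward the interface type and clock settings
 * to the framer, if any.
 */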
static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
{
	struct qmc_chan_ts_info ts_info;
	int ret;

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
		return ret;
	}
	ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
	if (ret)
		return ret;

	ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
		return ret;
	}

	qmc_hdlc->slot_map = te1->slot_map;

	ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret);
		return ret;
	}

	return 0;
}

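/*
 * SIOCWANDEV handler. For IF_GET_IFACE, a caller that passes a buffer too
 * small for te1_settings gets only the interface type, plus -ENOBUFS and the
 * size it should have provided, following the usual generic HDLC
 * size-negotiation convention. Setting the interface requires CAP_NET_ADMIN
 * and an interface that is down.
 */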
static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	te1_settings te1;
	int ret;

	switch (ifs->type) {
	case IF_GET_IFACE:
		if (ifs->size < sizeof(te1)) {
			/* Retrieve type only */
			ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
			if (ret)
				return ret;

			if (!ifs->size)
				return 0; /* only type requested */

			ifs->size = sizeof(te1); /* data size wanted */
			return -ENOBUFS;
		}

		memset(&te1, 0, sizeof(te1));

		/* Retrieve info from framer */
		ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
		if (ret)
			return ret;

		/* Update slot_map */
		te1.slot_map = qmc_hdlc->slot_map;

		if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
			return -EFAULT;
		return 0;

	case IF_IFACE_E1:
	case IF_IFACE_T1:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (netdev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
			return -EFAULT;

		return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);

	default:
		return hdlc_ioctl(netdev, ifs);
	}
}

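/*
 * ndo_open: start the framer, configure the QMC channel for HDLC with the
 * proper buffer sizes, pre-queue as many receive descriptors as the channel
 * accepts, then start the channel and the transmit queue.
 */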
static int qmc_hdlc_open(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_chan_param chan_param;
	struct qmc_hdlc_desc *desc;
	int ret;
	int i;

	ret = qmc_hdlc_framer_start(qmc_hdlc);
	if (ret)
		return ret;

	ret = hdlc_open(netdev);
	if (ret)
		goto framer_stop;

	/* Update carrier */
	qmc_hdlc_framer_set_carrier(qmc_hdlc);

	chan_param.mode = QMC_HDLC;
	/* HDLC_MAX_MRU + 4 for the CRC
	 * HDLC_MAX_MRU + 4 + 8 for the CRC and some extra space needed by the QMC
	 */
	chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
	chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
	chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
	ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
		goto hdlc_close;
	}

	/* Queue as many recv descriptors as possible */
	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];

		desc->netdev = netdev;
		ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
		if (ret == -EBUSY && i != 0)
			break; /* The QMC channel capacity is fully used */
		if (ret)
			goto free_desc;
	}

	ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	if (ret) {
		dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
		goto free_desc;
	}

	netif_start_queue(netdev);

	return 0;

free_desc:
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	while (i--) {
		desc = &qmc_hdlc->rx_descs[i];
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}
hdlc_close:
	hdlc_close(netdev);
framer_stop:
	qmc_hdlc_framer_stop(qmc_hdlc);
	return ret;
}

static int qmc_hdlc_close(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int i;

	qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);

	netif_stop_queue(netdev);

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
		desc = &qmc_hdlc->tx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_TO_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	hdlc_close(netdev);
	qmc_hdlc_framer_stop(qmc_hdlc);
	return 0;
}

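/*
 * Generic HDLC attach hook, called when the protocol is (re)configured.
 * Only NRZ encoding is supported; the parity selects between 16-bit and
 * 32-bit frame CRCs, applied to the QMC channel on the next open.
 */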
static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
			   unsigned short parity)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch (parity) {
	case PARITY_CRC16_PR1_CCITT:
		qmc_hdlc->is_crc32 = false;
		break;
	case PARITY_CRC32_PR1_CCITT:
		qmc_hdlc->is_crc32 = true;
		break;
	default:
		dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
		return -EINVAL;
	}

	return 0;
}

static const struct net_device_ops qmc_hdlc_netdev_ops = {
	.ndo_open       = qmc_hdlc_open,
	.ndo_stop       = qmc_hdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = qmc_hdlc_ioctl,
};

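/*
 * Bind to the device-tree node: get the QMC channel (which must be in HDLC
 * mode), derive the initial slot map from its timeslot configuration, grab
 * the optional framer, then allocate and register the HDLC network device.
 */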
static int qmc_hdlc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qmc_chan_ts_info ts_info;
	struct qmc_hdlc *qmc_hdlc;
	struct qmc_chan_info info;
	hdlc_device *hdlc;
	int ret;

	qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
	if (!qmc_hdlc)
		return -ENOMEM;

	qmc_hdlc->dev = dev;
	spin_lock_init(&qmc_hdlc->tx_lock);
	mutex_init(&qmc_hdlc->carrier_lock);

	qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
	if (IS_ERR(qmc_hdlc->qmc_chan))
		return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
				     "get QMC channel failed\n");

	ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel info failed\n");

	if (info.mode != QMC_HDLC)
		return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
				     info.mode);

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");

	ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
	if (ret)
		return ret;

	qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
	if (IS_ERR(qmc_hdlc->framer))
		return PTR_ERR(qmc_hdlc->framer);

	ret = qmc_hdlc_framer_init(qmc_hdlc);
	if (ret)
		return ret;

	qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
	if (!qmc_hdlc->netdev) {
		ret = -ENOMEM;
		goto framer_exit;
	}

	hdlc = dev_to_hdlc(qmc_hdlc->netdev);
	hdlc->attach = qmc_hdlc_attach;
	hdlc->xmit = qmc_hdlc_xmit;
	SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
	qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
	qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
	ret = register_hdlc_device(qmc_hdlc->netdev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to register hdlc device\n");
		goto free_netdev;
	}

	platform_set_drvdata(pdev, qmc_hdlc);
	return 0;

free_netdev:
	free_netdev(qmc_hdlc->netdev);
framer_exit:
	qmc_hdlc_framer_exit(qmc_hdlc);
	return ret;
}

static void qmc_hdlc_remove(struct platform_device *pdev)
{
	struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);

	unregister_hdlc_device(qmc_hdlc->netdev);
	free_netdev(qmc_hdlc->netdev);
	qmc_hdlc_framer_exit(qmc_hdlc);
}

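/*
 * A minimal device-tree sketch of how this driver binds, assuming the
 * fsl,qmc-hdlc binding; the &qmc_chan16 and &framer labels below are
 * illustrative placeholders, not taken from this file:
 *
 *	hdlc {
 *		compatible = "fsl,qmc-hdlc";
 *		fsl,qmc-chan = <&qmc_chan16>;
 *		fsl,framer = <&framer>;
 *	};
 */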
static const struct of_device_id qmc_hdlc_id_table[] = {
	{ .compatible = "fsl,qmc-hdlc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);

static struct platform_driver qmc_hdlc_driver = {
	.driver = {
		.name = "fsl-qmc-hdlc",
		.of_match_table = qmc_hdlc_id_table,
	},
	.probe = qmc_hdlc_probe,
	.remove_new = qmc_hdlc_remove,
};
module_platform_driver(qmc_hdlc_driver);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("QMC HDLC driver");
MODULE_LICENSE("GPL");