// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale QMC HDLC Device Driver
 *
 * Copyright 2023 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/framer/framer.h>
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <soc/fsl/qe/qmc.h>

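/*
 * A qmc_hdlc_desc describes one in-flight DMA transfer. A descriptor is
 * free when its skb pointer is NULL. The driver keeps a small ring of rx
 * descriptors, all queued at open time and recycled from the receive
 * completion callback, and a ring of tx descriptors indexed by tx_out and
 * protected by tx_lock, claimed in the xmit path and released from the
 * transmit completion callback.
 */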
struct qmc_hdlc_desc {
	struct net_device *netdev;
	struct sk_buff *skb; /* NULL if the descriptor is not in use */
	dma_addr_t dma_addr;
	size_t dma_size;
};

struct qmc_hdlc {
	struct device *dev;
	struct qmc_chan *qmc_chan;
	struct net_device *netdev;
	struct framer *framer;
	struct mutex carrier_lock; /* Protect carrier detection */
	struct notifier_block nb;
	bool is_crc32;
	spinlock_t tx_lock; /* Protect tx descriptors */
	struct qmc_hdlc_desc tx_descs[8];
	unsigned int tx_out;
	struct qmc_hdlc_desc rx_descs[4];
	u32 slot_map;
};

static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
{
	return dev_to_hdlc(netdev)->priv;
}

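/*
 * Carrier detection relies on the optional framer. framer_get_status() may
 * sleep, so carrier_lock is a mutex rather than a spinlock: it serializes
 * carrier updates coming from both the netdev open path and the framer
 * event notifier.
 */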
static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	guard(mutex)(&qmc_hdlc->carrier_lock);

	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		return ret;
	}
	if (framer_status.link_is_on)
		netif_carrier_on(qmc_hdlc->netdev);
	else
		netif_carrier_off(qmc_hdlc->netdev);

	return 0;
}

static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
				    void *data)
{
	struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
	int ret;

	if (action != FRAMER_EVENT_STATUS)
		return NOTIFY_DONE;

	ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
	return ret ? NOTIFY_DONE : NOTIFY_OK;
}

static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
{
	struct framer_status framer_status;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_power_on(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
		return ret;
	}

	/* Be sure that get_status is supported */
	ret = framer_get_status(qmc_hdlc->framer, &framer_status);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
		goto framer_power_off;
	}

	qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
	ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
		goto framer_power_off;
	}

	return 0;

framer_power_off:
	framer_power_off(qmc_hdlc->framer);
	return ret;
}

static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
	framer_power_off(qmc_hdlc->framer);
}

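/*
 * Translate the wan ioctl interface and clock settings (IF_IFACE_*,
 * CLOCK_*) to their generic framer counterparts (FRAMER_IFACE_*,
 * FRAMER_CLOCK_*) and back. Without a framer, setting the iface is a
 * no-op and the reported iface defaults to E1.
 */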
static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
				     const te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (if_iface) {
	case IF_IFACE_E1:
		config.iface = FRAMER_IFACE_E1;
		break;
	case IF_IFACE_T1:
		config.iface = FRAMER_IFACE_T1;
		break;
	default:
		return -EINVAL;
	}

	switch (te1->clock_type) {
	case CLOCK_DEFAULT:
		/* Keep current value */
		break;
	case CLOCK_EXT:
		config.clock_type = FRAMER_CLOCK_EXT;
		break;
	case CLOCK_INT:
		config.clock_type = FRAMER_CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	config.line_clock_rate = te1->clock_rate;

	return framer_set_config(qmc_hdlc->framer, &config);
}

static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
{
	struct framer_config config;
	int ret;

	if (!qmc_hdlc->framer) {
		*if_iface = IF_IFACE_E1;
		return 0;
	}

	ret = framer_get_config(qmc_hdlc->framer, &config);
	if (ret)
		return ret;

	switch (config.iface) {
	case FRAMER_IFACE_E1:
		*if_iface = IF_IFACE_E1;
		break;
	case FRAMER_IFACE_T1:
		*if_iface = IF_IFACE_T1;
		break;
	}

	if (!te1)
		return 0; /* Only iface type requested */

	switch (config.clock_type) {
	case FRAMER_CLOCK_EXT:
		te1->clock_type = CLOCK_EXT;
		break;
	case FRAMER_CLOCK_INT:
		te1->clock_type = CLOCK_INT;
		break;
	default:
		return -EINVAL;
	}
	te1->clock_rate = config.line_clock_rate;
	return 0;
}

static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
{
	int ret;

	if (!qmc_hdlc->framer)
		return 0;

	ret = framer_init(qmc_hdlc->framer);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
{
	if (!qmc_hdlc->framer)
		return;

	framer_exit(qmc_hdlc->framer);
}

static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);

#define QMC_HDLC_RX_ERROR_FLAGS				\
	(QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |	\
	 QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)

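/*
 * Receive completion callback. The QMC reports error conditions through
 * flags: overflow, non-octet-aligned frames, aborted frames, and CRC
 * errors are folded into the corresponding netdev rx statistics. On
 * success the skb is passed up the stack; in all cases the descriptor is
 * immediately re-queued so the receive ring stays full.
 */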
static void qmc_hdlc_recv_complete(void *context, size_t length, unsigned int flags)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	int ret;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);

	if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
		netdev->stats.rx_errors++;
		if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
			netdev->stats.rx_over_errors++;
		if (flags & QMC_RX_FLAG_HDLC_UNA) /* Bits received not a multiple of 8 */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
			netdev->stats.rx_frame_errors++;
		if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
			netdev->stats.rx_crc_errors++;
		kfree_skb(desc->skb);
	} else {
		/* The QMC reported length includes the CRC; discard it */
		length -= qmc_hdlc->is_crc32 ? 4 : 2;

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;

		skb_put(desc->skb, length);
		desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
		netif_rx(desc->skb);
	}

	/* Re-queue a transfer using the same descriptor */
	ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
	if (ret) {
		dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
		netdev->stats.rx_errors++;
	}
}

static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
{
	int ret;

	desc->skb = dev_alloc_skb(size);
	if (!desc->skb)
		return -ENOMEM;

	desc->dma_size = size;
	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_FROM_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret)
		goto free_skb;

	ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				   qmc_hdlc_recv_complete, desc);
	if (ret)
		goto dma_unmap;

	return 0;

dma_unmap:
	dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
free_skb:
	kfree_skb(desc->skb);
	desc->skb = NULL;
	return ret;
}

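/*
 * Transmit completion callback. The descriptor is released (skb set back
 * to NULL) under tx_lock, and the netif queue is woken up if the xmit
 * path had stopped it while waiting for a free descriptor.
 */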
static void qmc_hdlc_xmit_complete(void *context)
{
	struct qmc_hdlc_desc *desc = context;
	struct net_device *netdev;
	struct qmc_hdlc *qmc_hdlc;
	struct sk_buff *skb;

	netdev = desc->netdev;
	qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		skb = desc->skb;
		desc->skb = NULL; /* Release the descriptor */
		if (netif_queue_stopped(netdev))
			netif_wake_queue(netdev);
	}

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);
}

static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
{
	int ret;

	desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
					desc->dma_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to map skb\n");
		return ret;
	}

	ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
				    qmc_hdlc_xmit_complete, desc);
	if (ret) {
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
		dev_err(qmc_hdlc->dev, "qmc chan write failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

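/*
 * ndo xmit handler. Descriptors are used in order: tx_out indexes the
 * next free one. If queueing succeeds and the following descriptor is
 * still in flight, the netif queue is stopped; it will be woken up by
 * qmc_hdlc_xmit_complete() once a descriptor is released.
 */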
static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int err;

	guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);

	desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
	if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
		/* Should never happen.
		 * Previous xmit should have already stopped the queue.
		 */
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	desc->netdev = netdev;
	desc->dma_size = skb->len;
	desc->skb = skb;
	err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
	if (err) {
		desc->skb = NULL; /* Release the descriptor */
		if (err == -EBUSY) {
			netif_stop_queue(netdev);
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);

	if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
		netif_stop_queue(netdev);

	return NETDEV_TX_OK;
}

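/*
 * The wan te1_settings slot map is expressed over the available timeslots
 * only, while the QMC masks are expressed over all 64 possible timeslots.
 * bitmap_scatter() places the n-th bit of the slot map at the position of
 * the n-th available timeslot. For example, with timeslots 8..15 available
 * (ts_mask_avail = 0xff00), slot_map = 0x5 (use the 1st and 3rd available
 * slots) scatters to ts_mask = 0x0500 (timeslots 8 and 10). A weight
 * mismatch after scattering means the slot map requested more timeslots
 * than are available.
 */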
static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
				   u32 slot_map, struct qmc_chan_ts_info *ts_info)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);

	/* Tx and Rx available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(map, slot_map);
	bitmap_scatter(ts_mask, map, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
			map, ts_mask_avail, ts_mask);
		return -EINVAL;
	}

	bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
	ts_info->rx_ts_mask = ts_info->tx_ts_mask;
	return 0;
}

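/*
 * Inverse translation: bitmap_gather() collects the bits of the QMC
 * timeslot mask at the available-slot positions back into a dense slot
 * map (0x0500 with ts_mask_avail = 0xff00 gathers to 0x5). The result
 * must fit in the 32-bit slot map exposed through te1_settings.
 */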
static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
				  const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
{
	DECLARE_BITMAP(ts_mask_avail, 64);
	DECLARE_BITMAP(ts_mask, 64);
	DECLARE_BITMAP(map, 64);
	u32 slot_array[2];

	/* Tx and Rx masks and available masks must be identical */
	if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
		dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
		return -EINVAL;
	}
	if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
		dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
			ts_info->rx_ts_mask, ts_info->tx_ts_mask);
		return -EINVAL;
	}

	bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
	bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
	bitmap_gather(map, ts_mask, ts_mask_avail, 64);

	if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
		dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	bitmap_to_arr32(slot_array, map, 64);
	if (slot_array[1]) {
		dev_err(qmc_hdlc->dev, "Slot map does not fit in 32 bits (%64pb, %64pb) -> %64pb\n",
			ts_mask_avail, ts_mask, map);
		return -EINVAL;
	}

	*slot_map = slot_array[0];
	return 0;
}

static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
{
	struct qmc_chan_ts_info ts_info;
	int ret;

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "get QMC channel ts info failed (%d)\n", ret);
		return ret;
	}
	ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
	if (ret)
		return ret;

	ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret) {
		dev_err(qmc_hdlc->dev, "set QMC channel ts info failed (%d)\n", ret);
		return ret;
	}

	qmc_hdlc->slot_map = te1->slot_map;

	ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
	if (ret) {
		dev_err(qmc_hdlc->dev, "framer set iface failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

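/*
 * SIOCWANDEV handler. For IF_GET_IFACE, the ifs->size protocol is: a size
 * of zero means only the iface type is requested; a non-zero size smaller
 * than te1_settings means the caller's buffer is too small, so the needed
 * size is returned along with -ENOBUFS. Setting the iface requires
 * CAP_NET_ADMIN and an interface that is down.
 */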
static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	te1_settings te1;
	int ret;

	switch (ifs->type) {
	case IF_GET_IFACE:
		if (ifs->size < sizeof(te1)) {
			/* Retrieve type only */
			ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
			if (ret)
				return ret;

			if (!ifs->size)
				return 0; /* only type requested */

			ifs->size = sizeof(te1); /* data size wanted */
			return -ENOBUFS;
		}

		memset(&te1, 0, sizeof(te1));

		/* Retrieve info from framer */
		ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
		if (ret)
			return ret;

		/* Update slot_map */
		te1.slot_map = qmc_hdlc->slot_map;

		if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
			return -EFAULT;
		return 0;

	case IF_IFACE_E1:
	case IF_IFACE_T1:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (netdev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
			return -EFAULT;

		return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);

	default:
		return hdlc_ioctl(netdev, ifs);
	}
}

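/*
 * ndo open handler: power up the optional framer, open the HDLC layer,
 * configure the QMC channel for HDLC with rx buffers sized for
 * HDLC_MAX_MRU plus CRC and QMC extra space, pre-queue all rx descriptors,
 * then start the channel.
 */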
static int qmc_hdlc_open(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_chan_param chan_param;
	struct qmc_hdlc_desc *desc;
	int ret;
	int i;

	ret = qmc_hdlc_framer_start(qmc_hdlc);
	if (ret)
		return ret;

	ret = hdlc_open(netdev);
	if (ret)
		goto framer_stop;

	/* Update carrier */
	qmc_hdlc_framer_set_carrier(qmc_hdlc);

	chan_param.mode = QMC_HDLC;
	/* HDLC_MAX_MRU + 4: room for the CRC
	 * HDLC_MAX_MRU + 4 + 8: room for the CRC and some extra space needed
	 * by the QMC
	 */
	chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
	chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
	chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
	ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
	if (ret) {
		dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
		goto hdlc_close;
	}

	/* Queue as many recv descriptors as possible */
	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];

		desc->netdev = netdev;
		ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
		if (ret == -EBUSY && i != 0)
			break; /* The QMC channel cannot accept more descriptors */
		if (ret)
			goto free_desc;
	}

	ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	if (ret) {
		dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
		goto free_desc;
	}

	netif_start_queue(netdev);

	return 0;

free_desc:
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	while (i--) {
		desc = &qmc_hdlc->rx_descs[i];
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}
hdlc_close:
	hdlc_close(netdev);
framer_stop:
	qmc_hdlc_framer_stop(qmc_hdlc);
	return ret;
}

static int qmc_hdlc_close(struct net_device *netdev)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
	struct qmc_hdlc_desc *desc;
	int i;

	qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
	qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);

	netif_stop_queue(netdev);

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
		desc = &qmc_hdlc->tx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_TO_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
		desc = &qmc_hdlc->rx_descs[i];
		if (!desc->skb)
			continue;
		dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
				 DMA_FROM_DEVICE);
		kfree_skb(desc->skb);
		desc->skb = NULL;
	}

	hdlc_close(netdev);
	qmc_hdlc_framer_stop(qmc_hdlc);
	return 0;
}

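/*
 * hdlc attach callback: only NRZ encoding is supported. The parity setting
 * selects a 16-bit or 32-bit CRC, which is applied to the QMC channel the
 * next time the interface is opened.
 */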
static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
			   unsigned short parity)
{
	struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch (parity) {
	case PARITY_CRC16_PR1_CCITT:
		qmc_hdlc->is_crc32 = false;
		break;
	case PARITY_CRC32_PR1_CCITT:
		qmc_hdlc->is_crc32 = true;
		break;
	default:
		dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
		return -EINVAL;
	}

	return 0;
}

static const struct net_device_ops qmc_hdlc_netdev_ops = {
	.ndo_open       = qmc_hdlc_open,
	.ndo_stop       = qmc_hdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = qmc_hdlc_ioctl,
};

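/*
 * Probe gets the QMC channel referenced by the device tree node, checks
 * that it is configured in QMC_HDLC mode, derives the initial slot map
 * from the channel timeslot info, and registers an HDLC netdev. The
 * framer is optional: without one the carrier helpers are no-ops and the
 * reported iface is E1.
 */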
static int qmc_hdlc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qmc_chan_ts_info ts_info;
	struct qmc_hdlc *qmc_hdlc;
	struct qmc_chan_info info;
	hdlc_device *hdlc;
	int ret;

	qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
	if (!qmc_hdlc)
		return -ENOMEM;

	qmc_hdlc->dev = dev;
	spin_lock_init(&qmc_hdlc->tx_lock);
	mutex_init(&qmc_hdlc->carrier_lock);

	qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
	if (IS_ERR(qmc_hdlc->qmc_chan))
		return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
				     "get QMC channel failed\n");

	ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel info failed\n");

	if (info.mode != QMC_HDLC)
		return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
				     info.mode);

	ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
	if (ret)
		return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");

	ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
	if (ret)
		return ret;

	qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
	if (IS_ERR(qmc_hdlc->framer))
		return PTR_ERR(qmc_hdlc->framer);

	ret = qmc_hdlc_framer_init(qmc_hdlc);
	if (ret)
		return ret;

	qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
	if (!qmc_hdlc->netdev) {
		ret = -ENOMEM;
		goto framer_exit;
	}

	hdlc = dev_to_hdlc(qmc_hdlc->netdev);
	hdlc->attach = qmc_hdlc_attach;
	hdlc->xmit = qmc_hdlc_xmit;
	SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
	qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
	qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
	ret = register_hdlc_device(qmc_hdlc->netdev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to register hdlc device\n");
		goto free_netdev;
	}

	platform_set_drvdata(pdev, qmc_hdlc);
	return 0;

free_netdev:
	free_netdev(qmc_hdlc->netdev);
framer_exit:
	qmc_hdlc_framer_exit(qmc_hdlc);
	return ret;
}

static void qmc_hdlc_remove(struct platform_device *pdev)
{
	struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);

	unregister_hdlc_device(qmc_hdlc->netdev);
	free_netdev(qmc_hdlc->netdev);
	qmc_hdlc_framer_exit(qmc_hdlc);
}

static const struct of_device_id qmc_hdlc_id_table[] = {
	{ .compatible = "fsl,qmc-hdlc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);

static struct platform_driver qmc_hdlc_driver = {
	.driver = {
		.name = "fsl-qmc-hdlc",
		.of_match_table = qmc_hdlc_id_table,
	},
	.probe = qmc_hdlc_probe,
	.remove_new = qmc_hdlc_remove,
};
module_platform_driver(qmc_hdlc_driver);

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("QMC HDLC driver");
MODULE_LICENSE("GPL");