xref: /linux/drivers/net/ethernet/engleder/tsnep_main.c (revision 24aeeb107f0724fa15e16d5f28b39f3c3ecfc746)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
				TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
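
/* Note on the sizing math above (illustrative): RX_SKB_LENGTH rounds the
 * worst case of inline metadata + Ethernet header + payload + FCS up to a
 * multiple of 4, as the descriptor length field requires. RX_SKB_RESERVE is
 * chosen so that, once the TSNEP_RX_INLINE_METADATA_SIZE bytes of metadata
 * are pulled off in tsnep_rx_poll(), the Ethernet header starts NET_IP_ALIGN
 * bytes past a 16-byte offset and the IP header behind the 14-byte Ethernet
 * header is therefore 4-byte aligned.
 */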

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
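
/* Worked example (illustrative): for dma_addr = 0x0000000123456789,
 * DMA_ADDR_LOW() yields 0x23456789 and DMA_ADDR_HIGH() yields 0x00000001;
 * without CONFIG_ARCH_DMA_ADDR_T_64BIT the high half is a constant 0, which
 * avoids a shift by 32 on a 32-bit dma_addr_t.
 */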

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0) {
		if (adapter->netdev->phydev)
			phy_mac_interrupt(adapter->netdev->phydev);
	}

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		if (adapter->netdev) {
			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
			napi_schedule(&adapter->queue[0].napi);
		}
	}

	return IRQ_HANDLED;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}
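
/* The read and write paths above compose the common ECM_MD_CONTROL fields
 * identically; a shared helper along these lines would remove the
 * duplication (an illustrative sketch, not part of the original driver):
 */
static inline u32 __maybe_unused tsnep_mdiobus_common_md(struct tsnep_adapter *adapter,
							 int addr, int regnum)
{
	u32 md = 0;

	/* preamble suppression plus register and PHY address fields are
	 * identical for read and write accesses
	 */
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;

	return md;
}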

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* A PHY link state change is not signaled while loopback is enabled,
	 * and it would only delay a working loopback anyway; ensure that
	 * loopback works immediately by setting the link mode directly.
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* The MAC supports only 100Mbps/1000Mbps full duplex.
	 * SPE (Single Pair Ethernet) is also an option, but it is not
	 * implemented yet.
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}
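
/* Note on the ring layout (from the code above): each coherent page holds
 * TSNEP_RING_ENTRIES_PER_PAGE descriptor slots of TSNEP_DESC_SIZE bytes;
 * within a slot the hardware writeback area comes first and the
 * driver-written descriptor follows at TSNEP_DESC_OFFSET. The second loop
 * chains the descriptors into a ring via their next pointers, with the
 * last entry wrapping back to the first.
 */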

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
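
/* Note on the owner counter (as coded above): it cycles 1 -> 2 -> 3 -> 1,
 * and the increment threshold retreats by one slot roughly once per ring
 * traversal, so the index at which the counter changes rotates around the
 * ring instead of sticking to a fixed descriptor.
 */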

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}
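
/* Worked example of the accounting above (with a hypothetical ring of 8
 * entries): read == write -> 7 free (one slot stays unused so that
 * read == write unambiguously means "empty"); read = 0, write = 7 ->
 * 0 free; read = 5, write = 2 -> 2 free.
 */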

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}
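
/* Note: entry 0 of a frame maps the linear skb head via dma_map_single(),
 * the remaining entries map page fragments via skb_frag_dma_map();
 * tsnep_tx_unmap() below mirrors that split with dma_unmap_single() for
 * the first entry and dma_unmap_page() for the rest.
 */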

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring is full; this shall not happen, because the queue is
		 * stopped below whenever the ring could become full
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}
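
/* Note: MAX_SKB_FRAGS + 1 is the worst-case descriptor count for a single
 * frame (linear head plus every possible fragment). Stopping the queue
 * while at least that many descriptors are still free keeps the
 * "ring is full" branch above unreachable in normal operation.
 */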

static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;
	int length;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, napi_budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}
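
/* Note: the wake threshold of (MAX_SKB_FRAGS + 1) * 2 is twice the stop
 * threshold used in tsnep_xmit_frame_ring(), giving hysteresis: the queue
 * is restarted only once two worst-case frames fit, which avoids rapid
 * stop/wake toggling. Passing napi_budget to napi_consume_skb() lets freed
 * skbs be batched when running in NAPI context (a zero budget signals a
 * non-NAPI caller).
 */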

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (dma_unmap_addr(entry, dma))
			dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
					 dma_unmap_len(entry, len),
					 DMA_FROM_DEVICE);
		if (entry->skb)
			dev_kfree_skb(entry->skb);
	}

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
				      struct tsnep_rx_entry *entry)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
				 GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, RX_SKB_RESERVE);

	dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	entry->skb = skb;
	entry->len = RX_SKB_LENGTH;
	dma_unmap_addr_set(entry, dma, dma);
	entry->desc->rx = __cpu_to_le64(dma);

	return 0;
}
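
/* Note: the RX_SKB_RESERVE headroom applied above cooperates with the
 * skb_pull() of TSNEP_RX_INLINE_METADATA_SIZE in tsnep_rx_poll(); see the
 * note at the RX_SKB_* macros for the resulting IP header alignment.
 */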

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* RX_SKB_LENGTH is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	size_t len;
	dma_addr_t dma;
	int length;
	bool enable = false;
	int retval;

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		skb = entry->skb;
		len = dma_unmap_len(entry, len);
		dma = dma_unmap_addr(entry, dma);

		/* forward the skb only if the new buffer allocation succeeds;
		 * otherwise the skb is reused and the frame is dropped
		 */
		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (!retval) {
			dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);

			length = __le32_to_cpu(entry->desc_wb->properties) &
				 TSNEP_DESC_LENGTH_MASK;
			skb_put(skb, length - ETH_FCS_LEN);
			if (rx->adapter->hwtstamp_config.rx_filter ==
			    HWTSTAMP_FILTER_ALL) {
				struct skb_shared_hwtstamps *hwtstamps =
					skb_hwtstamps(skb);
				struct tsnep_rx_inline *rx_inline =
					(struct tsnep_rx_inline *)skb->data;

				skb_shinfo(skb)->tx_flags |=
					SKBTX_HW_TSTAMP_NETDEV;
				memset(hwtstamps, 0, sizeof(*hwtstamps));
				hwtstamps->netdev_data = rx_inline;
			}
			skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
			skb_record_rx_queue(skb, rx->queue_index);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
			done++;
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}
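
/* Note on the RX length accounting above: the hardware length covers
 * inline metadata + frame + FCS. skb_put(length - ETH_FCS_LEN) trims the
 * FCS, skb_pull(TSNEP_RX_INLINE_METADATA_SIZE) strips the metadata, and
 * rx->bytes adds length - TSNEP_RX_INLINE_METADATA_SIZE, i.e. the frame
 * including FCS, which matches the TX side where ETH_FCS_LEN is added to
 * tx->bytes.
 */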

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int i;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done)))
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

	return min(done, budget - 1);
}
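
/* Note: min(done, budget - 1) keeps the return value strictly below the
 * budget once napi_complete_done() has run; a poll function returning a
 * value equal to its budget tells the NAPI core that work remains and it
 * must keep polling.
 */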

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	retval = tsnep_phy_open(adapter);
	if (retval)
		return retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr,
					       rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll, 64);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

failed:
	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	tsnep_phy_close(adapter);
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	tsnep_phy_close(adapter);

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}
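
/* Worked example (illustrative): for the address 00:11:22:33:44:55 the
 * first four bytes (00:11:22:33) are written to TSNEP_MAC_ADDRESS_LOW and
 * the remaining two (44:55) to TSNEP_MAC_ADDRESS_HIGH.
 */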

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering; at minimum the configured MAC address and
	 * broadcasts are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}
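
/* Note on the phy_mask above: each set bit excludes that PHY address from
 * the bus scan, so 0x0000001 (bit 0) skips address 0, which some PHYs
 * treat as the broadcast address.
 */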

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	adapter->irq = platform_get_irq(pdev, 0);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;
	netdev->irq = adapter->irq;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;

	adapter->num_tx_queues = TSNEP_QUEUES;
	adapter->num_rx_queues = TSNEP_QUEUES;
	adapter->num_queues = TSNEP_QUEUES;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	tsnep_disable_irq(adapter, ECM_INT_ALL);
	retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
				  0, TSNEP, adapter);
	if (retval != 0) {
		dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
			adapter->irq);
		return retval;
	}
	tsnep_enable_irq(adapter, ECM_INT_LINK);

	retval = tsnep_mac_init(adapter);
	if (retval)
		goto mac_init_failed;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
mac_init_failed:
	tsnep_disable_irq(adapter, ECM_INT_ALL);
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");