xref: /linux/drivers/net/ethernet/tehuti/tn40.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) Tehuti Networks Ltd. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/ethtool.h>
6 #include <linux/firmware.h>
7 #include <linux/if_vlan.h>
8 #include <linux/iopoll.h>
9 #include <linux/netdevice.h>
10 #include <linux/pci.h>
11 #include <linux/phylink.h>
12 #include <linux/vmalloc.h>
13 #include <net/netdev_queues.h>
14 #include <net/page_pool/helpers.h>
15 
16 #include "tn40.h"
17 
18 #define TN40_SHORT_PACKET_SIZE 60
19 #define TN40_FIRMWARE_NAME "tehuti/bdx.bin"
20 
21 static void tn40_enable_interrupts(struct tn40_priv *priv)
22 {
23 	tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask);
24 }
25 
26 static void tn40_disable_interrupts(struct tn40_priv *priv)
27 {
28 	tn40_write_reg(priv, TN40_REG_IMR, 0);
29 }
30 
31 static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f,
32 			   int fsz_type,
33 			   u16 reg_cfg0, u16 reg_cfg1,
34 			   u16 reg_rptr, u16 reg_wptr)
35 {
36 	u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type);
37 	u64 cfg_base;
38 
39 	memset(f, 0, sizeof(struct tn40_fifo));
40 	/* 1K extra space is allocated at the end of the fifo to simplify
41 	 * processing of descriptors that wrap around the fifo's end.
42 	 */
43 	f->va = dma_alloc_coherent(&priv->pdev->dev,
44 				   memsz + TN40_FIFO_EXTRA_SPACE, &f->da,
45 				   GFP_KERNEL);
46 	if (!f->va)
47 		return -ENOMEM;
48 
49 	f->reg_cfg0 = reg_cfg0;
50 	f->reg_cfg1 = reg_cfg1;
51 	f->reg_rptr = reg_rptr;
52 	f->reg_wptr = reg_wptr;
53 	f->rptr = 0;
54 	f->wptr = 0;
55 	f->memsz = memsz;
56 	f->size_mask = memsz - 1;
57 	cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type);
58 	tn40_write_reg(priv, reg_cfg0, cfg_base);
59 	tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da));
60 	return 0;
61 }
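
/* Illustrative sketch, not part of the driver: how the extra space past the
 * end of a fifo (TN40_FIFO_EXTRA_SPACE above) simplifies wrap handling. A
 * descriptor is always written contiguously at wptr, possibly spilling into
 * the extra area, and the spilled tail is then folded back to the start of
 * the fifo. struct my_fifo and my_fifo_put() are hypothetical names; the real
 * driver applies this pattern in tn40_set_rx_desc() and tn40_start_xmit().
 */
struct my_fifo {
	char *va;	/* memsz + extra bytes; extra >= largest descriptor */
	int memsz;	/* fifo payload size in bytes */
	int wptr;	/* byte offset of the next free slot */
};

static void my_fifo_put(struct my_fifo *f, const void *desc, int len)
{
	int delta;

	/* Write in one piece; the copy may run into the extra space. */
	memcpy(f->va + f->wptr, desc, len);
	f->wptr += len;
	delta = f->wptr - f->memsz;
	if (delta >= 0) {
		f->wptr = delta;
		if (delta > 0)	/* fold the spilled tail back to the start */
			memcpy(f->va, f->va + f->memsz, delta);
	}
}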
62 
63 static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f)
64 {
65 	dma_free_coherent(&priv->pdev->dev,
66 			  f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da);
67 }
68 
69 static struct tn40_rxdb *tn40_rxdb_alloc(int nelem)
70 {
71 	size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) +
72 	    (nelem * sizeof(struct tn40_rx_map));
73 	struct tn40_rxdb *db;
74 	int i;
75 
76 	db = vzalloc(size);
77 	if (db) {
78 		db->stack = (int *)(db + 1);
79 		db->elems = (void *)(db->stack + nelem);
80 		db->nelem = nelem;
81 		db->top = nelem;
82 		/* make the first alloc close to db struct */
83 		for (i = 0; i < nelem; i++)
84 			db->stack[i] = nelem - i - 1;
85 	}
86 	return db;
87 }
88 
89 static void tn40_rxdb_free(struct tn40_rxdb *db)
90 {
91 	vfree(db);
92 }
93 
94 static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db)
95 {
96 	return db->stack[--db->top];
97 }
98 
99 static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n)
100 {
101 	return db->elems + n;
102 }
103 
104 static int tn40_rxdb_available(struct tn40_rxdb *db)
105 {
106 	return db->top;
107 }
108 
109 static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n)
110 {
111 	db->stack[db->top++] = n;
112 }
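
/* Illustrative sketch, not part of the driver: the lifecycle of one rxdb
 * index through the helpers above. The rxdb is a stack of free indices; an
 * index is popped when a buffer is posted, rides to the NIC inside the rxf
 * descriptor (va_lo), comes back in the completion descriptor, and is pushed
 * back once the buffer has been handed to the stack. my_rxdb_demo() is a
 * hypothetical helper; the real flow lives in tn40_rx_alloc_buffers() and
 * tn40_rx_receive() below.
 */
static void my_rxdb_demo(struct tn40_priv *priv, struct page *page)
{
	struct tn40_rxdb *db = priv->rxdb0;
	struct tn40_rx_map *dm;
	int idx;

	if (!tn40_rxdb_available(db))
		return;				/* no free slots left */

	idx = tn40_rxdb_alloc_elem(db);		/* pop a free index */
	dm = tn40_rxdb_addr_elem(db, idx);	/* per-index state */
	dm->page = page;			/* remember the buffer */

	/* ... idx is written into rxfd->va_lo and echoed back by the NIC in
	 * rxdd->va_lo when the packet completes ...
	 */

	dm->page = NULL;
	tn40_rxdb_free_elem(db, idx);		/* push the index back */
}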
113 
114 /**
115  * tn40_create_rx_ring - Initialize all RX related HW and SW resources
116  * @priv: NIC private structure
117  *
118  * create_rx_ring creates rxf and rxd fifos, updates the relevant HW registers,
119  * preallocates skbs for rx. It assumes that Rx is disabled in HW. Functions
120  * are grouped for better cache usage.
121  *
122  * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
123  * filled and packets will be dropped by the NIC without getting into the host
124  * or generating interrupts. In this situation the host has no chance of
125  * processing all the packets. Dropping packets by the NIC is cheaper, since it
126  * takes 0 CPU cycles.
127  *
128  * Return: 0 on success and negative value on error.
129  */
130 static int tn40_create_rx_ring(struct tn40_priv *priv)
131 {
132 	struct page_pool_params pp = {
133 		.dev = &priv->pdev->dev,
134 		.napi = &priv->napi,
135 		.dma_dir = DMA_FROM_DEVICE,
136 		.netdev = priv->ndev,
137 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
138 		.max_len = PAGE_SIZE,
139 	};
140 	int ret, pkt_size, nr;
141 
142 	priv->page_pool = page_pool_create(&pp);
143 	if (IS_ERR(priv->page_pool))
144 		return PTR_ERR(priv->page_pool);
145 
146 	ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size,
147 			      TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0,
148 			      TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0);
149 	if (ret)
150 		goto err_destroy_page_pool;
151 
152 	ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size,
153 			      TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0,
154 			      TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0);
155 	if (ret)
156 		goto err_free_rxd;
157 
158 	pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN;
159 	priv->rxf_fifo0.m.pktsz = pkt_size;
160 	nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc);
161 	priv->rxdb0 = tn40_rxdb_alloc(nr);
162 	if (!priv->rxdb0) {
163 		ret = -ENOMEM;
164 		goto err_free_rxf;
165 	}
166 	return 0;
167 err_free_rxf:
168 	tn40_fifo_free(priv, &priv->rxf_fifo0.m);
169 err_free_rxd:
170 	tn40_fifo_free(priv, &priv->rxd_fifo0.m);
171 err_destroy_page_pool:
172 	page_pool_destroy(priv->page_pool);
173 	return ret;
174 }
175 
176 static void tn40_rx_free_buffers(struct tn40_priv *priv)
177 {
178 	struct tn40_rxdb *db = priv->rxdb0;
179 	struct tn40_rx_map *dm;
180 	u16 i;
181 
182 	netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem,
183 		   tn40_rxdb_available(db),
184 		   db->nelem - tn40_rxdb_available(db));
185 
186 	for (i = 0; i < db->nelem; i++) {
187 		dm = tn40_rxdb_addr_elem(db, i);
188 		if (dm->page)
189 			page_pool_put_full_page(priv->page_pool, dm->page,
190 						false);
191 	}
192 }
193 
194 static void tn40_destroy_rx_ring(struct tn40_priv *priv)
195 {
196 	if (priv->rxdb0) {
197 		tn40_rx_free_buffers(priv);
198 		tn40_rxdb_free(priv->rxdb0);
199 		priv->rxdb0 = NULL;
200 	}
201 	tn40_fifo_free(priv, &priv->rxf_fifo0.m);
202 	tn40_fifo_free(priv, &priv->rxd_fifo0.m);
203 	page_pool_destroy(priv->page_pool);
204 }
205 
206 static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma)
207 {
208 	struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
209 	struct tn40_rxf_desc *rxfd;
210 	int delta;
211 
212 	rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr);
213 	rxfd->info = cpu_to_le32(0x10003);	/* INFO =1 BC =3 */
214 	rxfd->va_lo = cpu_to_le32(idx);
215 	rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma));
216 	rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma));
217 	rxfd->len = cpu_to_le32(f->m.pktsz);
218 	f->m.wptr += sizeof(struct tn40_rxf_desc);
219 	delta = f->m.wptr - f->m.memsz;
220 	if (unlikely(delta >= 0)) {
221 		f->m.wptr = delta;
222 		if (delta > 0) {
223 			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
224 			netdev_dbg(priv->ndev,
225 				   "wrapped rxd descriptor\n");
226 		}
227 	}
228 }
229 
230 /**
231  * tn40_rx_alloc_buffers - Fill rxf fifo with buffers.
232  *
233  * @priv: NIC's private structure
234  *
235  * rx_alloc_buffers allocates buffers via the page pool API, builds rxf descs
236  * and pushes them (rxf descr) into the rxf fifo. The pages are stored in rxdb.
237  * To calculate the free space, we use the cached values of RPTR and WPTR
238  * when needed. This function also updates RPTR and WPTR.
239  */
240 static void tn40_rx_alloc_buffers(struct tn40_priv *priv)
241 {
242 	struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
243 	struct tn40_rxdb *db = priv->rxdb0;
244 	struct tn40_rx_map *dm;
245 	struct page *page;
246 	int dno, i, idx;
247 
248 	dno = tn40_rxdb_available(db) - 1;
249 	for (i = dno; i > 0; i--) {
250 		page = page_pool_dev_alloc_pages(priv->page_pool);
251 		if (!page)
252 			break;
253 
254 		idx = tn40_rxdb_alloc_elem(db);
255 		tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page));
256 		dm = tn40_rxdb_addr_elem(db, idx);
257 		dm->page = page;
258 	}
259 	if (i != dno)
260 		tn40_write_reg(priv, f->m.reg_wptr,
261 			       f->m.wptr & TN40_TXF_WPTR_WR_PTR);
262 	netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n",
263 		   f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
264 	netdev_dbg(priv->ndev, "read_reg  0x%04x f->m.reg_rptr=0x%x\n",
265 		   f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr));
266 	netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n",
267 		   f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr));
268 }
269 
270 static void tn40_recycle_rx_buffer(struct tn40_priv *priv,
271 				   struct tn40_rxd_desc *rxdd)
272 {
273 	struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
274 	struct tn40_rx_map *dm;
275 	int idx;
276 
277 	idx = le32_to_cpu(rxdd->va_lo);
278 	dm = tn40_rxdb_addr_elem(priv->rxdb0, idx);
279 	tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page));
280 
281 	tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
282 }
283 
284 static int tn40_rx_receive(struct tn40_priv *priv, int budget)
285 {
286 	struct tn40_rxd_fifo *f = &priv->rxd_fifo0;
287 	u32 rxd_val1, rxd_err, pkt_id;
288 	int tmp_len, size, done = 0;
289 	struct tn40_rxdb *db = NULL;
290 	struct tn40_rxd_desc *rxdd;
291 	struct tn40_rx_map *dm;
292 	struct sk_buff *skb;
293 	u16 len, rxd_vlan;
294 	int idx;
295 
296 	f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR;
297 	size = f->m.wptr - f->m.rptr;
298 	if (size < 0)
299 		size += f->m.memsz;	/* Size is negative :-) */
300 
301 	while (size > 0) {
302 		rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr);
303 		db = priv->rxdb0;
304 
305 		/* We have a chicken and egg problem here. If the
306 		 * descriptor is wrapped we first need to copy the tail
307 		 * of the descriptor to the end of the buffer before
308 		 * extracting values from the descriptor. However in
309 		 * order to know if the descriptor is wrapped we need to
310 		 * obtain the length of the descriptor from (the
311 		 * wrapped) descriptor. Luckily the length is the first
312 		 * word of the descriptor. Descriptor lengths are
313 		 * multiples of 8 bytes so in case of a wrapped
314 		 * descriptor the first 8 bytes are guaranteed to appear
315 		 * before the end of the buffer. We first obtain the
316 		 * length, we then copy the rest of the descriptor if
317 		 * needed and then extract the rest of the values from
318 		 * the descriptor.
319 		 *
320 		 * Do not change the order of operations as it will
321 		 * break the code!!!
322 		 */
323 		rxd_val1 = le32_to_cpu(rxdd->rxd_val1);
324 		tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3;
325 		pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1);
326 		size -= tmp_len;
327 		/* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */
328 		if (size < 0) {
329 			netdev_dbg(priv->ndev,
330 				   "%s partially arrived desc tmp_len %d\n",
331 				   __func__, tmp_len);
332 			break;
333 		}
334 		/* make sure that the descriptor has fully arrived
335 		 * before reading the rest of the descriptor.
336 		 */
337 		rmb();
338 
339 		/* A special treatment is given to non-contiguous
340 		 * descriptors that start near the end, wrap around
341 		 * and continue at the beginning. The second part is
342 		 * copied right after the first, and then descriptor
343 		 * is interpreted as normal. The fifo has an extra
344 		 * space to allow such operations.
345 		 */
346 
347 		/* HAVE WE REACHED THE END OF THE QUEUE? */
348 		f->m.rptr += tmp_len;
349 		tmp_len = f->m.rptr - f->m.memsz;
350 		if (unlikely(tmp_len >= 0)) {
351 			f->m.rptr = tmp_len;
352 			if (tmp_len > 0) {
353 				/* COPY PARTIAL DESCRIPTOR
354 				 * TO THE END OF THE QUEUE
355 				 */
356 				netdev_dbg(priv->ndev,
357 					   "wrapped desc rptr=%d tmp_len=%d\n",
358 					   f->m.rptr, tmp_len);
359 				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
360 			}
361 		}
362 		idx = le32_to_cpu(rxdd->va_lo);
363 		dm = tn40_rxdb_addr_elem(db, idx);
364 		prefetch(dm);
365 
366 		len = le16_to_cpu(rxdd->len);
367 		rxd_vlan = le16_to_cpu(rxdd->rxd_vlan);
368 		/* CHECK FOR ERRORS */
369 		rxd_err = TN40_GET_RXD_ERR(rxd_val1);
370 		if (unlikely(rxd_err)) {
371 			u64_stats_update_begin(&priv->syncp);
372 			priv->stats.rx_errors++;
373 			u64_stats_update_end(&priv->syncp);
374 			tn40_recycle_rx_buffer(priv, rxdd);
375 			continue;
376 		}
377 
378 		skb = napi_build_skb(page_address(dm->page), PAGE_SIZE);
379 		if (!skb) {
380 			u64_stats_update_begin(&priv->syncp);
381 			priv->stats.rx_dropped++;
382 			priv->alloc_fail++;
383 			u64_stats_update_end(&priv->syncp);
384 			tn40_recycle_rx_buffer(priv, rxdd);
385 			break;
386 		}
387 		skb_mark_for_recycle(skb);
388 		skb_put(skb, len);
389 		skb->protocol = eth_type_trans(skb, priv->ndev);
390 		skb->ip_summed =
391 		    (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
392 		if (TN40_GET_RXD_VTAG(rxd_val1))
393 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
394 					       TN40_GET_RXD_VLAN_TCI(rxd_vlan));
395 
396 		dm->page = NULL;
397 		tn40_rxdb_free_elem(db, idx);
398 
399 		napi_gro_receive(&priv->napi, skb);
400 
401 		u64_stats_update_begin(&priv->syncp);
402 		priv->stats.rx_bytes += len;
403 		u64_stats_update_end(&priv->syncp);
404 
405 		if (unlikely(++done >= budget))
406 			break;
407 	}
408 	u64_stats_update_begin(&priv->syncp);
409 	priv->stats.rx_packets += done;
410 	u64_stats_update_end(&priv->syncp);
411 	/* FIXME: Do something to minimize pci accesses */
412 	tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
413 	tn40_rx_alloc_buffers(priv);
414 	return done;
415 }
416 
417 /* TX HW/SW interaction overview
418  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
419  * There are 2 types of TX communication channels between driver and NIC.
420  * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets.
421  * 2) TX Data Fifo - TXD - Holds descriptors of full buffers.
422  *
423  * Currently the NIC supports TSO, checksumming and gather DMA.
424  * UFO and IP fragmentation are on the way.
425  *
426  * TX SW Data Structures
427  * ~~~~~~~~~~~~~~~~~~~~~
428  * TXDB is used to keep track of all skbs owned by SW and their DMA addresses.
429  * For TX case, ownership lasts from getting the packet via hard_xmit and
430  * until the HW acknowledges sending the packet by TXF descriptors.
431  * TXDB is implemented as a cyclic buffer.
432  *
433  * FIFO objects keep info about the fifo's size and location, relevant HW
434  * registers, usage and skb db. Each RXD and RXF fifo has its own fifo
435  * structure. Implemented as simple struct.
436  *
437  * TX SW Execution Flow
438  * ~~~~~~~~~~~~~~~~~~~~
439  * OS calls the driver's hard_xmit method with a packet to send. The driver
440  * creates DMA mappings, builds TXD descriptors and kicks the HW by updating
441  * TXD WPTR.
442  *
443  * When a packet is sent, the HW writes a TXF descriptor and the SW
444  * frees the original skb. To prevent TXD fifo overflow without
445  * reading HW registers every time, the SW deploys "tx level"
446  * technique. Upon startup, the tx level is initialized to TXD fifo
447  * length. For every sent packet, the SW gets its TXD descriptor size
448  * (from a pre-calculated array) and subtracts it from tx level.  The
449  * size is also stored in txdb. When a TXF ack arrives, the SW fetched
450  * the size of the original TXD descriptor from the txdb and adds it
451  * to the tx level. When the Tx level drops below some predefined
452  * threshold, the driver stops the TX queue. When the TX level rises
453  * above that level, the tx queue is enabled again.
454  *
455  * This technique avoids excessive reading of RPTR and WPTR registers.
456  * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
457  */
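
/* Illustrative sketch, not part of the driver: the "tx level" bookkeeping
 * described above, reduced to its core. The MY_* constants and my_txq_*
 * names are hypothetical; in the real driver this logic is spread across
 * tn40_start_xmit() and tn40_tx_cleanup(), using tn40_txd_sizes[],
 * TN40_MIN_TX_LEVEL / TN40_MAX_TX_LEVEL and netif_{stop,wake}_queue().
 */
#define MY_FIFO_BYTES	4096		/* TXD fifo size in this model */
#define MY_STOP_LEVEL	256		/* stop the queue below this level */
#define MY_WAKE_LEVEL	(MY_FIFO_BYTES / 2)

struct my_txq {
	int tx_level;		/* believed number of free bytes in the TXD fifo */
	bool stopped;
};

static void my_txq_init(struct my_txq *q)
{
	q->tx_level = MY_FIFO_BYTES;	/* the fifo starts out empty */
	q->stopped = false;
}

/* On transmit: charge the size of the TXD descriptor just written. */
static void my_txq_on_xmit(struct my_txq *q, int desc_bytes)
{
	q->tx_level -= desc_bytes;
	if (q->tx_level < MY_STOP_LEVEL)
		q->stopped = true;	/* netif_stop_queue() in the driver */
}

/* On a TXF ack: credit back the size looked up from the tx db. */
static void my_txq_on_ack(struct my_txq *q, int desc_bytes)
{
	q->tx_level += desc_bytes;
	if (q->stopped && q->tx_level >= MY_WAKE_LEVEL)
		q->stopped = false;	/* netif_wake_queue() in the driver */
}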
458 static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db,
459 				   struct tn40_tx_map **pptr)
460 {
461 	++*pptr;
462 	if (unlikely(*pptr == db->end))
463 		*pptr = db->start;
464 }
465 
466 static void tn40_tx_db_inc_rptr(struct tn40_txdb *db)
467 {
468 	tn40_do_tx_db_ptr_next(db, &db->rptr);
469 }
470 
471 static void tn40_tx_db_inc_wptr(struct tn40_txdb *db)
472 {
473 	tn40_do_tx_db_ptr_next(db, &db->wptr);
474 }
475 
476 static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type)
477 {
478 	int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1));
479 
480 	d->start = vzalloc(memsz);
481 	if (!d->start)
482 		return -ENOMEM;
483 	/* To differentiate between an empty db state and a full db state,
484 	 * at least one element is always kept unused, so a full db never
485 	 * reaches rptr == wptr, which is reserved for the empty state.
486 	 */
487 	d->size = memsz / sizeof(struct tn40_tx_map) - 1;
488 	d->end = d->start + d->size + 1;	/* just after last element */
489 
490 	/* All dbs are created empty */
491 	d->rptr = d->start;
492 	d->wptr = d->start;
493 	return 0;
494 }
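
/* Illustrative sketch, not part of the driver: why one element is kept
 * unused above. With that convention rptr == wptr always means "empty", so a
 * db laid out with d->size + 1 slots holds at most d->size entries, and
 * occupancy can be derived from the two pointers alone. my_txdb_used() is a
 * hypothetical helper; the driver itself tracks usage via the tx level.
 */
static int my_txdb_used(const struct tn40_txdb *d)
{
	int slots = d->size + 1;		/* total map slots, start..end */
	int used = (int)(d->wptr - d->rptr);	/* element-wise pointer diff */

	if (used < 0)
		used += slots;			/* wptr wrapped behind rptr */
	return used;				/* 0 == empty, at most d->size */
}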
495 
496 static void tn40_tx_db_close(struct tn40_txdb *d)
497 {
498 	if (d->start) {
499 		vfree(d->start);
500 		d->start = NULL;
501 	}
502 }
503 
504 /* Sizes of tx desc (including padding if needed) as function of the SKB's
505  * frag number
506  * 7 - is number of lwords in txd with one phys buffer
507  * 3 - is number of lwords used for every additional phys buffer
508  * for (i = 0; i < TN40_MAX_PBL; i++) {
509  *	lwords = 7 + (i * 3);
510  *	if (lwords & 1)
511  *		lwords++;	pad it with 1 lword
512  *	tn40_txd_sizes[i].bytes = lwords << 2;
513  *	tn40_txd_sizes[i].qwords = lwords >> 1;
514  * }
515  */
516 static struct {
517 	u16 bytes;
518 	u16 qwords;	/* qword = 64 bit */
519 } tn40_txd_sizes[] = {
520 	{0x20, 0x04},
521 	{0x28, 0x05},
522 	{0x38, 0x07},
523 	{0x40, 0x08},
524 	{0x50, 0x0a},
525 	{0x58, 0x0b},
526 	{0x68, 0x0d},
527 	{0x70, 0x0e},
528 	{0x80, 0x10},
529 	{0x88, 0x11},
530 	{0x98, 0x13},
531 	{0xa0, 0x14},
532 	{0xb0, 0x16},
533 	{0xb8, 0x17},
534 	{0xc8, 0x19},
535 	{0xd0, 0x1a},
536 	{0xe0, 0x1c},
537 	{0xe8, 0x1d},
538 	{0xf8, 0x1f},
539 };
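
/* Illustrative sketch, not part of the driver: regenerating the table above
 * from the formula in the preceding comment, as a sanity check.
 * my_check_txd_sizes() is a hypothetical helper; only the table itself is
 * used by the driver.
 */
static int my_check_txd_sizes(void)
{
	size_t n = sizeof(tn40_txd_sizes) / sizeof(tn40_txd_sizes[0]);
	size_t i;

	for (i = 0; i < n; i++) {
		int lwords = 7 + (int)i * 3;	/* 7 lwords + 3 per extra buffer */

		if (lwords & 1)
			lwords++;		/* pad to a whole qword */
		/* e.g. i == 0: 8 lwords -> 0x20 bytes, 0x04 qwords */
		if (tn40_txd_sizes[i].bytes != (lwords << 2) ||
		    tn40_txd_sizes[i].qwords != (lwords >> 1))
			return -1;		/* table and formula disagree */
	}
	return 0;
}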
540 
541 static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len)
542 {
543 	pbl->len = cpu_to_le32(len);
544 	pbl->pa_lo = cpu_to_le32(lower_32_bits(dma));
545 	pbl->pa_hi = cpu_to_le32(upper_32_bits(dma));
546 }
547 
548 static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len)
549 {
550 	db->wptr->len = len;
551 	db->wptr->addr.dma = dma;
552 }
553 
554 struct tn40_mapping_info {
555 	dma_addr_t dma;
556 	size_t size;
557 };
558 
559 /**
560  * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks
561  * @priv: NIC private structure
562  * @skb: socket buffer to map
563  * @txdd: pointer to tx descriptor to be updated
564  * @pkt_len: pointer to unsigned int value
565  *
566  * This function creates DMA mappings for skb's data blocks and writes them to
567  * PBL of a new tx descriptor. It also stores them in the tx db, so they could
568  * be unmapped after the data has been sent. It is the responsibility of the
569  * caller to make sure that there is enough space in the txdb. The last
570  * element holds a pointer to the skb itself and is marked with a negative length.
571  *
572  * Return: 0 on success and negative value on error.
573  */
574 static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb,
575 			   struct tn40_txd_desc *txdd, unsigned int *pkt_len)
576 {
577 	struct tn40_mapping_info info[TN40_MAX_PBL];
578 	int nr_frags = skb_shinfo(skb)->nr_frags;
579 	struct tn40_pbl *pbl = &txdd->pbl[0];
580 	struct tn40_txdb *db = &priv->txdb;
581 	unsigned int size;
582 	int i, len, ret;
583 	dma_addr_t dma;
584 
585 	netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb,
586 		   skb->len, skb->data_len, nr_frags);
587 	if (nr_frags > TN40_MAX_PBL - 1) {
588 		ret = skb_linearize(skb);
589 		if (ret)
590 			return ret;
591 		nr_frags = skb_shinfo(skb)->nr_frags;
592 	}
593 	/* initial skb */
594 	len = skb->len - skb->data_len;
595 	dma = dma_map_single(&priv->pdev->dev, skb->data, len,
596 			     DMA_TO_DEVICE);
597 	ret = dma_mapping_error(&priv->pdev->dev, dma);
598 	if (ret)
599 		return ret;
600 
601 	tn40_txdb_set(db, dma, len);
602 	tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
603 	*pkt_len = db->wptr->len;
604 
605 	for (i = 0; i < nr_frags; i++) {
606 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
607 
608 		size = skb_frag_size(frag);
609 		dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0,
610 				       size, DMA_TO_DEVICE);
611 
612 		ret = dma_mapping_error(&priv->pdev->dev, dma);
613 		if (ret)
614 			goto mapping_error;
615 		info[i].dma = dma;
616 		info[i].size = size;
617 	}
618 
619 	for (i = 0; i < nr_frags; i++) {
620 		tn40_tx_db_inc_wptr(db);
621 		tn40_txdb_set(db, info[i].dma, info[i].size);
622 		tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
623 		*pkt_len += db->wptr->len;
624 	}
625 
626 	/* SHORT_PKT_FIX */
627 	if (skb->len < TN40_SHORT_PACKET_SIZE)
628 		++nr_frags;
629 
630 	/* Add skb clean up info. */
631 	tn40_tx_db_inc_wptr(db);
632 	db->wptr->len = -tn40_txd_sizes[nr_frags].bytes;
633 	db->wptr->addr.skb = skb;
634 	tn40_tx_db_inc_wptr(db);
635 
636 	return 0;
637  mapping_error:
638 	dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len,
639 		       DMA_TO_DEVICE);
640 	for (; i > 0; i--)
641 		dma_unmap_page(&priv->pdev->dev, info[i - 1].dma,
642 			       info[i - 1].size, DMA_TO_DEVICE);
643 	return -ENOMEM;
644 }
645 
646 static int tn40_create_tx_ring(struct tn40_priv *priv)
647 {
648 	int ret;
649 
650 	ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size,
651 			      TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0,
652 			      TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0);
653 	if (ret)
654 		return ret;
655 
656 	ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size,
657 			      TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0,
658 			      TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0);
659 	if (ret)
660 		goto err_free_txd;
661 
662 	/* The TX db has to keep mappings for all packets sent (on
663 	 * TxD) and not yet reclaimed (on TxF).
664 	 */
665 	ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size));
666 	if (ret)
667 		goto err_free_txf;
668 
669 	/* SHORT_PKT_FIX */
670 	priv->b0_len = 64;
671 	priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len,
672 					 &priv->b0_dma, GFP_KERNEL);
673 	if (!priv->b0_va)
674 		goto err_free_db;
675 
676 	priv->tx_level = TN40_MAX_TX_LEVEL;
677 	priv->tx_update_mark = priv->tx_level - 1024;
678 	return 0;
679 err_free_db:
680 	tn40_tx_db_close(&priv->txdb);
681 err_free_txf:
682 	tn40_fifo_free(priv, &priv->txf_fifo0.m);
683 err_free_txd:
684 	tn40_fifo_free(priv, &priv->txd_fifo0.m);
685 	return -ENOMEM;
686 }
687 
688 /**
689  * tn40_tx_space - Calculate the available space in the TX fifo.
690  * @priv: NIC private structure
691  *
692  * Return: available space in TX fifo in bytes
693  */
694 static int tn40_tx_space(struct tn40_priv *priv)
695 {
696 	struct tn40_txd_fifo *f = &priv->txd_fifo0;
697 	int fsize;
698 
699 	f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR;
700 	fsize = f->m.rptr - f->m.wptr;
701 	if (fsize <= 0)
702 		fsize = f->m.memsz + fsize;
703 	return fsize;
704 }
705 
706 #define TN40_TXD_FULL_CHECKSUM 7
707 
708 static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev)
709 {
710 	struct tn40_priv *priv = netdev_priv(ndev);
711 	struct tn40_txd_fifo *f = &priv->txd_fifo0;
712 	int txd_checksum = TN40_TXD_FULL_CHECKSUM;
713 	struct tn40_txd_desc *txdd;
714 	int nr_frags, len, err;
715 	unsigned int pkt_len;
716 	int txd_vlan_id = 0;
717 	int txd_lgsnd = 0;
718 	int txd_vtag = 0;
719 	int txd_mss = 0;
720 
721 	/* Build tx descriptor */
722 	txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr);
723 	err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len);
724 	if (err) {
725 		u64_stats_update_begin(&priv->syncp);
726 		priv->stats.tx_dropped++;
727 		u64_stats_update_end(&priv->syncp);
728 		dev_kfree_skb(skb);
729 		return NETDEV_TX_OK;
730 	}
731 	nr_frags = skb_shinfo(skb)->nr_frags;
732 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
733 		txd_checksum = 0;
734 
735 	if (skb_shinfo(skb)->gso_size) {
736 		txd_mss = skb_shinfo(skb)->gso_size;
737 		txd_lgsnd = 1;
738 		netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb,
739 			   pkt_len, txd_mss);
740 	}
741 	if (skb_vlan_tag_present(skb)) {
742 		/* Don't cut VLAN ID to 12 bits */
743 		txd_vlan_id = skb_vlan_tag_get(skb);
744 		txd_vtag = 1;
745 	}
746 	txdd->va_hi = 0;
747 	txdd->va_lo = 0;
748 	txdd->length = cpu_to_le16(pkt_len);
749 	txdd->mss = cpu_to_le16(txd_mss);
750 	txdd->txd_val1 =
751 		cpu_to_le32(TN40_TXD_W1_VAL
752 			    (tn40_txd_sizes[nr_frags].qwords, txd_checksum,
753 			     txd_vtag, txd_lgsnd, txd_vlan_id));
754 	netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags,
755 		   tn40_txd_sizes[nr_frags].qwords);
756 	netdev_dbg(priv->ndev, "=== TxD desc =====================\n");
757 	netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n",
758 		   txdd->txd_val1);
759 	netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss,
760 		   txdd->length);
761 	/* SHORT_PKT_FIX */
762 	if (pkt_len < TN40_SHORT_PACKET_SIZE) {
763 		struct tn40_pbl *pbl = &txdd->pbl[++nr_frags];
764 
765 		txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE);
766 		txdd->txd_val1 =
767 			cpu_to_le32(TN40_TXD_W1_VAL
768 				    (tn40_txd_sizes[nr_frags].qwords,
769 				     txd_checksum, txd_vtag, txd_lgsnd,
770 				     txd_vlan_id));
771 		pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len);
772 		pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma));
773 		pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma));
774 		netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX   ==============\n");
775 		netdev_dbg(priv->ndev, "=== nr_frags : %d   ==============\n",
776 			   nr_frags);
777 	}
778 
779 	/* Increment TXD write pointer. In case of fifo wrapping copy
780 	 * remainder of the descriptor to the beginning.
781 	 */
782 	f->m.wptr += tn40_txd_sizes[nr_frags].bytes;
783 	len = f->m.wptr - f->m.memsz;
784 	if (unlikely(len >= 0)) {
785 		f->m.wptr = len;
786 		if (len > 0)
787 			memcpy(f->m.va, f->m.va + f->m.memsz, len);
788 	}
789 	/* Force memory writes to complete before letting the HW know
790 	 * there are new descriptors to fetch.
791 	 */
792 	wmb();
793 
794 	priv->tx_level -= tn40_txd_sizes[nr_frags].bytes;
795 	if (priv->tx_level > priv->tx_update_mark) {
796 		tn40_write_reg(priv, f->m.reg_wptr,
797 			       f->m.wptr & TN40_TXF_WPTR_WR_PTR);
798 	} else {
799 		if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) {
800 			priv->tx_noupd = 0;
801 			tn40_write_reg(priv, f->m.reg_wptr,
802 				       f->m.wptr & TN40_TXF_WPTR_WR_PTR);
803 		}
804 	}
805 
806 	u64_stats_update_begin(&priv->syncp);
807 	priv->stats.tx_packets++;
808 	priv->stats.tx_bytes += pkt_len;
809 	u64_stats_update_end(&priv->syncp);
810 	if (priv->tx_level < TN40_MIN_TX_LEVEL) {
811 		netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level);
812 		netif_stop_queue(ndev);
813 	}
814 
815 	return NETDEV_TX_OK;
816 }
817 
818 static void tn40_tx_cleanup(struct tn40_priv *priv)
819 {
820 	struct tn40_txf_fifo *f = &priv->txf_fifo0;
821 	struct tn40_txdb *db = &priv->txdb;
822 	int tx_level = 0;
823 
824 	f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK;
825 
826 	netif_tx_lock(priv->ndev);
827 	while (f->m.wptr != f->m.rptr) {
828 		f->m.rptr += TN40_TXF_DESC_SZ;
829 		f->m.rptr &= f->m.size_mask;
830 		/* Unmap all fragments */
831 		/* The tx_maps containing DMA mappings come first */
832 		do {
833 			dma_addr_t addr = db->rptr->addr.dma;
834 			size_t size =  db->rptr->len;
835 
836 			netif_tx_unlock(priv->ndev);
837 			dma_unmap_page(&priv->pdev->dev, addr,
838 				       size, DMA_TO_DEVICE);
839 			netif_tx_lock(priv->ndev);
840 			tn40_tx_db_inc_rptr(db);
841 		} while (db->rptr->len > 0);
842 		tx_level -= db->rptr->len; /* '-' Because the len is negative */
843 
844 		/* Now should come skb pointer - free it */
845 		dev_kfree_skb_any(db->rptr->addr.skb);
846 		netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n",
847 			   db->rptr->addr.skb, -db->rptr->len);
848 		tn40_tx_db_inc_rptr(db);
849 	}
850 
851 	/* Let the HW know which TXF descriptors were cleaned */
852 	tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
853 
854 	/* We reclaimed resources, so in case the Q is stopped by xmit
855 	 * callback, we resume the transmission and use tx_lock to
856 	 * synchronize with xmit.
857 	 */
858 	priv->tx_level += tx_level;
859 	if (priv->tx_noupd) {
860 		priv->tx_noupd = 0;
861 		tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr,
862 			       priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR);
863 	}
864 	if (unlikely(netif_queue_stopped(priv->ndev) &&
865 		     netif_carrier_ok(priv->ndev) &&
866 		     (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) {
867 		netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level);
868 		netif_wake_queue(priv->ndev);
869 	}
870 	netif_tx_unlock(priv->ndev);
871 }
872 
873 static void tn40_tx_free_skbs(struct tn40_priv *priv)
874 {
875 	struct tn40_txdb *db = &priv->txdb;
876 
877 	while (db->rptr != db->wptr) {
878 		if (likely(db->rptr->len))
879 			dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
880 				       db->rptr->len, DMA_TO_DEVICE);
881 		else
882 			dev_kfree_skb(db->rptr->addr.skb);
883 		tn40_tx_db_inc_rptr(db);
884 	}
885 }
886 
887 static void tn40_destroy_tx_ring(struct tn40_priv *priv)
888 {
889 	tn40_tx_free_skbs(priv);
890 	tn40_fifo_free(priv, &priv->txd_fifo0.m);
891 	tn40_fifo_free(priv, &priv->txf_fifo0.m);
892 	tn40_tx_db_close(&priv->txdb);
893 	/* SHORT_PKT_FIX */
894 	if (priv->b0_len) {
895 		dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va,
896 				  priv->b0_dma);
897 		priv->b0_len = 0;
898 	}
899 }
900 
901 /**
902  * tn40_tx_push_desc - Push a descriptor to TxD fifo.
903  *
904  * @priv: NIC private structure
905  * @data: desc's data
906  * @size: desc's size
907  *
908  * This function pushes desc to TxD fifo and overlaps it if needed.
909  *
910  * This function does not check for available space, nor does it check
911  * that the data size is smaller than the fifo size. Checking for
912  * space is the responsibility of the caller.
913  */
914 static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size)
915 {
916 	struct tn40_txd_fifo *f = &priv->txd_fifo0;
917 	int i = f->m.memsz - f->m.wptr;
918 
919 	if (size == 0)
920 		return;
921 
922 	if (i > size) {
923 		memcpy(f->m.va + f->m.wptr, data, size);
924 		f->m.wptr += size;
925 	} else {
926 		memcpy(f->m.va + f->m.wptr, data, i);
927 		f->m.wptr = size - i;
928 		memcpy(f->m.va, data + i, f->m.wptr);
929 	}
930 	tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
931 }
932 
933 /**
934  * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way.
935  *
936  * @priv: NIC private structure
937  * @data: descriptor data
938  * @size: descriptor size
939  *
940  * This function does check for available space and, if necessary,
941  * waits for the NIC to read existing data before writing new data.
942  */
943 static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size)
944 {
945 	int timer = 0;
946 
947 	while (size > 0) {
948 		/* We subtract 8 because when the fifo is completely full,
949 		 * rptr == wptr, which also means the fifo is empty. The SW
950 		 * can tell the difference, but it is unclear whether the HW
951 		 * can, so never let the fifo fill up completely.
952 		 */
953 		int avail = tn40_tx_space(priv) - 8;
954 
955 		if (avail <= 0) {
956 			if (timer++ > 300) /* Prevent endless loop */
957 				break;
958 			/* Give the HW a chance to clean the fifo */
959 			usleep_range(50, 60);
960 			continue;
961 		}
962 		avail = min(avail, size);
963 		netdev_dbg(priv->ndev,
964 			   "about to push  %d bytes starting %p size %d\n",
965 			   avail, data, size);
966 		tn40_tx_push_desc(priv, data, avail);
967 		size -= avail;
968 		data += avail;
969 	}
970 }
971 
972 int tn40_set_link_speed(struct tn40_priv *priv, u32 speed)
973 {
974 	u32 val;
975 	int i;
976 
977 	netdev_dbg(priv->ndev, "speed %d\n", speed);
978 	switch (speed) {
979 	case SPEED_10000:
980 	case SPEED_5000:
981 	case SPEED_2500:
982 		netdev_dbg(priv->ndev, "link_speed %d\n", speed);
983 
984 		tn40_write_reg(priv, 0x1010, 0x217);	/*ETHSD.REFCLK_CONF  */
985 		tn40_write_reg(priv, 0x104c, 0x4c);	/*ETHSD.L0_RX_PCNT  */
986 		tn40_write_reg(priv, 0x1050, 0x4c);	/*ETHSD.L1_RX_PCNT  */
987 		tn40_write_reg(priv, 0x1054, 0x4c);	/*ETHSD.L2_RX_PCNT  */
988 		tn40_write_reg(priv, 0x1058, 0x4c);	/*ETHSD.L3_RX_PCNT  */
989 		tn40_write_reg(priv, 0x102c, 0x434);	/*ETHSD.L0_TX_PCNT  */
990 		tn40_write_reg(priv, 0x1030, 0x434);	/*ETHSD.L1_TX_PCNT  */
991 		tn40_write_reg(priv, 0x1034, 0x434);	/*ETHSD.L2_TX_PCNT  */
992 		tn40_write_reg(priv, 0x1038, 0x434);	/*ETHSD.L3_TX_PCNT  */
993 		tn40_write_reg(priv, 0x6300, 0x0400);	/*MAC.PCS_CTRL */
994 
995 		tn40_write_reg(priv, 0x1018, 0x00);	/*Mike2 */
996 		udelay(5);
997 		tn40_write_reg(priv, 0x1018, 0x04);	/*Mike2 */
998 		udelay(5);
999 		tn40_write_reg(priv, 0x1018, 0x06);	/*Mike2 */
1000 		udelay(5);
1001 		/*MikeFix1 */
1002 		/*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
1003 		tn40_write_reg(priv, 0x103c, 0x81644);	/*ETHSD.L0_TX_DCNT  */
1004 		tn40_write_reg(priv, 0x1040, 0x81644);	/*ETHSD.L1_TX_DCNT  */
1005 		tn40_write_reg(priv, 0x1044, 0x81644);	/*ETHSD.L2_TX_DCNT  */
1006 		tn40_write_reg(priv, 0x1048, 0x81644);	/*ETHSD.L3_TX_DCNT  */
1007 		tn40_write_reg(priv, 0x1014, 0x043);	/*ETHSD.INIT_STAT */
1008 		for (i = 1000; i; i--) {
1009 			usleep_range(50, 60);
1010 			/*ETHSD.INIT_STAT */
1011 			val = tn40_read_reg(priv, 0x1014);
1012 			if (val & (1 << 9)) {
1013 				/*ETHSD.INIT_STAT */
1014 				tn40_write_reg(priv, 0x1014, 0x3);
1015 				/*ETHSD.INIT_STAT */
1016 				val = tn40_read_reg(priv, 0x1014);
1017 
1018 				break;
1019 			}
1020 		}
1021 		if (!i)
1022 			netdev_err(priv->ndev, "MAC init timeout!\n");
1023 
1024 		tn40_write_reg(priv, 0x6350, 0x0);	/*MAC.PCS_IF_MODE */
1025 		tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13);	/*0x93//0x13 */
1026 		tn40_write_reg(priv, 0x111c, 0x7ff);	/*MAC.MAC_RST_CNT */
1027 		usleep_range(2000, 2100);
1028 
1029 		tn40_write_reg(priv, 0x111c, 0x0);	/*MAC.MAC_RST_CNT */
1030 		break;
1031 
1032 	case SPEED_1000:
1033 	case SPEED_100:
1034 		tn40_write_reg(priv, 0x1010, 0x613);	/*ETHSD.REFCLK_CONF */
1035 		tn40_write_reg(priv, 0x104c, 0x4d);	/*ETHSD.L0_RX_PCNT  */
1036 		tn40_write_reg(priv, 0x1050, 0x0);	/*ETHSD.L1_RX_PCNT  */
1037 		tn40_write_reg(priv, 0x1054, 0x0);	/*ETHSD.L2_RX_PCNT  */
1038 		tn40_write_reg(priv, 0x1058, 0x0);	/*ETHSD.L3_RX_PCNT  */
1039 		tn40_write_reg(priv, 0x102c, 0x35);	/*ETHSD.L0_TX_PCNT  */
1040 		tn40_write_reg(priv, 0x1030, 0x0);	/*ETHSD.L1_TX_PCNT  */
1041 		tn40_write_reg(priv, 0x1034, 0x0);	/*ETHSD.L2_TX_PCNT  */
1042 		tn40_write_reg(priv, 0x1038, 0x0);	/*ETHSD.L3_TX_PCNT  */
1043 		tn40_write_reg(priv, 0x6300, 0x01140);	/*MAC.PCS_CTRL */
1044 
1045 		tn40_write_reg(priv, 0x1014, 0x043);	/*ETHSD.INIT_STAT */
1046 		for (i = 1000; i; i--) {
1047 			usleep_range(50, 60);
1048 			val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */
1049 			if (val & (1 << 9)) {
1050 				/*ETHSD.INIT_STAT */
1051 				tn40_write_reg(priv, 0x1014, 0x3);
1052 				/*ETHSD.INIT_STAT */
1053 				val = tn40_read_reg(priv, 0x1014);
1054 
1055 				break;
1056 			}
1057 		}
1058 		if (!i)
1059 			netdev_err(priv->ndev, "MAC init timeout!\n");
1060 
1061 		tn40_write_reg(priv, 0x6350, 0x2b);	/*MAC.PCS_IF_MODE 1g */
1062 		tn40_write_reg(priv, 0x6310, 0x9801);	/*MAC.PCS_DEV_AB */
1063 
1064 		tn40_write_reg(priv, 0x6314, 0x1);	/*MAC.PCS_PART_AB */
1065 		tn40_write_reg(priv, 0x6348, 0xc8);	/*MAC.PCS_LINK_LO */
1066 		tn40_write_reg(priv, 0x634c, 0xc8);	/*MAC.PCS_LINK_HI */
1067 		usleep_range(50, 60);
1068 		tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13);	/*0x93//0x13 */
1069 		tn40_write_reg(priv, 0x111c, 0x7ff);	/*MAC.MAC_RST_CNT */
1070 		usleep_range(2000, 2100);
1071 
1072 		tn40_write_reg(priv, 0x111c, 0x0);	/*MAC.MAC_RST_CNT */
1073 		tn40_write_reg(priv, 0x6300, 0x1140);	/*MAC.PCS_CTRL */
1074 		break;
1075 
1076 	case 0:		/* Link down */
1077 		tn40_write_reg(priv, 0x104c, 0x0);	/*ETHSD.L0_RX_PCNT  */
1078 		tn40_write_reg(priv, 0x1050, 0x0);	/*ETHSD.L1_RX_PCNT  */
1079 		tn40_write_reg(priv, 0x1054, 0x0);	/*ETHSD.L2_RX_PCNT  */
1080 		tn40_write_reg(priv, 0x1058, 0x0);	/*ETHSD.L3_RX_PCNT  */
1081 		tn40_write_reg(priv, 0x102c, 0x0);	/*ETHSD.L0_TX_PCNT  */
1082 		tn40_write_reg(priv, 0x1030, 0x0);	/*ETHSD.L1_TX_PCNT  */
1083 		tn40_write_reg(priv, 0x1034, 0x0);	/*ETHSD.L2_TX_PCNT  */
1084 		tn40_write_reg(priv, 0x1038, 0x0);	/*ETHSD.L3_TX_PCNT  */
1085 
1086 		tn40_write_reg(priv, TN40_REG_CTRLST, 0x800);
1087 		tn40_write_reg(priv, 0x111c, 0x7ff);	/*MAC.MAC_RST_CNT */
1088 		usleep_range(2000, 2100);
1089 
1090 		tn40_write_reg(priv, 0x111c, 0x0);	/*MAC.MAC_RST_CNT */
1091 		break;
1092 
1093 	default:
1094 		netdev_err(priv->ndev,
1095 			   "Link speed was not identified yet (%d)\n", speed);
1096 		speed = 0;
1097 		break;
1098 	}
1099 	return speed;
1100 }
1101 
1102 static void tn40_link_changed(struct tn40_priv *priv)
1103 {
1104 	u32 link = tn40_read_reg(priv,
1105 				 TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT;
1106 
1107 	netdev_dbg(priv->ndev, "link changed %u\n", link);
1108 }
1109 
1110 static void tn40_isr_extra(struct tn40_priv *priv, u32 isr)
1111 {
1112 	if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) {
1113 		netdev_dbg(priv->ndev, "isr = 0x%x\n", isr);
1114 		tn40_link_changed(priv);
1115 	}
1116 }
1117 
1118 static irqreturn_t tn40_isr_napi(int irq, void *dev)
1119 {
1120 	struct tn40_priv *priv = netdev_priv((struct net_device *)dev);
1121 	u32 isr;
1122 
1123 	isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0);
1124 
1125 	if (unlikely(!isr)) {
1126 		tn40_enable_interrupts(priv);
1127 		return IRQ_NONE;	/* Not our interrupt */
1128 	}
1129 
1130 	if (isr & TN40_IR_EXTRA)
1131 		tn40_isr_extra(priv, isr);
1132 
1133 	if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) {
1134 		if (likely(napi_schedule_prep(&priv->napi))) {
1135 			__napi_schedule(&priv->napi);
1136 			return IRQ_HANDLED;
1137 		}
1138 		/* We get here if an interrupt has slipped into the
1139 		 * small time window between these lines in
1140 		 * tn40_poll: tn40_enable_interrupts(priv); return 0;
1141 		 *
1142 		 * Currently interrupts are disabled (since we read
1143 		 * the ISR register) and we have failed to register
1144 		 * the next poll. So we read the regs to trigger the
1145 		 * chip and allow further interrupts.
1146 		 */
1147 		tn40_read_reg(priv, TN40_REG_TXF_WPTR_0);
1148 		tn40_read_reg(priv, TN40_REG_RXD_WPTR_0);
1149 	}
1150 
1151 	tn40_enable_interrupts(priv);
1152 	return IRQ_HANDLED;
1153 }
1154 
1155 static int tn40_poll(struct napi_struct *napi, int budget)
1156 {
1157 	struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi);
1158 	int work_done;
1159 
1160 	tn40_tx_cleanup(priv);
1161 
1162 	if (!budget)
1163 		return 0;
1164 
1165 	work_done = tn40_rx_receive(priv, budget);
1166 	if (work_done == budget)
1167 		return budget;
1168 
1169 	if (napi_complete_done(napi, work_done))
1170 		tn40_enable_interrupts(priv);
1171 	return work_done;
1172 }
1173 
1174 static int tn40_fw_load(struct tn40_priv *priv)
1175 {
1176 	const struct firmware *fw = NULL;
1177 	int master, ret;
1178 	u32 val;
1179 
1180 	ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev);
1181 	if (ret)
1182 		return ret;
1183 
1184 	master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE);
1185 	if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) {
1186 		netdev_dbg(priv->ndev, "Loading FW...\n");
1187 		tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size);
1188 		msleep(100);
1189 	}
1190 	ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false,
1191 				priv, TN40_REG_INIT_STATUS);
1192 	if (master)
1193 		tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1);
1194 
1195 	if (ret) {
1196 		netdev_err(priv->ndev, "firmware loading failed\n");
1197 		netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%xd\n",
1198 			   tn40_read_reg(priv, TN40_REG_VPC),
1199 			   tn40_read_reg(priv, TN40_REG_VIC),
1200 			   tn40_read_reg(priv, TN40_REG_INIT_STATUS));
1201 		ret = -EIO;
1202 	} else {
1203 		netdev_dbg(priv->ndev, "firmware loading success\n");
1204 	}
1205 	release_firmware(fw);
1206 	return ret;
1207 }
1208 
1209 static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv)
1210 {
1211 	u32 val;
1212 
1213 	netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
1214 		   tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
1215 		   tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
1216 		   tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
1217 
1218 	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
1219 	tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val);
1220 	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
1221 	tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val);
1222 	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
1223 	tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val);
1224 
1225 	/* More than IP MAC address */
1226 	tn40_write_reg(priv, TN40_REG_MAC_ADDR_0,
1227 		       (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) |
1228 		       (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0]));
1229 	tn40_write_reg(priv, TN40_REG_MAC_ADDR_1,
1230 		       (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4]));
1231 
1232 	netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
1233 		   tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
1234 		   tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
1235 		   tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
1236 }
1237 
1238 static void tn40_hw_start(struct tn40_priv *priv)
1239 {
1240 	tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0X3FE0);
1241 	tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0X10fd);
1242 	/*MikeFix1 */
1243 	/*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
1244 	tn40_write_reg(priv, 0x103c, 0x81644);	/*ETHSD.L0_TX_DCNT  */
1245 	tn40_write_reg(priv, 0x1040, 0x81644);	/*ETHSD.L1_TX_DCNT  */
1246 	tn40_write_reg(priv, 0x1044, 0x81644);	/*ETHSD.L2_TX_DCNT  */
1247 	tn40_write_reg(priv, 0x1048, 0x81644);	/*ETHSD.L3_TX_DCNT  */
1248 	tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10);
1249 	tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010);
1250 	tn40_write_reg(priv, TN40_REG_RX_FULLNESS, 0);
1251 	tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0);
1252 
1253 	tn40_write_reg(priv, TN40_REG_VGLB, 0);
1254 	tn40_write_reg(priv, TN40_REG_MAX_FRAME_A,
1255 		       priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL);
1256 	tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm);
1257 	tn40_write_reg(priv, TN40_REG_RDINTCM2, 0);
1258 
1259 	/* old val = 0x300064 */
1260 	tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm);
1261 
1262 	/* Enable timer interrupt once in 2 secs. */
1263 	tn40_restore_mac(priv->ndev, priv);
1264 
1265 	/* Pause frame */
1266 	tn40_write_reg(priv, 0x12E0, 0x28);
1267 	tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF);
1268 	tn40_write_reg(priv, 0x6064, 0xF);
1269 
1270 	tn40_write_reg(priv, TN40_REG_GMAC_RXF_A,
1271 		       TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC |
1272 		       TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB);
1273 
1274 	tn40_enable_interrupts(priv);
1275 }
1276 
1277 static int tn40_hw_reset(struct tn40_priv *priv)
1278 {
1279 	u32 val;
1280 
1281 	/* Reset sequences: read, write 1, read, write 0 */
1282 	val = tn40_read_reg(priv, TN40_REG_CLKPLL);
1283 	tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8);
1284 	usleep_range(50, 60);
1285 	val = tn40_read_reg(priv, TN40_REG_CLKPLL);
1286 	tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST);
1287 
1288 	/* Check that the PLLs are locked and reset ended */
1289 	val = read_poll_timeout(tn40_read_reg, val,
1290 				(val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD,
1291 				10000, 700000, false, priv, TN40_REG_CLKPLL);
1292 	if (val)
1293 		return -EIO;
1294 
1295 	usleep_range(50, 60);
1296 	/* Do any PCI-E read transaction */
1297 	tn40_read_reg(priv, TN40_REG_RXD_CFG0_0);
1298 	return 0;
1299 }
1300 
1301 static void tn40_sw_reset(struct tn40_priv *priv)
1302 {
1303 	int i, ret;
1304 	u32 val;
1305 
1306 	/* 1. load MAC (obsolete) */
1307 	/* 2. disable Rx (and Tx) */
1308 	tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0);
1309 	msleep(100);
1310 	/* 3. Disable port */
1311 	tn40_write_reg(priv, TN40_REG_DIS_PORT, 1);
1312 	/* 4. Disable queue */
1313 	tn40_write_reg(priv, TN40_REG_DIS_QU, 1);
1314 	/* 5. Wait until hw is disabled */
1315 	ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000,
1316 				false, priv, TN40_REG_RST_PORT);
1317 	if (ret)
1318 		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
1319 
1320 	/* 6. Disable interrupts */
1321 	tn40_write_reg(priv, TN40_REG_RDINTCM0, 0);
1322 	tn40_write_reg(priv, TN40_REG_TDINTCM0, 0);
1323 	tn40_write_reg(priv, TN40_REG_IMR, 0);
1324 	tn40_read_reg(priv, TN40_REG_ISR);
1325 
1326 	/* 7. Reset queue */
1327 	tn40_write_reg(priv, TN40_REG_RST_QU, 1);
1328 	/* 8. Reset port */
1329 	tn40_write_reg(priv, TN40_REG_RST_PORT, 1);
1330 	/* 9. Zero all read and write pointers */
1331 	for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10)
1332 		tn40_write_reg(priv, i, 0);
1333 	/* 10. Unset port disable */
1334 	tn40_write_reg(priv, TN40_REG_DIS_PORT, 0);
1335 	/* 11. Unset queue disable */
1336 	tn40_write_reg(priv, TN40_REG_DIS_QU, 0);
1337 	/* 12. Unset queue reset */
1338 	tn40_write_reg(priv, TN40_REG_RST_QU, 0);
1339 	/* 13. Unset port reset */
1340 	tn40_write_reg(priv, TN40_REG_RST_PORT, 0);
1341 	/* 14. Enable Rx */
1342 	/* Skipped. will be done later */
1343 }
1344 
1345 static int tn40_start(struct tn40_priv *priv)
1346 {
1347 	int ret;
1348 
1349 	ret = tn40_create_tx_ring(priv);
1350 	if (ret) {
1351 		netdev_err(priv->ndev, "failed to tx init %d\n", ret);
1352 		return ret;
1353 	}
1354 
1355 	ret = tn40_create_rx_ring(priv);
1356 	if (ret) {
1357 		netdev_err(priv->ndev, "failed to rx init %d\n", ret);
1358 		goto err_tx_ring;
1359 	}
1360 
1361 	tn40_rx_alloc_buffers(priv);
1362 	if (tn40_rxdb_available(priv->rxdb0) != 1) {
1363 		ret = -ENOMEM;
1364 		netdev_err(priv->ndev, "failed to allocate rx buffers\n");
1365 		goto err_rx_ring;
1366 	}
1367 
1368 	ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED,
1369 			  priv->ndev->name, priv->ndev);
1370 	if (ret) {
1371 		netdev_err(priv->ndev, "failed to request irq %d\n", ret);
1372 		goto err_rx_ring;
1373 	}
1374 
1375 	tn40_hw_start(priv);
1376 	return 0;
1377 err_rx_ring:
1378 	tn40_destroy_rx_ring(priv);
1379 err_tx_ring:
1380 	tn40_destroy_tx_ring(priv);
1381 	return ret;
1382 }
1383 
1384 static void tn40_stop(struct tn40_priv *priv)
1385 {
1386 	tn40_disable_interrupts(priv);
1387 	free_irq(priv->pdev->irq, priv->ndev);
1388 	tn40_sw_reset(priv);
1389 	tn40_destroy_tx_ring(priv);
1390 	tn40_destroy_rx_ring(priv);
1391 }
1392 
1393 static int tn40_close(struct net_device *ndev)
1394 {
1395 	struct tn40_priv *priv = netdev_priv(ndev);
1396 
1397 	phylink_stop(priv->phylink);
1398 	phylink_disconnect_phy(priv->phylink);
1399 
1400 	napi_disable(&priv->napi);
1401 	netif_napi_del(&priv->napi);
1402 	tn40_stop(priv);
1403 	return 0;
1404 }
1405 
1406 static int tn40_open(struct net_device *dev)
1407 {
1408 	struct tn40_priv *priv = netdev_priv(dev);
1409 	int ret;
1410 
1411 	ret = phylink_connect_phy(priv->phylink, priv->phydev);
1412 	if (ret) {
1413 		netdev_err(dev, "failed to connect to phy %d\n", ret);
1414 		return ret;
1415 	}
1416 	tn40_sw_reset(priv);
1417 	ret = tn40_start(priv);
1418 	if (ret) {
1419 		phylink_disconnect_phy(priv->phylink);
1420 		netdev_err(dev, "failed to start %d\n", ret);
1421 		return ret;
1422 	}
1423 	napi_enable(&priv->napi);
1424 	phylink_start(priv->phylink);
1425 	netif_start_queue(priv->ndev);
1426 	return 0;
1427 }
1428 
1429 static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid,
1430 			       int enable)
1431 {
1432 	struct tn40_priv *priv = netdev_priv(ndev);
1433 	u32 reg, bit, val;
1434 
1435 	netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable);
1436 	reg = TN40_REG_VLAN_0 + (vid / 32) * 4;
1437 	bit = 1 << vid % 32;
1438 	val = tn40_read_reg(priv, reg);
1439 	netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit);
1440 	if (enable)
1441 		val |= bit;
1442 	else
1443 		val &= ~bit;
1444 	netdev_dbg(priv->ndev, "new val %x\n", val);
1445 	tn40_write_reg(priv, reg, val);
1446 }
1447 
1448 static int tn40_vlan_rx_add_vid(struct net_device *ndev,
1449 				__always_unused __be16 proto, u16 vid)
1450 {
1451 	__tn40_vlan_rx_vid(ndev, vid, 1);
1452 	return 0;
1453 }
1454 
1455 static int tn40_vlan_rx_kill_vid(struct net_device *ndev,
1456 				 __always_unused __be16 proto, u16 vid)
1457 {
1458 	__tn40_vlan_rx_vid(ndev, vid, 0);
1459 	return 0;
1460 }
1461 
1462 static void tn40_setmulti(struct net_device *ndev)
1463 {
1464 	u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB |
1465 		TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC;
1466 	struct tn40_priv *priv = netdev_priv(ndev);
1467 	int i;
1468 
1469 	/* IMF - imperfect (hash) rx multicast filter */
1470 	/* PMF - perfect rx multicast filter */
1471 
1472 	/* FIXME: RXE(OFF) */
1473 	if (ndev->flags & IFF_PROMISC) {
1474 		rxf_val |= TN40_GMAC_RX_FILTER_PRM;
1475 	} else if (ndev->flags & IFF_ALLMULTI) {
1476 		/* set IMF to accept all multicast frames */
1477 		for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
1478 			tn40_write_reg(priv,
1479 				       TN40_REG_RX_MCST_HASH0 + i * 4, ~0);
1480 	} else if (netdev_mc_count(ndev)) {
1481 		struct netdev_hw_addr *mclist;
1482 		u32 reg, val;
1483 		u8 hash;
1484 
1485 		/* Set IMF to deny all multicast frames */
1486 		for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
1487 			tn40_write_reg(priv,
1488 				       TN40_REG_RX_MCST_HASH0 + i * 4, 0);
1489 
1490 		/* Set PMF to deny all multicast frames */
1491 		for (i = 0; i < TN40_MAC_MCST_NUM; i++) {
1492 			tn40_write_reg(priv,
1493 				       TN40_REG_RX_MAC_MCST0 + i * 8, 0);
1494 			tn40_write_reg(priv,
1495 				       TN40_REG_RX_MAC_MCST1 + i * 8, 0);
1496 		}
1497 		/* Use PMF to accept first MAC_MCST_NUM (15) addresses */
1498 
1499 		/* TBD: Sort the addresses and write them in ascending
1500 		 * order into RX_MAC_MCST regs. we skip this phase now
1501 		 * and accept ALL multicast frames through IMF. Accept
1502 		 * the rest of addresses throw IMF.
1503 		 */
1504 		netdev_for_each_mc_addr(mclist, ndev) {
1505 			hash = 0;
1506 			for (i = 0; i < ETH_ALEN; i++)
1507 				hash ^= mclist->addr[i];
1508 
1509 			reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2);
1510 			val = tn40_read_reg(priv, reg);
1511 			val |= (1 << (hash % 32));
1512 			tn40_write_reg(priv, reg, val);
1513 		}
1514 	} else {
1515 		rxf_val |= TN40_GMAC_RX_FILTER_AB;
1516 	}
1517 	tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val);
1518 	/* Enable RX */
1519 	/* FIXME: RXE(ON) */
1520 }
1521 
1522 static int tn40_set_mac(struct net_device *ndev, void *p)
1523 {
1524 	struct tn40_priv *priv = netdev_priv(ndev);
1525 	struct sockaddr *addr = p;
1526 
1527 	eth_hw_addr_set(ndev, addr->sa_data);
1528 	tn40_restore_mac(ndev, priv);
1529 	return 0;
1530 }
1531 
1532 static void tn40_mac_init(struct tn40_priv *priv)
1533 {
1534 	u8 addr[ETH_ALEN];
1535 	u64 val;
1536 
1537 	val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A);
1538 	val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16;
1539 	val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32;
1540 
1541 	u64_to_ether_addr(val, addr);
1542 	eth_hw_addr_set(priv->ndev, addr);
1543 }
1544 
1545 static void tn40_get_stats(struct net_device *ndev,
1546 			   struct rtnl_link_stats64 *stats)
1547 {
1548 	struct tn40_priv *priv = netdev_priv(ndev);
1549 	unsigned int start;
1550 
1551 	do {
1552 		start = u64_stats_fetch_begin(&priv->syncp);
1553 		stats->tx_packets = priv->stats.tx_packets;
1554 		stats->tx_bytes = priv->stats.tx_bytes;
1555 		stats->tx_dropped = priv->stats.tx_dropped;
1556 
1557 		stats->rx_packets = priv->stats.rx_packets;
1558 		stats->rx_bytes = priv->stats.rx_bytes;
1559 		stats->rx_dropped = priv->stats.rx_dropped;
1560 		stats->rx_errors = priv->stats.rx_errors;
1561 	} while (u64_stats_fetch_retry(&priv->syncp, start));
1562 }
1563 
1564 static const struct net_device_ops tn40_netdev_ops = {
1565 	.ndo_open = tn40_open,
1566 	.ndo_stop = tn40_close,
1567 	.ndo_start_xmit = tn40_start_xmit,
1568 	.ndo_validate_addr = eth_validate_addr,
1569 	.ndo_set_rx_mode = tn40_setmulti,
1570 	.ndo_get_stats64 = tn40_get_stats,
1571 	.ndo_set_mac_address = tn40_set_mac,
1572 	.ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid,
1573 	.ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid,
1574 };
1575 
1576 static int tn40_ethtool_get_link_ksettings(struct net_device *ndev,
1577 					   struct ethtool_link_ksettings *cmd)
1578 {
1579 	struct tn40_priv *priv = netdev_priv(ndev);
1580 
1581 	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
1582 }
1583 
1584 static const struct ethtool_ops tn40_ethtool_ops = {
1585 	.get_link = ethtool_op_get_link,
1586 	.get_link_ksettings = tn40_ethtool_get_link_ksettings,
1587 };
1588 
1589 static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx,
1590 				    struct netdev_queue_stats_rx *stats)
1591 {
1592 	struct tn40_priv *priv = netdev_priv(ndev);
1593 	unsigned int start;
1594 
1595 	do {
1596 		start = u64_stats_fetch_begin(&priv->syncp);
1597 
1598 		stats->packets = priv->stats.rx_packets;
1599 		stats->bytes = priv->stats.rx_bytes;
1600 		stats->alloc_fail = priv->alloc_fail;
1601 	} while (u64_stats_fetch_retry(&priv->syncp, start));
1602 }
1603 
1604 static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx,
1605 				    struct netdev_queue_stats_tx *stats)
1606 {
1607 	struct tn40_priv *priv = netdev_priv(ndev);
1608 	unsigned int start;
1609 
1610 	do {
1611 		start = u64_stats_fetch_begin(&priv->syncp);
1612 
1613 		stats->packets = priv->stats.tx_packets;
1614 		stats->bytes = priv->stats.tx_bytes;
1615 	} while (u64_stats_fetch_retry(&priv->syncp, start));
1616 }
1617 
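/* All traffic is accounted against queue 0, so the base (non-queue)
 * contribution is zero and the totals equal the per-queue sums.
 */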
static void tn40_get_base_stats(struct net_device *ndev,
				struct netdev_queue_stats_rx *rx,
				struct netdev_queue_stats_tx *tx)
{
	rx->packets = 0;
	rx->bytes = 0;
	rx->alloc_fail = 0;

	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops tn40_stat_ops = {
	.get_queue_stats_rx = tn40_get_queue_stats_rx,
	.get_queue_stats_tx = tn40_get_queue_stats_tx,
	.get_base_stats = tn40_get_base_stats,
};

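/* One-time device initialization: clear the link speed, drive the GPIOs
 * low and download the firmware through a temporary TX descriptor ring.
 */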
static int tn40_priv_init(struct tn40_priv *priv)
{
	int ret;

	tn40_set_link_speed(priv, 0);

	/* Set GPIO[9:0] to output 0 */
	tn40_write_reg(priv, 0x51E0, 0x30010006);	/* GPIO_OE_ WR CMD */
	tn40_write_reg(priv, 0x51F0, 0x0);	/* GPIO_OE_ DATA */
	tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8);

	/* The TX descriptor ring is used to load the firmware. */
	ret = tn40_create_tx_ring(priv);
	if (ret)
		return ret;
	ret = tn40_fw_load(priv);
	tn40_destroy_tx_ring(priv);
	return ret;
}

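/* Allocate the net_device, hook up its ops tables and advertise the
 * offload features; the TX queue starts out stopped.
 */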
static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev)
{
	struct net_device *ndev;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv));
	if (!ndev)
		return NULL;
	ndev->netdev_ops = &tn40_netdev_ops;
	ndev->ethtool_ops = &tn40_ethtool_ops;
	ndev->stat_ops = &tn40_stat_ops;
	ndev->tx_queue_len = TN40_NDEV_TXQ_LEN;
	ndev->mem_start = pci_resource_start(pdev, 0);
	ndev->mem_end = pci_resource_end(pdev, 0);
	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = TN40_MAX_MTU;

	ndev->features = NETIF_F_IP_CSUM |
		NETIF_F_SG |
		NETIF_F_FRAGLIST |
		NETIF_F_TSO | NETIF_F_GRO |
		NETIF_F_RXCSUM |
		NETIF_F_RXHASH |
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->vlan_features = NETIF_F_IP_CSUM |
			       NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH;

	if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) {
		ndev->features |= NETIF_F_HIGHDMA;
		ndev->vlan_features |= NETIF_F_HIGHDMA;
	}
	ndev->hw_features |= ndev->features;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	netif_stop_queue(ndev);
	return ndev;
}

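/* Probe: enable and map the PCI device, allocate the netdev, reset the
 * hardware, set up MSI, the MDIO bus and the PHY, load the firmware and
 * finally register the netdev. The error labels unwind in reverse order.
 */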
static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *ndev;
	struct tn40_priv *priv;
	unsigned int nvec = 1;
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "failed to set DMA mask.\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, TN40_DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "failed to request PCI regions.\n");
		goto err_disable_device;
	}

	pci_set_master(pdev);

	regs = pci_iomap(pdev, 0, TN40_REGS_SIZE);
	if (!regs) {
		ret = -EIO;
		dev_err(&pdev->dev, "failed to map PCI bar.\n");
		goto err_free_regions;
	}

	ndev = tn40_netdev_alloc(pdev);
	if (!ndev) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "failed to allocate netdev.\n");
		goto err_iounmap;
	}

	priv = netdev_priv(ndev);
	pci_set_drvdata(pdev, priv);
	netif_napi_add(ndev, &priv->napi, tn40_poll);

	priv->regs = regs;
	priv->pdev = pdev;
	priv->ndev = ndev;
	/* Initialize fifo sizes. */
	priv->txd_size = 3;
	priv->txf_size = 3;
	priv->rxd_size = 3;
	priv->rxf_size = 3;
	/* Set the initial interrupt coalescing register values. */
	priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12);
	priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12);

	ret = tn40_hw_reset(priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset HW.\n");
		goto err_unset_drvdata;
	}

	ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to allocate irq.\n");
		goto err_unset_drvdata;
	}

	ret = tn40_mdiobus_init(priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize mdio bus.\n");
		goto err_free_irq;
	}

	priv->stats_flag =
		((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308);
	u64_stats_init(&priv->syncp);

	priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE |
		TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 |
		TN40_IR_TMR1;

	tn40_mac_init(priv);
	ret = tn40_phy_register(priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to set up PHY.\n");
		goto err_free_irq;
	}

	ret = tn40_priv_init(priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize tn40_priv.\n");
		goto err_unregister_phydev;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdev.\n");
		goto err_unregister_phydev;
	}
	return 0;
err_unregister_phydev:
	tn40_phy_unregister(priv);
err_free_irq:
	pci_free_irq_vectors(pdev);
err_unset_drvdata:
	pci_set_drvdata(pdev, NULL);
err_iounmap:
	iounmap(regs);
err_free_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}

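/* Tear down in reverse of probe order. The net_device itself is
 * devm-allocated and is freed when the PCI device is unbound.
 */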
static void tn40_remove(struct pci_dev *pdev)
{
	struct tn40_priv *priv = pci_get_drvdata(pdev);
	struct net_device *ndev = priv->ndev;

	unregister_netdev(ndev);

	tn40_phy_unregister(priv);
	pci_free_irq_vectors(priv->pdev);
	pci_set_drvdata(pdev, NULL);
	iounmap(priv->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id tn40_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
			 PCI_VENDOR_ID_TEHUTI, 0x3015) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
			 PCI_VENDOR_ID_DLINK, 0x4d00) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
			 PCI_VENDOR_ID_ASUSTEK, 0x8709) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
			 PCI_VENDOR_ID_EDIMAX, 0x8103) },
	{ }
};

static struct pci_driver tn40_driver = {
	.name = TN40_DRV_NAME,
	.id_table = tn40_id_table,
	.probe = tn40_probe,
	.remove = tn40_remove,
};

module_pci_driver(tn40_driver);

MODULE_DEVICE_TABLE(pci, tn40_id_table);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(TN40_FIRMWARE_NAME);
MODULE_DESCRIPTION("Tehuti Network TN40xx Driver");