xref: /illumos-gate/usr/src/uts/common/io/xge/drv/xgell.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *  Copyright (c) 2002-2009 Neterion, Inc.
29  *  All rights reserved.
30  *
31  *  FileName :    xgell.c
32  *
33  *  Description:  Xge Link Layer data path implementation
34  *
35  */
36 
37 #include "xgell.h"
38 
39 #include <netinet/ip.h>
40 #include <netinet/tcp.h>
41 #include <netinet/udp.h>
42 
43 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
44     sizeof (struct ether_vlan_header))
45 
46 #define	HEADROOM		2	/* for DIX-only packets */
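/*
 * The 2-byte HEADROOM shifts a received DIX frame so that, after the
 * 14-byte Ethernet header, the IP header starts on a 4-byte boundary.
 * (Noted here as the usual rationale for such padding, not a requirement
 * spelled out by the HAL.)
 */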
47 
48 void header_free_func(void *arg) { }
49 frtn_t header_frtn = {header_free_func, NULL};
50 
51 /* DMA attributes used for Tx side */
52 static struct ddi_dma_attr tx_dma_attr = {
53 	DMA_ATTR_V0,			/* dma_attr_version */
54 	0x0ULL,				/* dma_attr_addr_lo */
55 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
56 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
57 #if defined(__sparc)
58 	0x2000,				/* dma_attr_align */
59 #else
60 	0x1000,				/* dma_attr_align */
61 #endif
62 	0xFC00FC,			/* dma_attr_burstsizes */
63 	0x1,				/* dma_attr_minxfer */
64 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
65 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
66 	18,				/* dma_attr_sgllen */
67 	(unsigned int)1,		/* dma_attr_granular */
68 	0				/* dma_attr_flags */
69 };
70 
71 /*
72  * DMA attributes used when using ddi_dma_mem_alloc to
73  * allocate HAL descriptors and Rx buffers during replenish
74  */
75 static struct ddi_dma_attr hal_dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version */
77 	0x0ULL,				/* dma_attr_addr_lo */
78 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
79 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
80 #if defined(__sparc)
81 	0x2000,				/* dma_attr_align */
82 #else
83 	0x1000,				/* dma_attr_align */
84 #endif
85 	0xFC00FC,			/* dma_attr_burstsizes */
86 	0x1,				/* dma_attr_minxfer */
87 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
88 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
89 	1,				/* dma_attr_sgllen */
90 	(unsigned int)1,		/* dma_attr_granular */
91 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
92 };
93 
94 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95 
96 static int		xgell_m_stat(void *, uint_t, uint64_t *);
97 static int		xgell_m_start(void *);
98 static void		xgell_m_stop(void *);
99 static int		xgell_m_promisc(void *, boolean_t);
100 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
101 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
102 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
103 
104 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
105 
106 static mac_callbacks_t xgell_m_callbacks = {
107 	XGELL_M_CALLBACK_FLAGS,
108 	xgell_m_stat,
109 	xgell_m_start,
110 	xgell_m_stop,
111 	xgell_m_promisc,
112 	xgell_m_multicst,
113 	NULL,
114 	NULL,
115 	NULL,
116 	xgell_m_ioctl,
117 	xgell_m_getcapab
118 };
119 
120 /*
121  * xge_device_poll
122  *
123  * Called from a timeout(9F) every second; xge_callback_event_queued()
124  * should also call this when the HAL has rescheduled an event.
125  */
126 /*ARGSUSED*/
127 void
128 xge_device_poll(void *data)
129 {
130 	xgelldev_t *lldev = xge_hal_device_private(data);
131 
132 	mutex_enter(&lldev->genlock);
133 	if (lldev->is_initialized) {
134 		xge_hal_device_poll(data);
135 		lldev->timeout_id = timeout(xge_device_poll, data,
136 		    XGE_DEV_POLL_TICKS);
137 	} else if (lldev->in_reset == 1) {
138 		lldev->timeout_id = timeout(xge_device_poll, data,
139 		    XGE_DEV_POLL_TICKS);
140 	} else {
141 		lldev->timeout_id = 0;
142 	}
143 	mutex_exit(&lldev->genlock);
144 }
145 
146 /*
147  * xge_device_poll_now
148  *
149  * Calls the HAL poll routine immediately, without rescheduling the timeout.
150  */
151 void
152 xge_device_poll_now(void *data)
153 {
154 	xgelldev_t *lldev = xge_hal_device_private(data);
155 
156 	mutex_enter(&lldev->genlock);
157 	if (lldev->is_initialized) {
158 		xge_hal_device_poll(data);
159 	}
160 	mutex_exit(&lldev->genlock);
161 }
162 
163 /*
164  * xgell_callback_link_up
165  *
166  * This function is called by the HAL to notify of a HW link-up state change.
167  */
168 void
169 xgell_callback_link_up(void *userdata)
170 {
171 	xgelldev_t *lldev = (xgelldev_t *)userdata;
172 
173 	mac_link_update(lldev->mh, LINK_STATE_UP);
174 }
175 
176 /*
177  * xgell_callback_link_down
178  *
179  * This function is called by the HAL to notify of a HW link-down state change.
180  */
181 void
182 xgell_callback_link_down(void *userdata)
183 {
184 	xgelldev_t *lldev = (xgelldev_t *)userdata;
185 
186 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
187 }
188 
189 /*
190  * xgell_rx_buffer_replenish_all
191  *
192  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
193  * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
194  * Must be called with pool_lock held.
195  */
196 static void
197 xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
198 {
199 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
200 	xge_hal_dtr_h dtr;
201 	xgell_rx_buffer_t *rx_buffer;
202 	xgell_rxd_priv_t *rxd_priv;
203 
204 	xge_assert(mutex_owned(&bf_pool->pool_lock));
205 
206 	while ((bf_pool->free > 0) &&
207 	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
208 		xge_assert(bf_pool->head);
209 
210 		rx_buffer = bf_pool->head;
211 
212 		bf_pool->head = rx_buffer->next;
213 		bf_pool->free--;
214 
215 		xge_assert(rx_buffer->dma_addr);
216 
217 		rxd_priv = (xgell_rxd_priv_t *)
218 		    xge_hal_ring_dtr_private(ring->channelh, dtr);
219 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
220 		    bf_pool->size);
221 
222 		rxd_priv->rx_buffer = rx_buffer;
223 		xge_hal_ring_dtr_post(ring->channelh, dtr);
224 	}
225 }
226 
227 /*
228  * xgell_rx_buffer_release
229  *
230  * The only thing done here is to put the buffer back to the pool.
231  * Callers of this function must hold the bf_pool.pool_lock mutex.
232  */
233 static void
234 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
235 {
236 	xgell_rx_ring_t *ring = rx_buffer->ring;
237 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
238 
239 	xge_assert(mutex_owned(&bf_pool->pool_lock));
240 
241 	/* Put the buffer back to pool */
242 	rx_buffer->next = bf_pool->head;
243 	bf_pool->head = rx_buffer;
244 
245 	bf_pool->free++;
246 }
247 
248 /*
249  * xgell_rx_buffer_recycle
250  *
251  * Called by desballoc() to "free" the resource.
252  * We will try to replenish all descriptors.
253  */
254 
255 /*
256  * Previously there was heavy lock contention between xgell_rx_1b_compl() and
257  * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and hurt
258  * rx performance. A separate recycle list is introduced to overcome this.
259  * The recycle list records the rx buffers that have been recycled, and these
260  * buffers are returned to the free list in bulk instead of
261  * one-by-one.
262  */
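/*
 * Lock ordering note: xgell_rx_buffer_recycle() acquires recycle_lock first
 * and only then pool_lock when it splices the recycle list back onto the
 * free list in bulk; xgell_rx_destroy_buffer_pool() follows the same order.
 */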
263 
264 static void
265 xgell_rx_buffer_recycle(char *arg)
266 {
267 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
268 	xgell_rx_ring_t *ring = rx_buffer->ring;
269 	xgelldev_t *lldev = ring->lldev;
270 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
271 
272 	mutex_enter(&bf_pool->recycle_lock);
273 
274 	rx_buffer->next = bf_pool->recycle_head;
275 	bf_pool->recycle_head = rx_buffer;
276 	if (bf_pool->recycle_tail == NULL)
277 		bf_pool->recycle_tail = rx_buffer;
278 	bf_pool->recycle++;
279 
280 	/*
281 	 * Until a good way to set this hiwat is found, just always call
282 	 * replenish_all. *TODO*
283 	 */
284 	if ((lldev->is_initialized != 0) && (ring->live) &&
285 	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
286 		mutex_enter(&bf_pool->pool_lock);
287 		bf_pool->recycle_tail->next = bf_pool->head;
288 		bf_pool->head = bf_pool->recycle_head;
289 		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
290 		bf_pool->post -= bf_pool->recycle;
291 		bf_pool->free += bf_pool->recycle;
292 		bf_pool->recycle = 0;
293 		xgell_rx_buffer_replenish_all(ring);
294 		mutex_exit(&bf_pool->pool_lock);
295 	}
296 
297 	mutex_exit(&bf_pool->recycle_lock);
298 }
299 
300 /*
301  * xgell_rx_buffer_alloc
302  *
303  * Allocate one rx buffer and return a pointer to it.
304  * Returns NULL on failure.
305  */
306 static xgell_rx_buffer_t *
307 xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
308 {
309 	xgelldev_t *lldev = ring->lldev;
310 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
311 	xge_hal_device_t *hldev;
312 	void *vaddr;
313 	ddi_dma_handle_t dma_handle;
314 	ddi_acc_handle_t dma_acch;
315 	dma_addr_t dma_addr;
316 	uint_t ncookies;
317 	ddi_dma_cookie_t dma_cookie;
318 	size_t real_size;
319 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
320 	xgell_rx_buffer_t *rx_buffer;
321 
322 	hldev = (xge_hal_device_t *)lldev->devh;
323 
324 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
325 	    0, &dma_handle) != DDI_SUCCESS) {
326 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
327 		    XGELL_IFNAME, lldev->instance);
328 		goto handle_failed;
329 	}
330 
331 	/* reserve some space at the end of the buffer for recycling */
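	/*
	 * Layout of the single DMA allocation used below:
	 * [HEADROOM][rx data of bf_pool->size][xgell_rx_buffer_t].
	 * Only the data region is DMA-bound (at vaddr + HEADROOM); the
	 * xgell_rx_buffer_t bookkeeping structure lives in the tail of the
	 * same allocation, so no separate per-buffer allocation is needed.
	 */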
332 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
333 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
334 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
335 	    DDI_SUCCESS) {
336 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
337 		    XGELL_IFNAME, lldev->instance);
338 		goto mem_failed;
339 	}
340 
341 	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
342 	    real_size) {
343 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
344 		    XGELL_IFNAME, lldev->instance);
345 		goto bind_failed;
346 	}
347 
348 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
349 	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
350 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
351 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
352 		    XGELL_IFNAME, lldev->instance);
353 		goto bind_failed;
354 	}
355 
356 	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
357 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
358 		    XGELL_IFNAME, lldev->instance);
359 		goto check_failed;
360 	}
361 
362 	dma_addr = dma_cookie.dmac_laddress;
363 
364 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
365 	    sizeof (xgell_rx_buffer_t));
366 	rx_buffer->next = NULL;
367 	rx_buffer->vaddr = vaddr;
368 	rx_buffer->dma_addr = dma_addr;
369 	rx_buffer->dma_handle = dma_handle;
370 	rx_buffer->dma_acch = dma_acch;
371 	rx_buffer->ring = ring;
372 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
373 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
374 
375 	return (rx_buffer);
376 
377 check_failed:
378 	(void) ddi_dma_unbind_handle(dma_handle);
379 bind_failed:
380 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
381 	ddi_dma_mem_free(&dma_acch);
382 mem_failed:
383 	ddi_dma_free_handle(&dma_handle);
384 handle_failed:
385 
386 	return (NULL);
387 }
388 
389 /*
390  * xgell_rx_destroy_buffer_pool
391  *
392  * Destroy the buffer pool. If any buffer is still held by the upper layer,
393  * as recorded by bf_pool.post, return B_FALSE to refuse to be unloaded.
394  */
395 static boolean_t
396 xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
397 {
398 	xgelldev_t *lldev = ring->lldev;
399 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
400 	xgell_rx_buffer_t *rx_buffer;
401 	ddi_dma_handle_t  dma_handle;
402 	ddi_acc_handle_t  dma_acch;
403 	int i;
404 
405 	/*
406 	 * If the pool has already been destroyed, just return B_TRUE
407 	 */
408 	if (!bf_pool->live)
409 		return (B_TRUE);
410 
411 	mutex_enter(&bf_pool->recycle_lock);
412 	if (bf_pool->recycle > 0) {
413 		mutex_enter(&bf_pool->pool_lock);
414 		bf_pool->recycle_tail->next = bf_pool->head;
415 		bf_pool->head = bf_pool->recycle_head;
416 		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
417 		bf_pool->post -= bf_pool->recycle;
418 		bf_pool->free += bf_pool->recycle;
419 		bf_pool->recycle = 0;
420 		mutex_exit(&bf_pool->pool_lock);
421 	}
422 	mutex_exit(&bf_pool->recycle_lock);
423 
424 	/*
425 	 * If any buffers are still posted, the driver must refuse to be
426 	 * detached. The upper layer needs to release them first.
427 	 */
428 	if (bf_pool->post != 0) {
429 		xge_debug_ll(XGE_ERR,
430 		    "%s%d has some buffers not yet recycled, try again later!",
431 		    XGELL_IFNAME, lldev->instance);
432 		return (B_FALSE);
433 	}
434 
435 	/*
436 	 * Release buffers one by one.
437 	 */
438 	for (i = bf_pool->total; i > 0; i--) {
439 		rx_buffer = bf_pool->head;
440 		xge_assert(rx_buffer != NULL);
441 
442 		bf_pool->head = rx_buffer->next;
443 
444 		dma_handle = rx_buffer->dma_handle;
445 		dma_acch = rx_buffer->dma_acch;
446 
447 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
448 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
449 			bf_pool->head = rx_buffer;
450 			return (B_FALSE);
451 		}
452 		ddi_dma_mem_free(&dma_acch);
453 		ddi_dma_free_handle(&dma_handle);
454 
455 		bf_pool->total--;
456 		bf_pool->free--;
457 	}
458 
459 	xge_assert(!mutex_owned(&bf_pool->pool_lock));
460 
461 	mutex_destroy(&bf_pool->recycle_lock);
462 	mutex_destroy(&bf_pool->pool_lock);
463 	bf_pool->live = B_FALSE;
464 
465 	return (B_TRUE);
466 }
467 
468 /*
469  * xgell_rx_create_buffer_pool
470  *
471  * Initialize the RX buffer pool for one RX ring. See xgell_rx_buffer_pool_t.
472  */
473 static boolean_t
474 xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
475 {
476 	xgelldev_t *lldev = ring->lldev;
477 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
478 	xge_hal_device_t *hldev;
479 	xgell_rx_buffer_t *rx_buffer;
480 	int i;
481 
482 	if (bf_pool->live)
483 		return (B_TRUE);
484 
485 	hldev = (xge_hal_device_t *)lldev->devh;
486 
487 	bf_pool->total = 0;
488 	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
489 	bf_pool->head = NULL;
490 	bf_pool->free = 0;
491 	bf_pool->post = 0;
492 	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
493 	bf_pool->recycle = 0;
494 	bf_pool->recycle_head = NULL;
495 	bf_pool->recycle_tail = NULL;
496 	bf_pool->live = B_TRUE;
497 
498 	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
499 	    DDI_INTR_PRI(hldev->irqh));
500 	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
501 	    DDI_INTR_PRI(hldev->irqh));
502 
503 	/*
504 	 * Allocate buffers one by one. On failure, destroy the whole pool
505 	 * with a call to xgell_rx_destroy_buffer_pool().
506 	 */
507 
508 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
509 		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
510 			(void) xgell_rx_destroy_buffer_pool(ring);
511 			return (B_FALSE);
512 		}
513 
514 		rx_buffer->next = bf_pool->head;
515 		bf_pool->head = rx_buffer;
516 
517 		bf_pool->total++;
518 		bf_pool->free++;
519 	}
520 
521 	return (B_TRUE);
522 }
523 
524 /*
525  * xgell_rx_dtr_replenish
526  *
527  * Replenish a descriptor with an rx_buffer from the RX buffer pool.
528  * The dtr should be posted right away.
529  */
530 xge_hal_status_e
531 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
532     void *userdata, xge_hal_channel_reopen_e reopen)
533 {
534 	xgell_rx_ring_t *ring = userdata;
535 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
536 	xgell_rx_buffer_t *rx_buffer;
537 	xgell_rxd_priv_t *rxd_priv;
538 
539 	mutex_enter(&bf_pool->pool_lock);
540 	if (bf_pool->head == NULL) {
541 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
		/* don't return with pool_lock still held */
		mutex_exit(&bf_pool->pool_lock);
542 		return (XGE_HAL_FAIL);
543 	}
544 	rx_buffer = bf_pool->head;
545 	xge_assert(rx_buffer);
546 	xge_assert(rx_buffer->dma_addr);
547 
548 	bf_pool->head = rx_buffer->next;
549 	bf_pool->free--;
550 	mutex_exit(&bf_pool->pool_lock);
551 
552 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
553 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
554 
555 	rxd_priv->rx_buffer = rx_buffer;
556 
557 	return (XGE_HAL_OK);
558 }
559 
560 /*
561  * xgell_get_ip_offset
562  *
563  * Calculate the offset to IP header.
564  */
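/*
 * For example, a VLAN-tagged DIX IPv4 frame yields
 * XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + XGE_HAL_HEADER_VLAN_SIZE,
 * which, assuming the usual 14-byte Ethernet header and 4-byte VLAN tag,
 * works out to an 18-byte offset to the IP header.
 */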
565 static inline int
566 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
567 {
568 	int ip_off;
569 
570 	/* get IP-header offset */
571 	switch (ext_info->frame) {
572 	case XGE_HAL_FRAME_TYPE_DIX:
573 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
574 		break;
575 	case XGE_HAL_FRAME_TYPE_IPX:
576 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
577 		    XGE_HAL_HEADER_802_2_SIZE +
578 		    XGE_HAL_HEADER_SNAP_SIZE);
579 		break;
580 	case XGE_HAL_FRAME_TYPE_LLC:
581 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
582 		    XGE_HAL_HEADER_802_2_SIZE);
583 		break;
584 	case XGE_HAL_FRAME_TYPE_SNAP:
585 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
586 		    XGE_HAL_HEADER_SNAP_SIZE);
587 		break;
588 	default:
589 		ip_off = 0;
590 		break;
591 	}
592 
593 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
594 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
595 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
596 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
597 	}
598 
599 	return (ip_off);
600 }
601 
602 /*
603  * xgell_rx_hcksum_assoc
604  *
605  * Determine the packet type and then call mac_hcksum_set() to associate
606  * the h/w checksum information with the mblk.
607  */
608 static inline void
609 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
610     xge_hal_dtr_info_t *ext_info)
611 {
612 	int cksum_flags = 0;
613 
614 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
615 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
616 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
617 				cksum_flags |= HCK_IPV4_HDRCKSUM_OK;
618 			}
619 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
620 				cksum_flags |= HCK_FULLCKSUM_OK;
621 			}
622 			if (cksum_flags != 0) {
623 				mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
624 			}
625 		}
626 	} else if (ext_info->proto &
627 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
628 		/*
629 		 * Just pass the partial cksum up to IP.
630 		 */
631 		int ip_off = xgell_get_ip_offset(ext_info);
632 		int start, end = pkt_length - ip_off;
633 
634 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
635 			struct ip *ip =
636 			    (struct ip *)(vaddr + ip_off);
637 			start = ip->ip_hl * 4;
638 		} else {
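			/*
			 * IPv6: fixed 40-byte base header (extension
			 * headers are not parsed here).
			 */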
639 			start = 40;
640 		}
641 		cksum_flags |= HCK_PARTIALCKSUM;
642 		mac_hcksum_set(mp, start, 0, end,
643 		    ntohs(ext_info->l4_cksum), cksum_flags);
644 	}
645 }
646 
647 /*
648  * xgell_rx_1b_msg_alloc
649  *
650  * Allocate a message header for the data buffer, and decide whether to copy
651  * the packet into a new data buffer so the big rx_buffer can be released.
652  *
653  * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
654  * new message and copy the payload in.
655  */
656 static mblk_t *
657 xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
658     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
659 {
660 	xgelldev_t *lldev = ring->lldev;
661 	mblk_t *mp;
662 	char *vaddr;
663 
664 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
665 	/*
666 	 * Copy the packet into a newly allocated message buffer if pkt_length
667 	 * is less than XGELL_RX_DMA_LOWAT.
668 	 */
669 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
670 		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
671 			return (NULL);
672 		}
673 		mp->b_rptr += HEADROOM;
674 		bcopy(vaddr, mp->b_rptr, pkt_length);
675 		mp->b_wptr = mp->b_rptr + pkt_length;
676 		*copyit = B_TRUE;
677 		return (mp);
678 	}
679 
680 	/*
681 	 * Just allocate mblk for current data buffer
682 	 */
683 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
684 	    &rx_buffer->frtn)) == NULL) {
685 		/* Drop it */
686 		return (NULL);
687 	}
688 	/*
689 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
690 	 */
691 	mp->b_wptr += pkt_length;
692 
693 	return (mp);
694 }
695 
696 /*
697  * xgell_rx_1b_callback
698  *
699  * If the interrupt is because of a received frame or if the receive ring
700  * contains fresh, as yet unprocessed frames, this function is called.
701  */
702 static xge_hal_status_e
703 xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
704     void *userdata)
705 {
706 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
707 	xgelldev_t *lldev = ring->lldev;
708 	xgell_rx_buffer_t *rx_buffer;
709 	mblk_t *mp_head = NULL;
710 	mblk_t *mp_end  = NULL;
711 	int pkt_burst = 0;
712 
713 	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);
714 
715 	mutex_enter(&ring->bf_pool.pool_lock);
716 	do {
717 		int pkt_length;
718 		dma_addr_t dma_data;
719 		mblk_t *mp;
720 		boolean_t copyit = B_FALSE;
721 
722 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
723 		    xge_hal_ring_dtr_private(channelh, dtr));
724 		xge_hal_dtr_info_t ext_info;
725 
726 		rx_buffer = rxd_priv->rx_buffer;
727 
728 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
729 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
730 
731 		xge_assert(dma_data == rx_buffer->dma_addr);
732 
733 		if (t_code != 0) {
734 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
735 			    " completed due to error t_code %01x", XGELL_IFNAME,
736 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
737 
738 			(void) xge_hal_device_handle_tcode(channelh, dtr,
739 			    t_code);
740 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
741 			xgell_rx_buffer_release(rx_buffer);
742 			continue;
743 		}
744 
745 		/*
746 		 * Sync the DMA memory
747 		 */
748 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
749 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
750 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
751 			    XGELL_IFNAME, lldev->instance);
752 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
753 			xgell_rx_buffer_release(rx_buffer);
754 			continue;
755 		}
756 
757 		/*
758 		 * Allocate message for the packet.
759 		 */
760 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
761 			copyit = B_TRUE;
762 		} else {
763 			copyit = B_FALSE;
764 		}
765 
766 		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
767 		    &ext_info, &copyit);
768 
769 		xge_hal_ring_dtr_free(channelh, dtr);
770 
771 		/*
772 		 * Release the buffer and recycle it later
773 		 */
774 		if ((mp == NULL) || copyit) {
775 			xgell_rx_buffer_release(rx_buffer);
776 		} else {
777 			/*
778 			 * Count it, since the buffer is being loaned up to the stack.
779 			 */
780 			ring->bf_pool.post++;
781 		}
782 		if (mp == NULL) {
783 			xge_debug_ll(XGE_ERR,
784 			    "%s%d: rx: can not allocate mp mblk",
785 			    XGELL_IFNAME, lldev->instance);
786 			continue;
787 		}
788 
789 		/*
790 		 * Associate cksum_flags per packet type and h/w
791 		 * cksum flags.
792 		 */
793 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
794 		    pkt_length, &ext_info);
795 
796 		ring->rx_pkts++;
797 		ring->rx_bytes += pkt_length;
798 
799 		if (mp_head == NULL) {
800 			mp_head = mp;
801 			mp_end = mp;
802 		} else {
803 			mp_end->b_next = mp;
804 			mp_end = mp;
805 		}
806 
807 		/*
808 		 * Inline implementation of the polling path.
809 		 */
810 		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
811 			ring->poll_mp = mp_head;
812 		}
813 		if (ring->poll_mp != NULL) {
814 			if ((ring->poll_bytes -= pkt_length) <= 0) {
815 				/* have polled enough packets. */
816 				break;
817 			} else {
818 				/* continue polling packets. */
819 				continue;
820 			}
821 		}
822 
823 		/*
824 		 * We're not in polling mode, so try to chain more messages
825 		 * or send the chain up according to pkt_burst.
826 		 */
827 		if (++pkt_burst < lldev->config.rx_pkt_burst)
828 			continue;
829 
830 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
831 			/* Replenish rx buffers */
832 			xgell_rx_buffer_replenish_all(ring);
833 		}
834 		mutex_exit(&ring->bf_pool.pool_lock);
835 		if (mp_head != NULL) {
836 			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
837 			    ring->ring_gen_num);
838 		}
839 		mp_head = mp_end  = NULL;
840 		pkt_burst = 0;
841 		mutex_enter(&ring->bf_pool.pool_lock);
842 
843 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
844 	    XGE_HAL_OK);
845 
846 	/*
847 	 * Always call replenish_all to recycle rx_buffers.
848 	 */
849 	xgell_rx_buffer_replenish_all(ring);
850 	mutex_exit(&ring->bf_pool.pool_lock);
851 
852 	/*
853 	 * If we're not in a polling cycle, call mac_rx_ring(); otherwise
854 	 * just return while leaving packets chained to ring->poll_mp.
855 	 */
856 	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
857 		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
858 		    ring->ring_gen_num);
859 	}
860 
861 	return (XGE_HAL_OK);
862 }
863 
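/*
 * xgell_rx_poll
 *
 * MAC polling entry point (mri_poll) for one rx ring. It asks the HAL to
 * re-run the rx completion callback, which chains up to bytes_to_pickup
 * bytes worth of packets onto ring->poll_mp instead of delivering them,
 * then returns that chain to the caller.
 */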
864 mblk_t *
865 xgell_rx_poll(void *arg, int bytes_to_pickup)
866 {
867 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
868 	int got_rx = 0;
869 	mblk_t *mp;
870 
871 	xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
872 
873 	ring->poll_mp = NULL;
874 	ring->poll_bytes = bytes_to_pickup;
875 	(void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
876 
877 	mp = ring->poll_mp;
878 	ring->poll_bytes = -1;
879 	ring->polled_bytes += got_rx;
880 	ring->poll_mp = NULL;
881 
882 	return (mp);
883 }
884 
885 /*
886  * xgell_xmit_compl
887  *
888  * If an interrupt was raised to indicate DMA completion of the Tx packet,
889  * this function is called. It identifies the last TxD whose buffer was
890  * freed and frees all mblks whose data have already been DMA'ed into the
891  * NIC's internal memory.
892  */
893 static xge_hal_status_e
894 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
895     void *userdata)
896 {
897 	xgell_tx_ring_t *ring = userdata;
898 	xgelldev_t *lldev = ring->lldev;
899 
900 	do {
901 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
902 		    xge_hal_fifo_dtr_private(dtr));
903 		int i;
904 
905 		if (t_code) {
906 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
907 			    " completed due to error t_code %01x", XGELL_IFNAME,
908 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
909 
910 			(void) xge_hal_device_handle_tcode(channelh, dtr,
911 			    t_code);
912 		}
913 
914 		for (i = 0; i < txd_priv->handle_cnt; i++) {
915 			if (txd_priv->dma_handles[i] != NULL) {
916 				xge_assert(txd_priv->dma_handles[i]);
917 				(void) ddi_dma_unbind_handle(
918 				    txd_priv->dma_handles[i]);
919 				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
920 				txd_priv->dma_handles[i] = 0;
921 			}
922 		}
923 		txd_priv->handle_cnt = 0;
924 
925 		xge_hal_fifo_dtr_free(channelh, dtr);
926 
927 		if (txd_priv->mblk != NULL) {
928 			freemsg(txd_priv->mblk);
929 			txd_priv->mblk = NULL;
930 		}
931 
932 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
933 	    XGE_HAL_OK);
934 
935 	if (ring->need_resched)
936 		mac_tx_ring_update(lldev->mh, ring->ring_handle);
937 
938 	return (XGE_HAL_OK);
939 }
940 
941 mblk_t *
942 xgell_ring_tx(void *arg, mblk_t *mp)
943 {
944 	xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
945 	mblk_t *bp;
946 	xgelldev_t *lldev = ring->lldev;
947 	xge_hal_device_t *hldev = lldev->devh;
948 	xge_hal_status_e status;
949 	xge_hal_dtr_h dtr;
950 	xgell_txd_priv_t *txd_priv;
951 	uint32_t hckflags;
952 	uint32_t lsoflags;
953 	uint32_t mss;
954 	int handle_cnt, frag_cnt, ret, i, copied;
955 	boolean_t used_copy;
956 	uint64_t sent_bytes;
957 
958 	handle_cnt = frag_cnt = 0;
959 	sent_bytes = 0;
960 
961 	if (!lldev->is_initialized || lldev->in_reset)
962 		return (mp);
963 
964 	/*
965 	 * If the free Tx dtr count reaches the lower threshold, tell the
966 	 * MAC layer to stop sending more packets until the free dtr count
967 	 * recovers. The driver signals this by returning the mblk with
968 	 * need_resched set, and later calls mac_tx_ring_update() from
969 	 * xgell_xmit_compl() once descriptors have been freed.
970 	 */
971 	if (xge_hal_channel_dtr_count(ring->channelh)
972 	    <= XGELL_TX_LEVEL_LOW) {
973 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit, "
974 		    "free descriptor count at low threshold %d",
975 		    XGELL_IFNAME, lldev->instance,
976 		    ((xge_hal_channel_t *)ring->channelh)->post_qid,
977 		    XGELL_TX_LEVEL_LOW);
978 		goto _exit;
979 	}
980 
981 	status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
982 	if (status != XGE_HAL_OK) {
983 		switch (status) {
984 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
985 			xge_debug_ll(XGE_ERR,
986 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
987 			    lldev->instance,
988 			    ((xge_hal_channel_t *)
989 			    ring->channelh)->post_qid);
990 			goto _exit;
991 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
992 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
993 			    " out of descriptors.", XGELL_IFNAME,
994 			    lldev->instance,
995 			    ((xge_hal_channel_t *)
996 			    ring->channelh)->post_qid);
997 			goto _exit;
998 		default:
999 			return (mp);
1000 		}
1001 	}
1002 
1003 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1004 	txd_priv->mblk = mp;
1005 
1006 	/*
1007 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1008 	 * do insertion.
1009 	 *
1010 	 * For a NIC driver that has to strip and re-insert the VLAN tag, an
1011 	 * example is the other implementation for xge. The driver can simply
1012 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let h/w
1013 	 * insert the tag automatically, since GLD never sends down mp(s) with
1014 	 * a split ether_vlan_header.
1015 	 *
1016 	 * struct ether_vlan_header *evhp;
1017 	 * uint16_t tci;
1018 	 *
1019 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1020 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1021 	 *	tci = ntohs(evhp->ether_tci);
1022 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1023 	 *	    2 * ETHERADDRL);
1024 	 *	mp->b_rptr += VLAN_TAGSZ;
1025 	 *
1026 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1027 	 * }
1028 	 */
1029 
1030 	copied = 0;
1031 	used_copy = B_FALSE;
1032 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1033 		int mblen;
1034 		uint_t ncookies;
1035 		ddi_dma_cookie_t dma_cookie;
1036 		ddi_dma_handle_t dma_handle;
1037 
1038 		/* skip zero-length message blocks */
1039 		mblen = MBLKL(bp);
1040 		if (mblen == 0) {
1041 			continue;
1042 		}
1043 
1044 		sent_bytes += mblen;
1045 
1046 		/*
1047 		 * Check the message length to decide whether to DMA-bind or
1048 		 * bcopy() the data into the tx descriptor(s).
1049 		 */
1050 		if (mblen < lldev->config.tx_dma_lowat &&
1051 		    (copied + mblen) < lldev->tx_copied_max) {
1052 			xge_hal_status_e rc;
1053 			rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
1054 			    dtr, bp->b_rptr, mblen);
1055 			if (rc == XGE_HAL_OK) {
1056 				used_copy = B_TRUE;
1057 				copied += mblen;
1058 				continue;
1059 			} else if (used_copy) {
1060 				xge_hal_fifo_dtr_buffer_finalize(
1061 				    ring->channelh, dtr, frag_cnt++);
1062 				used_copy = B_FALSE;
1063 			}
1064 		} else if (used_copy) {
1065 			xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
1066 			    dtr, frag_cnt++);
1067 			used_copy = B_FALSE;
1068 		}
1069 
1070 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1071 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1072 		if (ret != DDI_SUCCESS) {
1073 			xge_debug_ll(XGE_ERR,
1074 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1075 			    lldev->instance);
1076 			goto _exit_cleanup;
1077 		}
1078 
1079 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1080 		    (caddr_t)bp->b_rptr, mblen,
1081 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1082 		    &dma_cookie, &ncookies);
1083 
1084 		switch (ret) {
1085 		case DDI_DMA_MAPPED:
1086 			/* everything's fine */
1087 			break;
1088 
1089 		case DDI_DMA_NORESOURCES:
1090 			xge_debug_ll(XGE_ERR,
1091 			    "%s%d: can not bind dma address",
1092 			    XGELL_IFNAME, lldev->instance);
1093 			ddi_dma_free_handle(&dma_handle);
1094 			goto _exit_cleanup;
1095 
1096 		case DDI_DMA_NOMAPPING:
1097 		case DDI_DMA_INUSE:
1098 		case DDI_DMA_TOOBIG:
1099 		default:
1100 			/* drop packet, don't retry */
1101 			xge_debug_ll(XGE_ERR,
1102 			    "%s%d: can not map message buffer",
1103 			    XGELL_IFNAME, lldev->instance);
1104 			ddi_dma_free_handle(&dma_handle);
1105 			goto _exit_cleanup;
1106 		}
1107 
1108 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1109 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1110 			    "requested c:%d+f:%d", XGELL_IFNAME,
1111 			    lldev->instance, ncookies, frag_cnt);
1112 			(void) ddi_dma_unbind_handle(dma_handle);
1113 			ddi_dma_free_handle(&dma_handle);
1114 			goto _exit_cleanup;
1115 		}
1116 
1117 		/* setup the descriptors for this data buffer */
1118 		while (ncookies) {
1119 			xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
1120 			    frag_cnt++, dma_cookie.dmac_laddress,
1121 			    dma_cookie.dmac_size);
1122 			if (--ncookies) {
1123 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1124 			}
1125 
1126 		}
1127 
1128 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1129 
1130 		if (bp->b_cont &&
1131 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1132 		    hldev->config.fifo.max_frags)) {
1133 			mblk_t *nmp;
1134 
1135 			xge_debug_ll(XGE_TRACE,
1136 			    "too many FRAGs [%d], pulling them up", frag_cnt);
1137 
1138 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1139 				/* Drop packet, don't retry */
1140 				xge_debug_ll(XGE_ERR,
1141 				    "%s%d: can not pullup message buffer",
1142 				    XGELL_IFNAME, lldev->instance);
1143 				goto _exit_cleanup;
1144 			}
1145 			freemsg(bp->b_cont);
1146 			bp->b_cont = nmp;
1147 		}
1148 	}
1149 
1150 	/* finalize unfinished copies */
1151 	if (used_copy) {
1152 		xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
1153 		    frag_cnt++);
1154 	}
1155 
1156 	txd_priv->handle_cnt = handle_cnt;
1157 
1158 	/*
1159 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1160 	 * do all necessary work.
1161 	 */
1162 	mac_lso_get(mp, &mss, &lsoflags);
1163 
1164 	if (lsoflags & HW_LSO) {
1165 		xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
1166 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1167 	}
1168 
1169 	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
1170 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1171 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1172 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1173 	}
1174 	if (hckflags & HCK_FULLCKSUM) {
1175 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1176 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1177 	}
1178 
1179 	xge_hal_fifo_dtr_post(ring->channelh, dtr);
1180 
1181 	/* Update per-ring tx statistics */
1182 	atomic_inc_64(&ring->tx_pkts);
1183 	atomic_add_64(&ring->tx_bytes, sent_bytes);
1184 
1185 	return (NULL);
1186 
1187 _exit_cleanup:
1188 	/*
1189 	 * Could not successfully transmit but have changed the message,
1190 	 * so just free it and return NULL
1191 	 */
1192 	for (i = 0; i < handle_cnt; i++) {
1193 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1194 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1195 		txd_priv->dma_handles[i] = 0;
1196 	}
1197 
1198 	xge_hal_fifo_dtr_free(ring->channelh, dtr);
1199 
1200 	freemsg(mp);
1201 	return (NULL);
1202 
1203 _exit:
1204 	ring->need_resched = B_TRUE;
1205 	return (mp);
1206 }
1207 
1208 /*
1209  * xgell_rx_ring_maddr_init
1210  */
1211 static void
1212 xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
1213 {
1214 	int i;
1215 	xgelldev_t *lldev = ring->lldev;
1216 	xge_hal_device_t *hldev = lldev->devh;
1217 	int slot_start;
1218 
1219 	xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
1220 
1221 	ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
1222 	ring->mmac.naddrfree = ring->mmac.naddr;
1223 
1224 	/*
1225 	 * For the default rx ring, the first MAC address is the factory one.
1226 	 * This will be set by the framework, so it needs to be cleared for now.
1227 	 */
1228 	(void) xge_hal_device_macaddr_clear(hldev, 0);
1229 
1230 	/*
1231 	 * Read the MAC address Configuration Memory from HAL.
1232 	 * The first slot will hold a factory MAC address, contents in other
1233 	 * slots will be FF:FF:FF:FF:FF:FF.
1234 	 */
1235 	slot_start = ring->index * 32;
1236 	for (i = 0; i < ring->mmac.naddr; i++) {
1237 		(void) xge_hal_device_macaddr_get(hldev, slot_start + i,
1238 		    ring->mmac.mac_addr + i);
1239 		ring->mmac.mac_addr_set[i] = B_FALSE;
1240 	}
1241 }
1242 
1243 static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
1244 
1245 static int
1246 xgell_addmac(void *arg, const uint8_t *mac_addr)
1247 {
1248 	xgell_rx_ring_t *ring = arg;
1249 	xgelldev_t *lldev = ring->lldev;
1250 	xge_hal_device_t *hldev = lldev->devh;
1251 	int slot;
1252 	int slot_start;
1253 
1254 	xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");
1255 
1256 	mutex_enter(&lldev->genlock);
1257 
1258 	if (ring->mmac.naddrfree == 0) {
1259 		mutex_exit(&lldev->genlock);
1260 		return (ENOSPC);
1261 	}
1262 
1263 	/* First slot is for factory MAC address */
1264 	for (slot = 0; slot < ring->mmac.naddr; slot++) {
1265 		if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
1266 			break;
1267 		}
1268 	}
1269 
1270 	ASSERT(slot < ring->mmac.naddr);
1271 
1272 	slot_start = ring->index * 32;
1273 
1274 	if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
1275 	    0) {
1276 		mutex_exit(&lldev->genlock);
1277 		return (EIO);
1278 	}
1279 
1280 	/* Simply enable RTS for the whole section. */
1281 	(void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
1282 
1283 	/*
1284 	 * Read back the MAC address from HAL to keep the array up to date.
1285 	 */
1286 	if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
1287 	    ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
1288 		(void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1289 		return (EIO);
1290 	}
1291 
1292 	ring->mmac.mac_addr_set[slot] = B_TRUE;
1293 	ring->mmac.naddrfree--;
1294 
1295 	mutex_exit(&lldev->genlock);
1296 
1297 	return (0);
1298 }
1299 
1300 static int
1301 xgell_remmac(void *arg, const uint8_t *mac_addr)
1302 {
1303 	xgell_rx_ring_t *ring = arg;
1304 	xgelldev_t *lldev = ring->lldev;
1305 	xge_hal_device_t *hldev = lldev->devh;
1306 	xge_hal_status_e status;
1307 	int slot;
1308 	int slot_start;
1309 
1310 	xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");
1311 
1312 	slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
1313 	if (slot == -1)
1314 		return (EINVAL);
1315 
1316 	slot_start = ring->index * 32;
1317 
1318 	/*
1319 	 * Adjust slot to the offset in the MAC array of this ring (group).
1320 	 */
1321 	slot -= slot_start;
1322 
1323 	/*
1324 	 * Only a pre-set MAC address for this ring (group) can be removed.
1325 	 */
1326 	if (slot < 0 || slot >= ring->mmac.naddr)
1327 		return (EINVAL);
1328 
1329 
1330 	xge_assert(ring->mmac.mac_addr_set[slot]);
1331 
1332 	mutex_enter(&lldev->genlock);
1333 	if (!ring->mmac.mac_addr_set[slot]) {
1334 		mutex_exit(&lldev->genlock);
1335 		/*
1336 		 * Reaching this point is unexpected. WARNING!
1337 		 */
1338 		xge_debug_ll(XGE_ERR,
1339 		    "%s%d: caller is trying to remove an unset MAC address",
1340 		    XGELL_IFNAME, lldev->instance);
1341 		return (ENXIO);
1342 	}
1343 
1344 	status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1345 	if (status != XGE_HAL_OK) {
1346 		mutex_exit(&lldev->genlock);
1347 		return (EIO);
1348 	}
1349 
1350 	ring->mmac.mac_addr_set[slot] = B_FALSE;
1351 	ring->mmac.naddrfree++;
1352 
1353 	/*
1354 	 * TODO: Disable MAC RTS if all addresses have been cleared.
1355 	 */
1356 
1357 	/*
1358 	 * Read back the MAC address from HAL to keep the array up to date.
1359 	 */
1360 	(void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
1361 	    ring->mmac.mac_addr + slot);
1362 	mutex_exit(&lldev->genlock);
1363 
1364 	return (0);
1365 }
1366 
1367 /*
1368  * Temporarily calling the HAL function directly.
1369  *
1370  * With an MSI-X implementation, no lock would be needed, so the interrupt
1371  * handling could be faster.
1372  */
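/*
 * Note the inversion: "enabling" ring interrupts takes the HAL channel out
 * of polling mode, while "disabling" them puts the channel back into
 * polling mode for use by xgell_rx_poll().
 */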
1373 int
1374 xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
1375 {
1376 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1377 
1378 	mutex_enter(&ring->ring_lock);
1379 	xge_hal_device_rx_channel_disable_polling(ring->channelh);
1380 	mutex_exit(&ring->ring_lock);
1381 
1382 	return (0);
1383 }
1384 
1385 int
1386 xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
1387 {
1388 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1389 
1390 	mutex_enter(&ring->ring_lock);
1391 	xge_hal_device_rx_channel_enable_polling(ring->channelh);
1392 	mutex_exit(&ring->ring_lock);
1393 
1394 	return (0);
1395 }
1396 
1397 static int
1398 xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1399 {
1400 	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
1401 
1402 	rx_ring->ring_gen_num = mr_gen_num;
1403 
1404 	return (0);
1405 }
1406 
1407 /*ARGSUSED*/
1408 static void
1409 xgell_rx_ring_stop(mac_ring_driver_t rh)
1410 {
1411 }
1412 
1413 /*ARGSUSED*/
1414 static int
1415 xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
1416 {
1417 	return (0);
1418 }
1419 
1420 /*ARGSUSED*/
1421 static void
1422 xgell_tx_ring_stop(mac_ring_driver_t rh)
1423 {
1424 }
1425 
1426 /*
1427  * Callback function for the MAC layer to register all rings.
1428  *
1429  * Xframe hardware doesn't support grouping explicitly, so the driver needs
1430  * to pretend having resource groups. We may also optionally group all 8 rx
1431  * rings into a single group for increased scalability on CMT architectures,
1432  * or group one rx ring per group for maximum virtualization.
1433  *
1434  * TX grouping is actually done by the framework, so just register all TX
1435  * resources without grouping them.
1436  */
1437 void
1438 xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1439     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1440 {
1441 	xgelldev_t *lldev = (xgelldev_t *)arg;
1442 	mac_intr_t *mintr;
1443 
1444 	switch (rtype) {
1445 	case MAC_RING_TYPE_RX: {
1446 		xgell_rx_ring_t *rx_ring;
1447 
1448 		xge_assert(index < lldev->init_rx_rings);
1449 		xge_assert(rg_index < lldev->init_rx_groups);
1450 
1451 		/*
1452 		 * Performance vs. Virtualization
1453 		 */
1454 		if (lldev->init_rx_rings == lldev->init_rx_groups)
1455 			rx_ring = lldev->rx_ring + rg_index;
1456 		else
1457 			rx_ring = lldev->rx_ring + index;
1458 
1459 		rx_ring->ring_handle = rh;
1460 
1461 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
1462 		infop->mri_start = xgell_rx_ring_start;
1463 		infop->mri_stop = xgell_rx_ring_stop;
1464 		infop->mri_poll = xgell_rx_poll;
1465 		infop->mri_stat = xgell_rx_ring_stat;
1466 
1467 		mintr = &infop->mri_intr;
1468 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1469 		mintr->mi_enable = xgell_rx_ring_intr_enable;
1470 		mintr->mi_disable = xgell_rx_ring_intr_disable;
1471 
1472 		break;
1473 	}
1474 	case MAC_RING_TYPE_TX: {
1475 		xgell_tx_ring_t *tx_ring;
1476 
1477 		xge_assert(rg_index == -1);
1478 
1479 		xge_assert((index >= 0) && (index < lldev->init_tx_rings));
1480 
1481 		tx_ring = lldev->tx_ring + index;
1482 		tx_ring->ring_handle = rh;
1483 
1484 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
1485 		infop->mri_start = xgell_tx_ring_start;
1486 		infop->mri_stop = xgell_tx_ring_stop;
1487 		infop->mri_tx = xgell_ring_tx;
1488 		infop->mri_stat = xgell_tx_ring_stat;
1489 
1490 		break;
1491 	}
1492 	default:
1493 		break;
1494 	}
1495 }
1496 
1497 void
1498 xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
1499     mac_group_info_t *infop, mac_group_handle_t gh)
1500 {
1501 	xgelldev_t *lldev = (xgelldev_t *)arg;
1502 
1503 	switch (rtype) {
1504 	case MAC_RING_TYPE_RX: {
1505 		xgell_rx_ring_t *rx_ring;
1506 
1507 		xge_assert(index < lldev->init_rx_groups);
1508 
1509 		rx_ring = lldev->rx_ring + index;
1510 
1511 		rx_ring->group_handle = gh;
1512 
1513 		infop->mgi_driver = (mac_group_driver_t)rx_ring;
1514 		infop->mgi_start = NULL;
1515 		infop->mgi_stop = NULL;
1516 		infop->mgi_addmac = xgell_addmac;
1517 		infop->mgi_remmac = xgell_remmac;
1518 		infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
1519 
1520 		break;
1521 	}
1522 	case MAC_RING_TYPE_TX:
1523 		xge_assert(0);
1524 		break;
1525 	default:
1526 		break;
1527 	}
1528 }
1529 
1530 /*
1531  * xgell_maddr_set
1532  */
1533 static int
1534 xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
1535 {
1536 	xge_hal_device_t *hldev = lldev->devh;
1537 	xge_hal_status_e status;
1538 
1539 	xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
1540 
1541 	xge_debug_ll(XGE_TRACE,
1542 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1543 	    macaddr[0], macaddr[1], macaddr[2],
1544 	    macaddr[3], macaddr[4], macaddr[5]);
1545 
1546 	status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
1547 
1548 	if (status != XGE_HAL_OK) {
1549 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1550 		    XGELL_IFNAME, lldev->instance);
1551 		return (EIO);
1552 	}
1553 
1554 	return (0);
1555 }
1556 
1557 /*
1558  * xgell_rx_dtr_term
1559  *
1560  * Called by the HAL to terminate all DTRs for Ring-type
1561  * channels.
1562  */
1563 static void
1564 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1565     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1566 {
1567 	xgell_rxd_priv_t *rxd_priv =
1568 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1569 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1570 
1571 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1572 		xgell_rx_ring_t *ring = rx_buffer->ring;
1573 
1574 		mutex_enter(&ring->bf_pool.pool_lock);
1575 		xge_hal_ring_dtr_free(channelh, dtrh);
1576 		xgell_rx_buffer_release(rx_buffer);
1577 		mutex_exit(&ring->bf_pool.pool_lock);
1578 	}
1579 }
1580 
1581 /*
1582  * Open an rx ring.
1583  */
1584 static boolean_t
1585 xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
1586 {
1587 	xge_hal_status_e status;
1588 	xge_hal_channel_attr_t attr;
1589 	xgelldev_t *lldev = rx_ring->lldev;
1590 	xge_hal_device_t *hldev = lldev->devh;
1591 
1592 	if (rx_ring->live)
1593 		return (B_TRUE);
1594 
1595 	/* Create the buffer pool first */
1596 	if (!xgell_rx_create_buffer_pool(rx_ring)) {
1597 		xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
1598 		    rx_ring->index);
1599 		return (B_FALSE);
1600 	}
1601 
1602 	/* Default ring initialization */
1603 	attr.post_qid		= rx_ring->index;
1604 	attr.compl_qid		= 0;
1605 	attr.callback		= xgell_rx_1b_callback;
1606 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1607 	attr.flags		= 0;
1608 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1609 	attr.dtr_init		= xgell_rx_dtr_replenish;
1610 	attr.dtr_term		= xgell_rx_dtr_term;
1611 	attr.userdata		= rx_ring;
1612 
1613 	status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
1614 	    XGE_HAL_CHANNEL_OC_NORMAL);
1615 	if (status != XGE_HAL_OK) {
1616 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, got status "
1617 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1618 		(void) xgell_rx_destroy_buffer_pool(rx_ring);
1619 		return (B_FALSE);
1620 	}
1621 
1622 	xgell_rx_ring_maddr_init(rx_ring);
1623 
1624 	mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
1625 	    DDI_INTR_PRI(hldev->irqh));
1626 
1627 	rx_ring->poll_bytes = -1;
1628 	rx_ring->polled_bytes = 0;
1629 	rx_ring->poll_mp = NULL;
1630 	rx_ring->live = B_TRUE;
1631 
1632 	xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
1633 	    rx_ring->index);
1634 
1635 	return (B_TRUE);
1636 }
1637 
1638 static void
1639 xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
1640 {
1641 	if (!rx_ring->live)
1642 		return;
1643 	xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1644 	rx_ring->channelh = NULL;
1645 	/* This may not clean up all used buffers, driver will handle it */
1646 	if (xgell_rx_destroy_buffer_pool(rx_ring))
1647 		rx_ring->live = B_FALSE;
1648 
1649 	mutex_destroy(&rx_ring->ring_lock);
1650 }
1651 
1652 /*
1653  * xgell_rx_open
1654  * @lldev: the link layer object
1655  *
1656  * Initialize and open all RX channels.
1657  */
1658 static boolean_t
1659 xgell_rx_open(xgelldev_t *lldev)
1660 {
1661 	xgell_rx_ring_t *rx_ring;
1662 	int i;
1663 
1664 	if (lldev->live_rx_rings != 0)
1665 		return (B_TRUE);
1666 
1667 	lldev->live_rx_rings = 0;
1668 
1669 	/*
1670 	 * Initialize all rings
1671 	 */
1672 	for (i = 0; i < lldev->init_rx_rings; i++) {
1673 		rx_ring = &lldev->rx_ring[i];
1674 		rx_ring->index = i;
1675 		rx_ring->lldev = lldev;
1676 		rx_ring->live = B_FALSE;
1677 
1678 		if (!xgell_rx_ring_open(rx_ring))
1679 			return (B_FALSE);
1680 
1681 		lldev->live_rx_rings++;
1682 	}
1683 
1684 	return (B_TRUE);
1685 }
1686 
1687 static void
1688 xgell_rx_close(xgelldev_t *lldev)
1689 {
1690 	xgell_rx_ring_t *rx_ring;
1691 	int i;
1692 
1693 	if (lldev->live_rx_rings == 0)
1694 		return;
1695 
1696 	/*
1697 	 * Close all rx rings
1698 	 */
1699 	for (i = 0; i < lldev->init_rx_rings; i++) {
1700 		rx_ring = &lldev->rx_ring[i];
1701 
1702 		if (rx_ring->live) {
1703 			xgell_rx_ring_close(rx_ring);
1704 			lldev->live_rx_rings--;
1705 		}
1706 	}
1707 
1708 	xge_assert(lldev->live_rx_rings == 0);
1709 }
1710 
1711 /*
1712  * xgell_tx_term
1713  *
1714  * Called by the HAL to terminate all DTRs for Fifo-type
1715  * channels.
1716  */
1717 static void
1718 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1719     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1720 {
1721 	xgell_txd_priv_t *txd_priv =
1722 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1723 	mblk_t *mp = txd_priv->mblk;
1724 	int i;
1725 
1726 	/*
1727 	 * for Tx we must clean up the DTR *only* if it has been
1728 	 * posted!
1729 	 */
1730 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1731 		return;
1732 	}
1733 
1734 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1735 		xge_assert(txd_priv->dma_handles[i]);
1736 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1737 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1738 		txd_priv->dma_handles[i] = 0;
1739 	}
1740 
1741 	xge_hal_fifo_dtr_free(channelh, dtrh);
1742 
1743 	if (mp) {
1744 		txd_priv->mblk = NULL;
1745 		freemsg(mp);
1746 	}
1747 }
1748 
1749 static boolean_t
1750 xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
1751 {
1752 	xge_hal_status_e status;
1753 	xge_hal_channel_attr_t attr;
1754 	xgelldev_t *lldev = tx_ring->lldev;
1755 
1756 	if (tx_ring->live)
1757 		return (B_TRUE);
1758 
1759 	attr.post_qid		= tx_ring->index;
1760 	attr.compl_qid		= 0;
1761 	attr.callback		= xgell_xmit_compl;
1762 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1763 	attr.flags		= 0;
1764 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1765 	attr.dtr_init		= NULL;
1766 	attr.dtr_term		= xgell_tx_term;
1767 	attr.userdata		= tx_ring;
1768 
1769 	status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
1770 	    XGE_HAL_CHANNEL_OC_NORMAL);
1771 	if (status != XGE_HAL_OK) {
1772 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel, got status "
1773 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1774 		return (B_FALSE);
1775 	}
1776 
1777 	tx_ring->live = B_TRUE;
1778 
1779 	return (B_TRUE);
1780 }
1781 
1782 static void
1783 xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
1784 {
1785 	if (!tx_ring->live)
1786 		return;
1787 	xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1788 	tx_ring->live = B_FALSE;
1789 }
1790 
1791 /*
1792  * xgell_tx_open
1793  * @lldev: the link layer object
1794  *
1795  * Initialize and open all TX channels.
1796  */
1797 static boolean_t
1798 xgell_tx_open(xgelldev_t *lldev)
1799 {
1800 	xgell_tx_ring_t *tx_ring;
1801 	int i;
1802 
1803 	if (lldev->live_tx_rings != 0)
1804 		return (B_TRUE);
1805 
1806 	lldev->live_tx_rings = 0;
1807 
1808 	/*
1809 	 * Open rings in reservation order to match the h/w sequence.
1810 	 */
1811 	for (i = 0; i < lldev->init_tx_rings; i++) {
1812 		tx_ring = &lldev->tx_ring[i];
1813 		tx_ring->index = i;
1814 		tx_ring->lldev = lldev;
1815 		tx_ring->live = B_FALSE;
1816 
1817 		if (!xgell_tx_ring_open(tx_ring))
1818 			return (B_FALSE);
1819 
1820 		lldev->live_tx_rings++;
1821 	}
1822 
1823 	return (B_TRUE);
1824 }
1825 
1826 static void
1827 xgell_tx_close(xgelldev_t *lldev)
1828 {
1829 	xgell_tx_ring_t *tx_ring;
1830 	int i;
1831 
1832 	if (lldev->live_tx_rings == 0)
1833 		return;
1834 
1835 	/*
1836 	 * Close rings in the same order they were opened.
1837 	 */
1838 	for (i = 0; i < lldev->init_tx_rings; i++) {
1839 		tx_ring = &lldev->tx_ring[i];
1840 		if (tx_ring->live) {
1841 			xgell_tx_ring_close(tx_ring);
1842 			lldev->live_tx_rings--;
1843 		}
1844 	}
1845 }
1846 
1847 static int
1848 xgell_initiate_start(xgelldev_t *lldev)
1849 {
1850 	xge_hal_status_e status;
1851 	xge_hal_device_t *hldev = lldev->devh;
1852 	int maxpkt = hldev->config.mtu;
1853 
1854 	/* check initial mtu before enabling the device */
1855 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1856 	if (status != XGE_HAL_OK) {
1857 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1858 		    XGELL_IFNAME, lldev->instance, maxpkt);
1859 		return (EINVAL);
1860 	}
1861 
1862 	/* set initial mtu before enabling the device */
1863 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1864 	if (status != XGE_HAL_OK) {
1865 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1866 		    XGELL_IFNAME, lldev->instance, maxpkt);
1867 		return (EIO);
1868 	}
1869 
1870 	/* tune jumbo/normal frame UFC counters */
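	/*
	 * (ufc_b/ufc_c are read here as the RTI utilization-range frame-count
	 * thresholds used for Rx interrupt moderation, with the larger "_J"
	 * defaults applied for jumbo MTUs; that interpretation of the HAL
	 * defaults is an assumption, only the selection logic is visible.)
	 */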
1871 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
1872 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1873 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1874 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1875 
1876 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
1877 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1878 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1879 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1880 
1881 	/* now, enable the device */
1882 	status = xge_hal_device_enable(lldev->devh);
1883 	if (status != XGE_HAL_OK) {
1884 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1885 		    XGELL_IFNAME, lldev->instance);
1886 		return (EIO);
1887 	}
1888 
1889 	if (!xgell_rx_open(lldev)) {
1890 		status = xge_hal_device_disable(lldev->devh);
1891 		if (status != XGE_HAL_OK) {
1892 			u64 adapter_status;
1893 			(void) xge_hal_device_status(lldev->devh,
1894 			    &adapter_status);
1895 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1896 			    "the device. adaper status 0x%"PRIx64
1897 			    " returned status %d",
1898 			    XGELL_IFNAME, lldev->instance,
1899 			    (uint64_t)adapter_status, status);
1900 		}
1901 		xgell_rx_close(lldev);
1902 		xge_os_mdelay(1500);
1903 		return (ENOMEM);
1904 	}
1905 
1906 	if (!xgell_tx_open(lldev)) {
1907 		status = xge_hal_device_disable(lldev->devh);
1908 		if (status != XGE_HAL_OK) {
1909 			u64 adapter_status;
1910 			(void) xge_hal_device_status(lldev->devh,
1911 			    &adapter_status);
1912 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1913 			    "the device. adaper status 0x%"PRIx64
1914 			    " returned status %d",
1915 			    XGELL_IFNAME, lldev->instance,
1916 			    (uint64_t)adapter_status, status);
1917 		}
1918 		xgell_tx_close(lldev);
1919 		xgell_rx_close(lldev);
1920 		xge_os_mdelay(1500);
1921 		return (ENOMEM);
1922 	}
1923 
1924 	/* time to enable interrupts */
1925 	(void) xge_enable_intrs(lldev);
1926 	xge_hal_device_intr_enable(lldev->devh);
1927 
1928 	lldev->is_initialized = 1;
1929 
1930 	return (0);
1931 }
1932 
1933 static void
1934 xgell_initiate_stop(xgelldev_t *lldev)
1935 {
1936 	xge_hal_status_e status;
1937 
1938 	lldev->is_initialized = 0;
1939 
1940 	status = xge_hal_device_disable(lldev->devh);
1941 	if (status != XGE_HAL_OK) {
1942 		u64 adapter_status;
1943 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1944 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1945 		    "the device. adaper status 0x%"PRIx64" returned status %d",
1946 		    XGELL_IFNAME, lldev->instance,
1947 		    (uint64_t)adapter_status, status);
1948 	}
1949 	xge_hal_device_intr_disable(lldev->devh);
1950 	/* disable OS ISR's */
1951 	xge_disable_intrs(lldev);
1952 
1953 	xge_debug_ll(XGE_TRACE, "%s",
1954 	    "waiting for device irq to become quiescent...");
1955 	xge_os_mdelay(1500);
1956 
1957 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1958 
1959 	xgell_rx_close(lldev);
1960 	xgell_tx_close(lldev);
1961 }
1962 
1963 /*
1964  * xgell_m_start
1965  * @arg: pointer to device private structure (xgelldev_t)
1966  *
1967  * This function is called by MAC Layer to enable the XFRAME
1968  * firmware to generate interrupts and also prepare the
1969  * driver to call mac_rx for delivering receive packets
1970  * to MAC Layer.
1971  */
1972 static int
1973 xgell_m_start(void *arg)
1974 {
1975 	xgelldev_t *lldev = arg;
1976 	xge_hal_device_t *hldev = lldev->devh;
1977 	int ret;
1978 
1979 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1980 	    lldev->instance);
1981 
1982 	mutex_enter(&lldev->genlock);
1983 
1984 	if (lldev->is_initialized) {
1985 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1986 		    XGELL_IFNAME, lldev->instance);
1987 		mutex_exit(&lldev->genlock);
1988 		return (EINVAL);
1989 	}
1990 
1991 	hldev->terminating = 0;
1992 	if (ret = xgell_initiate_start(lldev)) {
1993 		mutex_exit(&lldev->genlock);
1994 		return (ret);
1995 	}
1996 
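	/*
	 * Kick off the periodic device poll; xgell_m_stop() cancels any
	 * pending callout via untimeout().  (Presumably xge_device_poll()
	 * re-arms itself for as long as the device stays up.)
	 */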
1997 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1998 
1999 	mutex_exit(&lldev->genlock);
2000 
2001 	return (0);
2002 }
2003 
2004 /*
2005  * xgell_m_stop
2006  * @arg: pointer to device private data (xgelldev_t)
2007  *
2008  * This function is called by the MAC Layer to disable
2009  * the XFRAME firmware for generating any interrupts and
2010  * also stop the driver from calling mac_rx() for
2011  * delivering data packets to the MAC Layer.
2012  */
2013 static void
2014 xgell_m_stop(void *arg)
2015 {
2016 	xgelldev_t *lldev = arg;
2017 	xge_hal_device_t *hldev = lldev->devh;
2018 
2019 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
2020 
2021 	mutex_enter(&lldev->genlock);
2022 	if (!lldev->is_initialized) {
2023 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
2024 		mutex_exit(&lldev->genlock);
2025 		return;
2026 	}
2027 
2028 	xge_hal_device_terminating(hldev);
2029 	xgell_initiate_stop(lldev);
2030 
2031 	/* reset device */
2032 	(void) xge_hal_device_reset(lldev->devh);
2033 
2034 	mutex_exit(&lldev->genlock);
2035 
2036 	if (lldev->timeout_id != 0) {
2037 		(void) untimeout(lldev->timeout_id);
2038 	}
2039 
2040 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
2041 }
2042 
2043 /*
2044  * xgell_onerr_reset
2045  * @lldev: pointer to xgelldev_t structure
2046  *
2047  * This function is called by the HAL event framework to reset the HW.
2048  * It must be called with genlock held.
2049  */
2050 int
2051 xgell_onerr_reset(xgelldev_t *lldev)
2052 {
2053 	int rc = 0;
2054 
2055 	if (!lldev->is_initialized) {
2056 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
2057 		    XGELL_IFNAME, lldev->instance);
2058 		return (rc);
2059 	}
2060 
2061 	lldev->in_reset = 1;
2062 	xgell_initiate_stop(lldev);
2063 
2064 	/* reset device */
2065 	(void) xge_hal_device_reset(lldev->devh);
2066 
2067 	rc = xgell_initiate_start(lldev);
2068 	lldev->in_reset = 0;
2069 
2070 	return (rc);
2071 }
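/*
 * Illustrative sketch only (not part of the driver): a caller in the HAL
 * error/event path is expected to hold genlock across the reset, roughly:
 *
 *	mutex_enter(&lldev->genlock);
 *	(void) xgell_onerr_reset(lldev);
 *	mutex_exit(&lldev->genlock);
 *
 * The surrounding event-dispatch code is assumed here, not shown.
 */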
2072 
2073 /*
2074  * xgell_m_multicst
2075  * @arg: pointer to device private structure (xgelldev_t)
2076  * @add: B_TRUE to enable, B_FALSE to disable multicast reception
2077  * @mc_addr: multicast address (not used per-address yet; see FIXME below)
2078  *
2079  * This function is called by MAC Layer to enable or
2080  * disable device-level reception of specific multicast addresses.
2081  */
2082 static int
2083 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
2084 {
2085 	xge_hal_status_e status;
2086 	xgelldev_t *lldev = (xgelldev_t *)arg;
2087 	xge_hal_device_t *hldev = lldev->devh;
2088 
2089 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
2090 
2091 	mutex_enter(&lldev->genlock);
2092 
2093 	if (!lldev->is_initialized) {
2094 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
2095 		    XGELL_IFNAME, lldev->instance);
2096 		mutex_exit(&lldev->genlock);
2097 		return (EIO);
2098 	}
2099 
2100 	/* FIXME: missing HAL functionality: enable_one() */
2101 
2102 	status = (add) ?
2103 	    xge_hal_device_mcast_enable(hldev) :
2104 	    xge_hal_device_mcast_disable(hldev);
2105 
2106 	if (status != XGE_HAL_OK) {
2107 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
2108 		    add ? "enable" : "disable", status);
2109 		mutex_exit(&lldev->genlock);
2110 		return (EIO);
2111 	}
2112 
2113 	mutex_exit(&lldev->genlock);
2114 
2115 	return (0);
2116 }
2117 
2118 
2119 /*
2120  * xgell_m_promisc
2121  * @arg: pointer to device private structure (xgelldev_t)
2122  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
2123  *
2124  * This function is called by MAC Layer to enable or
2125  * disable the reception of all the packets on the medium
2126  */
2127 static int
2128 xgell_m_promisc(void *arg, boolean_t on)
2129 {
2130 	xgelldev_t *lldev = (xgelldev_t *)arg;
2131 	xge_hal_device_t *hldev = lldev->devh;
2132 
2133 	mutex_enter(&lldev->genlock);
2134 
2135 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
2136 
2137 	if (!lldev->is_initialized) {
2138 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
2139 		    XGELL_IFNAME, lldev->instance);
2140 		mutex_exit(&lldev->genlock);
2141 		return (EIO);
2142 	}
2143 
2144 	if (on) {
2145 		xge_hal_device_promisc_enable(hldev);
2146 	} else {
2147 		xge_hal_device_promisc_disable(hldev);
2148 	}
2149 
2150 	mutex_exit(&lldev->genlock);
2151 
2152 	return (0);
2153 }
2154 
2155 /*
2156  * xgell_m_stat
2157  * @arg: pointer to device private structure (xgelldev_t)
2158  *
2159  * This function is called by MAC Layer to get network statistics
2160  * from the driver.
2161  */
2162 static int
2163 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
2164 {
2165 	xge_hal_stats_hw_info_t *hw_info;
2166 	xgelldev_t *lldev = (xgelldev_t *)arg;
2167 	xge_hal_device_t *hldev = lldev->devh;
2168 
2169 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
2170 
2171 	mutex_enter(&lldev->genlock);
2172 
2173 	if (!lldev->is_initialized) {
2174 		mutex_exit(&lldev->genlock);
2175 		return (EAGAIN);
2176 	}
2177 
2178 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
2179 		mutex_exit(&lldev->genlock);
2180 		return (EAGAIN);
2181 	}
2182 
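	/*
	 * The adapter keeps 32-bit counters with separate overflow
	 * registers, so each 64-bit value below is assembled as
	 * (oflow << 32) | low_32_bits.
	 */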
2183 	switch (stat) {
2184 	case MAC_STAT_IFSPEED:
2185 		*val = 10000000000ull; /* 10G */
2186 		break;
2187 
2188 	case MAC_STAT_MULTIRCV:
2189 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
2190 		    hw_info->rmac_vld_mcst_frms;
2191 		break;
2192 
2193 	case MAC_STAT_BRDCSTRCV:
2194 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
2195 		    hw_info->rmac_vld_bcst_frms;
2196 		break;
2197 
2198 	case MAC_STAT_MULTIXMT:
2199 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
2200 		    hw_info->tmac_mcst_frms;
2201 		break;
2202 
2203 	case MAC_STAT_BRDCSTXMT:
2204 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
2205 		    hw_info->tmac_bcst_frms;
2206 		break;
2207 
2208 	case MAC_STAT_RBYTES:
2209 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
2210 		    hw_info->rmac_ttl_octets;
2211 		break;
2212 
2213 	case MAC_STAT_NORCVBUF:
2214 		*val = hw_info->rmac_drop_frms;
2215 		break;
2216 
2217 	case MAC_STAT_IERRORS:
2218 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
2219 		    hw_info->rmac_discarded_frms;
2220 		break;
2221 
2222 	case MAC_STAT_OBYTES:
2223 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
2224 		    hw_info->tmac_ttl_octets;
2225 		break;
2226 
2227 	case MAC_STAT_NOXMTBUF:
2228 		*val = hw_info->tmac_drop_frms;
2229 		break;
2230 
2231 	case MAC_STAT_OERRORS:
2232 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
2233 		    hw_info->tmac_any_err_frms;
2234 		break;
2235 
2236 	case MAC_STAT_IPACKETS:
2237 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
2238 		    hw_info->rmac_vld_frms;
2239 		break;
2240 
2241 	case MAC_STAT_OPACKETS:
2242 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
2243 		    hw_info->tmac_frms;
2244 		break;
2245 
2246 	case ETHER_STAT_FCS_ERRORS:
2247 		*val = hw_info->rmac_fcs_err_frms;
2248 		break;
2249 
2250 	case ETHER_STAT_TOOLONG_ERRORS:
2251 		*val = hw_info->rmac_long_frms;
2252 		break;
2253 
2254 	case ETHER_STAT_LINK_DUPLEX:
2255 		*val = LINK_DUPLEX_FULL;
2256 		break;
2257 
2258 	default:
2259 		mutex_exit(&lldev->genlock);
2260 		return (ENOTSUP);
2261 	}
2262 
2263 	mutex_exit(&lldev->genlock);
2264 
2265 	return (0);
2266 }
2267 
2268 /*
2269  * Retrieve a value for one of the statistics for a particular rx ring
2270  */
2271 int
2272 xgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2273 {
2274 	xgell_rx_ring_t	*rx_ring = (xgell_rx_ring_t *)rh;
2275 
2276 	switch (stat) {
2277 	case MAC_STAT_RBYTES:
2278 		*val = rx_ring->rx_bytes;
2279 		break;
2280 
2281 	case MAC_STAT_IPACKETS:
2282 		*val = rx_ring->rx_pkts;
2283 		break;
2284 
2285 	default:
2286 		*val = 0;
2287 		return (ENOTSUP);
2288 	}
2289 
2290 	return (0);
2291 }
2292 
2293 /*
2294  * Retrieve a value for one of the statistics for a particular tx ring
2295  */
2296 int
2297 xgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2298 {
2299 	xgell_tx_ring_t	*tx_ring = (xgell_tx_ring_t *)rh;
2300 
2301 	switch (stat) {
2302 	case MAC_STAT_OBYTES:
2303 		*val = tx_ring->tx_bytes;
2304 		break;
2305 
2306 	case MAC_STAT_OPACKETS:
2307 		*val = tx_ring->tx_pkts;
2308 		break;
2309 
2310 	default:
2311 		*val = 0;
2312 		return (ENOTSUP);
2313 	}
2314 
2315 	return (0);
2316 }
2317 
2318 /*
2319  * xgell_device_alloc - Allocate new LL device
2320  */
2321 int
2322 xgell_device_alloc(xge_hal_device_h devh,
2323     dev_info_t *dev_info, xgelldev_t **lldev_out)
2324 {
2325 	xgelldev_t *lldev;
2326 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2327 	int instance = ddi_get_instance(dev_info);
2328 
2329 	*lldev_out = NULL;
2330 
2331 	xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
2332 	    XGELL_IFNAME, instance);
2333 
2334 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
2335 
2336 	lldev->devh = hldev;
2337 	lldev->instance = instance;
2338 	lldev->dev_info = dev_info;
2339 
2340 	*lldev_out = lldev;
2341 
2342 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
2343 
2344 	return (DDI_SUCCESS);
2345 }
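/*
 * Rough lifecycle sketch (the real callers sit in the attach(9E)/detach(9E)
 * path outside this file and are assumed here for illustration):
 *
 *	xgell_device_alloc(devh, dip, &lldev);	attach: allocate LL state
 *	xgell_device_register(lldev, &config);	attach: NDD + MAC registration
 *	... xgell_m_start()/xgell_m_stop() run via MAC callbacks ...
 *	xgell_device_unregister(lldev);		detach: MAC unregistration
 *	xgell_device_free(lldev);		detach: release LL state
 */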
2346 
2347 /*
2348  * xgell_device_free
2349  */
2350 void
2351 xgell_device_free(xgelldev_t *lldev)
2352 {
2353 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
2354 	    XGELL_IFNAME, lldev->instance);
2355 
2356 	kmem_free(lldev, sizeof (xgelldev_t));
2357 }
2358 
2359 /*
2360  * xgell_ioctl
2361  */
2362 static void
2363 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2364 {
2365 	xgelldev_t *lldev = arg;
2366 	struct iocblk *iocp;
2367 	int err = 0;
2368 	int cmd;
2369 	int need_privilege = 1;
2370 	int ret = 0;
2371 
2372 
2373 	iocp = (struct iocblk *)mp->b_rptr;
2374 	iocp->ioc_error = 0;
2375 	cmd = iocp->ioc_cmd;
2376 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
2377 	switch (cmd) {
2378 	case ND_GET:
2379 		need_privilege = 0;
2380 		/* FALLTHRU */
2381 	case ND_SET:
2382 		break;
2383 	default:
2384 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
2385 		miocnak(wq, mp, 0, EINVAL);
2386 		return;
2387 	}
2388 
2389 	if (need_privilege) {
2390 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2391 		if (err != 0) {
2392 			xge_debug_ll(XGE_ERR,
2393 			    "drv_priv(): rejected cmd 0x%x, err %d",
2394 			    cmd, err);
2395 			miocnak(wq, mp, 0, err);
2396 			return;
2397 		}
2398 	}
2399 
2400 	switch (cmd) {
2401 	case ND_GET:
2402 		/*
2403 		 * If nd_getset() returns B_FALSE, the command was
2404 		 * not valid (e.g. unknown name), so we just tell the
2405 		 * top-level ioctl code to send a NAK (with code EINVAL).
2406 		 *
2407 		 * Otherwise, nd_getset() will have built the reply to
2408 		 * be sent (but not actually sent it), so we tell the
2409 		 * caller to send the prepared reply.
2410 		 */
2411 		ret = nd_getset(wq, lldev->ndp, mp);
2412 		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
2413 		break;
2414 
2415 	case ND_SET:
2416 		ret = nd_getset(wq, lldev->ndp, mp);
2417 		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
2418 		break;
2419 
2420 	default:
2421 		break;
2422 	}
2423 
2424 	if (ret == B_FALSE) {
2425 		xge_debug_ll(XGE_ERR,
2426 		    "nd_getset(): rejected cmd 0x%x, err %d",
2427 		    cmd, err);
2428 		miocnak(wq, mp, 0, EINVAL);
2429 	} else {
2430 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2431 		    M_IOCACK : M_IOCNAK;
2432 		qreply(wq, mp);
2433 	}
2434 }
2435 
2436 
2437 static boolean_t
2438 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2439 {
2440 	xgelldev_t *lldev = arg;
2441 
2442 	xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
2443 
2444 	switch (cap) {
2445 	case MAC_CAPAB_HCKSUM: {
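		/*
		 * Advertise Tx checksum offload: full TCP/UDP checksum for
		 * IPv4 and IPv6, plus IPv4 header checksum.
		 */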
2446 		uint32_t *hcksum_txflags = cap_data;
2447 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2448 		    HCKSUM_IPHDRCKSUM;
2449 		break;
2450 	}
2451 	case MAC_CAPAB_LSO: {
2452 		mac_capab_lso_t *cap_lso = cap_data;
2453 
2454 		if (lldev->config.lso_enable) {
2455 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2456 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
2457 			break;
2458 		} else {
2459 			return (B_FALSE);
2460 		}
2461 	}
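	/*
	 * Expose the static ring/group layout to the MAC framework; the
	 * framework later calls back into xgell_fill_ring()/xgell_fill_group()
	 * to bind individual rings and groups.
	 */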
2462 	case MAC_CAPAB_RINGS: {
2463 		mac_capab_rings_t *cap_rings = cap_data;
2464 
2465 		switch (cap_rings->mr_type) {
2466 		case MAC_RING_TYPE_RX:
2467 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2468 			cap_rings->mr_rnum = lldev->init_rx_rings;
2469 			cap_rings->mr_gnum = lldev->init_rx_groups;
2470 			cap_rings->mr_rget = xgell_fill_ring;
2471 			cap_rings->mr_gget = xgell_fill_group;
2472 			break;
2473 		case MAC_RING_TYPE_TX:
2474 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2475 			cap_rings->mr_rnum = lldev->init_tx_rings;
2476 			cap_rings->mr_gnum = 0;
2477 			cap_rings->mr_rget = xgell_fill_ring;
2478 			cap_rings->mr_gget = NULL;
2479 			break;
2480 		default:
2481 			break;
2482 		}
2483 		break;
2484 	}
2485 	default:
2486 		return (B_FALSE);
2487 	}
2488 	return (B_TRUE);
2489 }
2490 
2491 static int
2492 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2493 {
2494 	xgelldev_t *lldev = (xgelldev_t *)cp;
2495 	xge_hal_status_e status;
2496 	int count = 0, retsize;
2497 	char *buf;
2498 
2499 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2500 	if (buf == NULL) {
2501 		return (ENOSPC);
2502 	}
2503 
2504 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2505 	    buf, &retsize);
2506 	if (status != XGE_HAL_OK) {
2507 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2508 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2509 		return (EINVAL);
2510 	}
2511 	count += retsize;
2512 
2513 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2514 	    XGELL_STATS_BUFSIZE - count,
2515 	    buf+count, &retsize);
2516 	if (status != XGE_HAL_OK) {
2517 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2518 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2519 		return (EINVAL);
2520 	}
2521 	count += retsize;
2522 
2523 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2524 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2525 	if (status != XGE_HAL_OK) {
2526 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2527 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2528 		return (EINVAL);
2529 	}
2530 	count += retsize;
2531 
2532 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2533 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2534 	if (status != XGE_HAL_OK) {
2535 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2536 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2537 		return (EINVAL);
2538 	}
2539 	count += retsize;
2540 
2541 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2542 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2543 	if (status != XGE_HAL_OK) {
2544 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2545 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2546 		return (EINVAL);
2547 	}
2548 	count += retsize;
2549 
2550 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2551 	(void) mi_mpprintf(mp, "%s", buf);
2552 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2553 
2554 	return (0);
2555 }
2556 
2557 static int
2558 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2559 {
2560 	xgelldev_t *lldev = (xgelldev_t *)cp;
2561 	xge_hal_status_e status;
2562 	int retsize;
2563 	char *buf;
2564 
2565 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2566 	if (buf == NULL) {
2567 		return (ENOSPC);
2568 	}
2569 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2570 	    buf, &retsize);
2571 	if (status != XGE_HAL_OK) {
2572 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2573 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2574 		return (EINVAL);
2575 	}
2576 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2577 	(void) mi_mpprintf(mp, "%s", buf);
2578 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2579 
2580 	return (0);
2581 }
2582 
2583 static int
2584 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2585 {
2586 	xgelldev_t *lldev = (xgelldev_t *)cp;
2587 	xge_hal_status_e status;
2588 	int retsize;
2589 	char *buf;
2590 
2591 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2592 	if (buf == NULL) {
2593 		return (ENOSPC);
2594 	}
2595 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2596 	    buf, &retsize);
2597 	if (status != XGE_HAL_OK) {
2598 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2599 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2600 		return (EINVAL);
2601 	}
2602 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2603 	(void) mi_mpprintf(mp, "%s", buf);
2604 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2605 
2606 	return (0);
2607 }
2608 
2609 static unsigned long bar0_offset = 0x110; /* adapter_control */
2610 
2611 static int
2612 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2613 {
2614 	xgelldev_t *lldev = (xgelldev_t *)cp;
2615 	xge_hal_status_e status;
2616 	int retsize;
2617 	char *buf;
2618 
2619 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2620 	if (buf == NULL) {
2621 		return (ENOSPC);
2622 	}
2623 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2624 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2625 	if (status != XGE_HAL_OK) {
2626 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2627 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2628 		return (EINVAL);
2629 	}
2630 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2631 	(void) mi_mpprintf(mp, "%s", buf);
2632 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2633 
2634 	return (0);
2635 }
2636 
2637 static int
2638 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2639 {
2640 	unsigned long old_offset = bar0_offset;
2641 	char *end;
2642 
2643 	if (value && *value == '0' &&
2644 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2645 		value += 2;
2646 	}
2647 
2648 	bar0_offset = mi_strtol(value, &end, 16);
2649 	if (end == value) {
2650 		bar0_offset = old_offset;
2651 		return (EINVAL);
2652 	}
2653 
2654 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2655 
2656 	return (0);
2657 }
2658 
2659 static int
2660 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2661 {
2662 	char *buf;
2663 
2664 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2665 	if (buf == NULL) {
2666 		return (ENOSPC);
2667 	}
2668 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2669 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2670 
2671 	return (0);
2672 }
2673 
2674 static int
2675 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2676     cred_t *credp)
2677 {
2678 	int level;
2679 	char *end;
2680 
2681 	level = mi_strtol(value, &end, 10);
2682 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2683 		return (EINVAL);
2684 	}
2685 
2686 	xge_hal_driver_debug_level_set(level);
2687 
2688 	return (0);
2689 }
2690 
2691 static int
2692 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2693 {
2694 	char *buf;
2695 
2696 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2697 	if (buf == NULL) {
2698 		return (ENOSPC);
2699 	}
2700 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2701 	    xge_hal_driver_debug_module_mask());
2702 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2703 
2704 	return (0);
2705 }
2706 
2707 static int
2708 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2709 			    cred_t *credp)
2710 {
2711 	u32 mask;
2712 	char *end;
2713 
2714 	if (value && *value == '0' &&
2715 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2716 		value += 2;
2717 	}
2718 
2719 	mask = mi_strtol(value, &end, 16);
2720 	if (end == value) {
2721 		return (EINVAL);
2722 	}
2723 
2724 	xge_hal_driver_debug_module_mask_set(mask);
2725 
2726 	return (0);
2727 }
2728 
2729 static int
2730 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2731 {
2732 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2733 	xge_hal_status_e status;
2734 	int retsize;
2735 	char *buf;
2736 
2737 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2738 	if (buf == NULL) {
2739 		return (ENOSPC);
2740 	}
2741 	status = xge_hal_aux_device_config_read(lldev->devh,
2742 	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2743 	if (status != XGE_HAL_OK) {
2744 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2745 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2746 		    status);
2747 		return (EINVAL);
2748 	}
2749 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2750 	(void) mi_mpprintf(mp, "%s", buf);
2751 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2752 
2753 	return (0);
2754 }
2755 
2756 /*
2757  * xgell_device_register
2758  * @lldev: pointer to valid LL device
2759  * @config: pointer to this network device configuration
2760  *
2761  * This function registers the network device with the NDD and
2762  * MAC frameworks.
2763  */
2764 int
2765 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2766 {
2767 	mac_register_t *macp = NULL;
2768 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2769 
2770 	/*
2771 	 * Initialize some NDD interface for internal debug.
2772 	 */
2773 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2774 	    (caddr_t)lldev) == B_FALSE)
2775 		goto xgell_ndd_fail;
2776 
2777 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2778 	    (caddr_t)lldev) == B_FALSE)
2779 		goto xgell_ndd_fail;
2780 
2781 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2782 	    (caddr_t)lldev) == B_FALSE)
2783 		goto xgell_ndd_fail;
2784 
2785 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2786 	    (caddr_t)lldev) == B_FALSE)
2787 		goto xgell_ndd_fail;
2788 
2789 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2790 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2791 		goto xgell_ndd_fail;
2792 
2793 	if (nd_load(&lldev->ndp, "debug_module_mask",
2794 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2795 	    (caddr_t)lldev) == B_FALSE)
2796 		goto xgell_ndd_fail;
2797 
2798 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2799 	    (caddr_t)lldev) == B_FALSE)
2800 		goto xgell_ndd_fail;
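	/*
	 * These parameters are read (and, for bar0/debug_*, written)
	 * through the ndd(1M) interface, along the lines of:
	 *
	 *	# ndd /dev/xge0 stats
	 *	# ndd -set /dev/xge0 bar0 0x110
	 *
	 * The device node name above is an assumption for illustration.
	 */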
2801 
2802 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2803 
2804 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
2805 	    DDI_INTR_PRI(hldev->irqh));
2806 
2807 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2808 		goto xgell_register_fail;
2809 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2810 	macp->m_driver = lldev;
2811 	macp->m_dip = lldev->dev_info;
2812 	macp->m_src_addr = hldev->macaddr[0];
2813 	macp->m_callbacks = &xgell_m_callbacks;
2814 	macp->m_min_sdu = 0;
2815 	macp->m_max_sdu = hldev->config.mtu;
2816 	macp->m_margin = VLAN_TAGSZ;
2817 	macp->m_v12n = MAC_VIRT_LEVEL1;
2818 
2819 	/*
2820 	 * MAC Registration.
2821 	 */
2822 	if (mac_register(macp, &lldev->mh) != 0)
2823 		goto xgell_register_fail;
2824 
2825 	/* Always free the macp after register */
2826 	if (macp != NULL)
2827 		mac_free(macp);
2828 
2829 	/* Calculate tx_copied_max here ??? */
2830 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2831 	    hldev->config.fifo.alignment_size *
2832 	    hldev->config.fifo.max_aligned_frags;
2833 
2834 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
2835 	    XGELL_IFNAME, lldev->instance);
2836 
2837 	return (DDI_SUCCESS);
2838 
2839 xgell_ndd_fail:
2840 	nd_free(&lldev->ndp);
2841 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2842 	return (DDI_FAILURE);
2843 
2844 xgell_register_fail:
2845 	if (macp != NULL)
2846 		mac_free(macp);
2847 	nd_free(&lldev->ndp);
2848 	mutex_destroy(&lldev->genlock);
2849 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2850 	return (DDI_FAILURE);
2851 }
2852 
2853 /*
2854  * xgell_device_unregister
2855  * @lldev: pointer to valid LL device
2856  *
2857  * This function will unregister the network device from the MAC
2858  * framework and release its NDD parameters.
2859  */
2860 int
2861 xgell_device_unregister(xgelldev_t *lldev)
2862 {
2863 	if (mac_unregister(lldev->mh) != 0) {
2864 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2865 		    XGELL_IFNAME, lldev->instance);
2866 		return (DDI_FAILURE);
2867 	}
2868 
2869 	mutex_destroy(&lldev->genlock);
2870 
2871 	nd_free(&lldev->ndp);
2872 
2873 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",
2874 	    XGELL_IFNAME, lldev->instance);
2875 
2876 	return (DDI_SUCCESS);
2877 }
2878