xref: /titanic_51/usr/src/uts/common/io/xge/drv/xgell.c (revision bfed486ad8de8b8ebc6345a8e10accae08bf2f45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *  Copyright (c) 2002-2008 Neterion, Inc.
29  *  All rights reserved.
30  *
31  *  FileName :    xgell.c
32  *
33  *  Description:  Xge Link Layer data path implementation
34  *
35  */
36 
37 #include "xgell.h"
38 
39 #include <netinet/ip.h>
40 #include <netinet/tcp.h>
41 #include <netinet/udp.h>
42 
43 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
44     sizeof (struct ether_vlan_header))
45 
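/*
 * A note on the 2-byte headroom below: with a 14-byte DIX (Ethernet II)
 * header, starting the frame 2 bytes into the buffer leaves the IP header on
 * a 4-byte boundary. This is the usual rationale for such an offset and is
 * presumably why the comment says "for DIX-only packets" -- LLC/SNAP headers
 * would not end up aligned the same way.
 */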
46 #define	HEADROOM		2	/* for DIX-only packets */
47 
48 void header_free_func(void *arg) { }
49 frtn_t header_frtn = {header_free_func, NULL};
50 
51 /* DMA attributes used for Tx side */
52 static struct ddi_dma_attr tx_dma_attr = {
53 	DMA_ATTR_V0,			/* dma_attr_version */
54 	0x0ULL,				/* dma_attr_addr_lo */
55 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
56 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
57 #if defined(__sparc)
58 	0x2000,				/* dma_attr_align */
59 #else
60 	0x1000,				/* dma_attr_align */
61 #endif
62 	0xFC00FC,			/* dma_attr_burstsizes */
63 	0x1,				/* dma_attr_minxfer */
64 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
65 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
66 	18,				/* dma_attr_sgllen */
67 	(unsigned int)1,		/* dma_attr_granular */
68 	0				/* dma_attr_flags */
69 };
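/*
 * With the Tx attributes above, a single ddi_dma_addr_bind_handle() may be
 * split into at most 18 cookies (dma_attr_sgllen) anywhere in the 64-bit
 * address space; xgell_ring_tx() below additionally checks the accumulated
 * cookie count against hldev->config.fifo.max_frags before using them.
 */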
70 
71 /*
72  * DMA attributes used when using ddi_dma_mem_alloc to
73  * allocate HAL descriptors and Rx buffers during replenish
74  */
75 static struct ddi_dma_attr hal_dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version */
77 	0x0ULL,				/* dma_attr_addr_lo */
78 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
79 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
80 #if defined(__sparc)
81 	0x2000,				/* dma_attr_align */
82 #else
83 	0x1000,				/* dma_attr_align */
84 #endif
85 	0xFC00FC,			/* dma_attr_burstsizes */
86 	0x1,				/* dma_attr_minxfer */
87 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
88 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
89 	1,				/* dma_attr_sgllen */
90 	(unsigned int)1,		/* dma_attr_granular */
91 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
92 };
93 
94 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95 
96 static int		xgell_m_stat(void *, uint_t, uint64_t *);
97 static int		xgell_m_start(void *);
98 static void		xgell_m_stop(void *);
99 static int		xgell_m_promisc(void *, boolean_t);
100 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
101 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
102 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
103 
104 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
105 
106 static mac_callbacks_t xgell_m_callbacks = {
107 	XGELL_M_CALLBACK_FLAGS,
108 	xgell_m_stat,
109 	xgell_m_start,
110 	xgell_m_stop,
111 	xgell_m_promisc,
112 	xgell_m_multicst,
113 	NULL,
114 	NULL,
115 	xgell_m_ioctl,
116 	xgell_m_getcapab
117 };
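/*
 * The two NULL entries appear to be the per-MAC mc_unicst and mc_tx
 * callbacks; unicast address management and transmit are presumably exported
 * per ring/group instead (see xgell_fill_ring() and xgell_fill_group()
 * below), so no device-wide callbacks are registered here.
 */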
118 
119 /*
120  * xge_device_poll
121  *
122  * Called from timeout() every second. xge_callback_event_queued() should also
123  * call me when a HAL event has been rescheduled.
124  */
125 /*ARGSUSED*/
126 void
127 xge_device_poll(void *data)
128 {
129 	xgelldev_t *lldev = xge_hal_device_private(data);
130 
131 	mutex_enter(&lldev->genlock);
132 	if (lldev->is_initialized) {
133 		xge_hal_device_poll(data);
134 		lldev->timeout_id = timeout(xge_device_poll, data,
135 		    XGE_DEV_POLL_TICKS);
136 	} else if (lldev->in_reset == 1) {
137 		lldev->timeout_id = timeout(xge_device_poll, data,
138 		    XGE_DEV_POLL_TICKS);
139 	} else {
140 		lldev->timeout_id = 0;
141 	}
142 	mutex_exit(&lldev->genlock);
143 }
144 
145 /*
146  * xge_device_poll_now
147  *
148  * Polls the device immediately, without waiting for the next timeout tick.
149  */
150 void
151 xge_device_poll_now(void *data)
152 {
153 	xgelldev_t *lldev = xge_hal_device_private(data);
154 
155 	mutex_enter(&lldev->genlock);
156 	if (lldev->is_initialized) {
157 		xge_hal_device_poll(data);
158 	}
159 	mutex_exit(&lldev->genlock);
160 }
161 
162 /*
163  * xgell_callback_link_up
164  *
165  * This function is called by HAL to notify of a hardware link-up state change.
166  */
167 void
168 xgell_callback_link_up(void *userdata)
169 {
170 	xgelldev_t *lldev = (xgelldev_t *)userdata;
171 
172 	mac_link_update(lldev->mh, LINK_STATE_UP);
173 }
174 
175 /*
176  * xgell_callback_link_down
177  *
178  * This function is called by HAL to notify of a hardware link-down state change.
179  */
180 void
181 xgell_callback_link_down(void *userdata)
182 {
183 	xgelldev_t *lldev = (xgelldev_t *)userdata;
184 
185 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
186 }
187 
188 /*
189  * xgell_rx_buffer_replenish_all
190  *
191  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
192  * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
193  * Must be called with pool_lock held.
194  */
195 static void
196 xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
197 {
198 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
199 	xge_hal_dtr_h dtr;
200 	xgell_rx_buffer_t *rx_buffer;
201 	xgell_rxd_priv_t *rxd_priv;
202 
203 	xge_assert(mutex_owned(&bf_pool->pool_lock));
204 
205 	while ((bf_pool->free > 0) &&
206 	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
207 		xge_assert(bf_pool->head);
208 
209 		rx_buffer = bf_pool->head;
210 
211 		bf_pool->head = rx_buffer->next;
212 		bf_pool->free--;
213 
214 		xge_assert(rx_buffer->dma_addr);
215 
216 		rxd_priv = (xgell_rxd_priv_t *)
217 		    xge_hal_ring_dtr_private(ring->channelh, dtr);
218 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
219 		    bf_pool->size);
220 
221 		rxd_priv->rx_buffer = rx_buffer;
222 		xge_hal_ring_dtr_post(ring->channelh, dtr);
223 	}
224 }
225 
226 /*
227  * xgell_rx_buffer_release
228  *
229  * The only thing done here is to put the buffer back to the pool.
230  * Callers of this function must hold the mutex bf_pool.pool_lock.
231  */
232 static void
233 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
234 {
235 	xgell_rx_ring_t *ring = rx_buffer->ring;
236 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
237 
238 	xge_assert(mutex_owned(&bf_pool->pool_lock));
239 
240 	/* Put the buffer back to pool */
241 	rx_buffer->next = bf_pool->head;
242 	bf_pool->head = rx_buffer;
243 
244 	bf_pool->free++;
245 }
246 
247 /*
248  * xgell_rx_buffer_recycle
249  *
250  * Registered with desballoc() as the free routine; called when the mblk is freed.
251  * We will try to replenish all descriptors.
252  */
253 
254 /*
255  * Previously there was much lock contention between xgell_rx_1b_compl() and
256  * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had a bad
257  * effect on rx performance. A separate recycle list is introduced to overcome
258  * this. The recycle list records the rx buffers that have been recycled, and
259  * these buffers are returned to the free list in bulk instead of
260  * one-by-one.
261  */
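/*
 * In short: the free routine below appends the buffer to
 * recycle_head/recycle_tail under recycle_lock only; once at least
 * XGELL_RX_BUFFER_RECYCLE_CACHE buffers have accumulated, the whole chain is
 * spliced onto the free list under pool_lock and the ring is replenished.
 */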
262 
263 static void
264 xgell_rx_buffer_recycle(char *arg)
265 {
266 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
267 	xgell_rx_ring_t *ring = rx_buffer->ring;
268 	xgelldev_t *lldev = ring->lldev;
269 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
270 
271 	mutex_enter(&bf_pool->recycle_lock);
272 
273 	rx_buffer->next = bf_pool->recycle_head;
274 	bf_pool->recycle_head = rx_buffer;
275 	if (bf_pool->recycle_tail == NULL)
276 		bf_pool->recycle_tail = rx_buffer;
277 	bf_pool->recycle++;
278 
279 	/*
280 	 * Until a good way to set this hiwat is found, just always call
281 	 * replenish_all. *TODO*
282 	 */
283 	if ((lldev->is_initialized != 0) && (ring->live) &&
284 	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
285 		mutex_enter(&bf_pool->pool_lock);
286 		bf_pool->recycle_tail->next = bf_pool->head;
287 		bf_pool->head = bf_pool->recycle_head;
288 		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
289 		bf_pool->post -= bf_pool->recycle;
290 		bf_pool->free += bf_pool->recycle;
291 		bf_pool->recycle = 0;
292 		xgell_rx_buffer_replenish_all(ring);
293 		mutex_exit(&bf_pool->pool_lock);
294 	}
295 
296 	mutex_exit(&bf_pool->recycle_lock);
297 }
298 
299 /*
300  * xgell_rx_buffer_alloc
301  *
302  * Allocate one rx buffer and return with the pointer to the buffer.
303  * Return NULL if failed.
304  */
305 static xgell_rx_buffer_t *
306 xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
307 {
308 	xgelldev_t *lldev = ring->lldev;
309 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
310 	xge_hal_device_t *hldev;
311 	void *vaddr;
312 	ddi_dma_handle_t dma_handle;
313 	ddi_acc_handle_t dma_acch;
314 	dma_addr_t dma_addr;
315 	uint_t ncookies;
316 	ddi_dma_cookie_t dma_cookie;
317 	size_t real_size;
318 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
319 	xgell_rx_buffer_t *rx_buffer;
320 
321 	hldev = (xge_hal_device_t *)lldev->devh;
322 
323 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
324 	    0, &dma_handle) != DDI_SUCCESS) {
325 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
326 		    XGELL_IFNAME, lldev->instance);
327 		goto handle_failed;
328 	}
329 
330 	/* reserve some space at the end of the buffer for recycling */
331 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
332 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
333 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
334 	    DDI_SUCCESS) {
335 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
336 		    XGELL_IFNAME, lldev->instance);
337 		goto mem_failed;
338 	}
339 
340 	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
341 	    real_size) {
342 		xge_debug_ll(XGE_ERR, "%s%d: allocated DMA-able memory is too small",
343 		    XGELL_IFNAME, lldev->instance);
344 		goto bind_failed;
345 	}
346 
347 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
348 	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
349 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
350 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
351 		    XGELL_IFNAME, lldev->instance);
352 		goto bind_failed;
353 	}
354 
355 	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
356 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
357 		    XGELL_IFNAME, lldev->instance);
358 		goto check_failed;
359 	}
360 
361 	dma_addr = dma_cookie.dmac_laddress;
362 
363 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
364 	    sizeof (xgell_rx_buffer_t));
365 	rx_buffer->next = NULL;
366 	rx_buffer->vaddr = vaddr;
367 	rx_buffer->dma_addr = dma_addr;
368 	rx_buffer->dma_handle = dma_handle;
369 	rx_buffer->dma_acch = dma_acch;
370 	rx_buffer->ring = ring;
371 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
372 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
373 
374 	return (rx_buffer);
375 
376 check_failed:
377 	(void) ddi_dma_unbind_handle(dma_handle);
378 bind_failed:
379 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
380 	ddi_dma_mem_free(&dma_acch);
381 mem_failed:
382 	ddi_dma_free_handle(&dma_handle);
383 handle_failed:
384 
385 	return (NULL);
386 }
387 
388 /*
389  * xgell_rx_destroy_buffer_pool
390  *
391  * Destroy the buffer pool. If any buffers are still held by the upper layer,
392  * as recorded by bf_pool.post, return B_FALSE to refuse to be unloaded.
393  */
394 static boolean_t
395 xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
396 {
397 	xgelldev_t *lldev = ring->lldev;
398 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
399 	xgell_rx_buffer_t *rx_buffer;
400 	ddi_dma_handle_t  dma_handle;
401 	ddi_acc_handle_t  dma_acch;
402 	int i;
403 
404 	/*
405 	 * If the pool has been destroyed, just return B_TRUE
406 	 */
407 	if (!bf_pool->live)
408 		return (B_TRUE);
409 
410 	mutex_enter(&bf_pool->recycle_lock);
411 	if (bf_pool->recycle > 0) {
412 		mutex_enter(&bf_pool->pool_lock);
413 		bf_pool->recycle_tail->next = bf_pool->head;
414 		bf_pool->head = bf_pool->recycle_head;
415 		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
416 		bf_pool->post -= bf_pool->recycle;
417 		bf_pool->free += bf_pool->recycle;
418 		bf_pool->recycle = 0;
419 		mutex_exit(&bf_pool->pool_lock);
420 	}
421 	mutex_exit(&bf_pool->recycle_lock);
422 
423 	/*
424 	 * If any buffers are still posted, the driver must refuse to be
425 	 * detached. The upper layer needs to release them first.
426 	 */
427 	if (bf_pool->post != 0) {
428 		xge_debug_ll(XGE_ERR,
429 		    "%s%d has some buffers not be recycled, try later!",
430 		    XGELL_IFNAME, lldev->instance);
431 		return (B_FALSE);
432 	}
433 
434 	/*
435 	 * Release buffers one by one.
436 	 */
437 	for (i = bf_pool->total; i > 0; i--) {
438 		rx_buffer = bf_pool->head;
439 		xge_assert(rx_buffer != NULL);
440 
441 		bf_pool->head = rx_buffer->next;
442 
443 		dma_handle = rx_buffer->dma_handle;
444 		dma_acch = rx_buffer->dma_acch;
445 
446 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
447 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
448 			bf_pool->head = rx_buffer;
449 			return (B_FALSE);
450 		}
451 		ddi_dma_mem_free(&dma_acch);
452 		ddi_dma_free_handle(&dma_handle);
453 
454 		bf_pool->total--;
455 		bf_pool->free--;
456 	}
457 
458 	xge_assert(!mutex_owned(&bf_pool->pool_lock));
459 
460 	mutex_destroy(&bf_pool->recycle_lock);
461 	mutex_destroy(&bf_pool->pool_lock);
462 	bf_pool->live = B_FALSE;
463 
464 	return (B_TRUE);
465 }
466 
467 /*
468  * xgell_rx_create_buffer_pool
469  *
470  * Initialize the RX buffer pool for one RX ring. Refer to xgell_rx_buffer_pool_t.
471  */
472 static boolean_t
473 xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
474 {
475 	xgelldev_t *lldev = ring->lldev;
476 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
477 	xge_hal_device_t *hldev;
478 	xgell_rx_buffer_t *rx_buffer;
479 	int i;
480 
481 	if (bf_pool->live)
482 		return (B_TRUE);
483 
484 	hldev = (xge_hal_device_t *)lldev->devh;
485 
486 	bf_pool->total = 0;
487 	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
488 	bf_pool->head = NULL;
489 	bf_pool->free = 0;
490 	bf_pool->post = 0;
491 	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
492 	bf_pool->recycle = 0;
493 	bf_pool->recycle_head = NULL;
494 	bf_pool->recycle_tail = NULL;
495 	bf_pool->live = B_TRUE;
496 
497 	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
498 	    DDI_INTR_PRI(hldev->irqh));
499 	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
500 	    DDI_INTR_PRI(hldev->irqh));
501 
502 	/*
503 	 * Allocate buffers one by one. If allocation fails, destroy the whole pool
504 	 * by calling xgell_rx_destroy_buffer_pool().
505 	 */
506 
507 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
508 		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
509 			(void) xgell_rx_destroy_buffer_pool(ring);
510 			return (B_FALSE);
511 		}
512 
513 		rx_buffer->next = bf_pool->head;
514 		bf_pool->head = rx_buffer;
515 
516 		bf_pool->total++;
517 		bf_pool->free++;
518 	}
519 
520 	return (B_TRUE);
521 }
522 
523 /*
524  * xgell_rx_dtr_replenish
525  *
526  * Replenish the descriptor with an rx_buffer from the RX buffer pool.
527  * The dtr will be posted right away.
528  */
529 xge_hal_status_e
530 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
531     void *userdata, xge_hal_channel_reopen_e reopen)
532 {
533 	xgell_rx_ring_t *ring = userdata;
534 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
535 	xgell_rx_buffer_t *rx_buffer;
536 	xgell_rxd_priv_t *rxd_priv;
537 
538 	mutex_enter(&bf_pool->pool_lock);
539 	if (bf_pool->head == NULL) {
540 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
		mutex_exit(&bf_pool->pool_lock);
541 		return (XGE_HAL_FAIL);
542 	}
543 	rx_buffer = bf_pool->head;
544 	xge_assert(rx_buffer);
545 	xge_assert(rx_buffer->dma_addr);
546 
547 	bf_pool->head = rx_buffer->next;
548 	bf_pool->free--;
549 	mutex_exit(&bf_pool->pool_lock);
550 
551 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
552 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
553 
554 	rxd_priv->rx_buffer = rx_buffer;
555 
556 	return (XGE_HAL_OK);
557 }
558 
559 /*
560  * xgell_get_ip_offset
561  *
562  * Calculate the offset to IP header.
563  */
564 static inline int
565 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
566 {
567 	int ip_off;
568 
569 	/* get IP-header offset */
570 	switch (ext_info->frame) {
571 	case XGE_HAL_FRAME_TYPE_DIX:
572 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
573 		break;
574 	case XGE_HAL_FRAME_TYPE_IPX:
575 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
576 		    XGE_HAL_HEADER_802_2_SIZE +
577 		    XGE_HAL_HEADER_SNAP_SIZE);
578 		break;
579 	case XGE_HAL_FRAME_TYPE_LLC:
580 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
581 		    XGE_HAL_HEADER_802_2_SIZE);
582 		break;
583 	case XGE_HAL_FRAME_TYPE_SNAP:
584 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
585 		    XGE_HAL_HEADER_SNAP_SIZE);
586 		break;
587 	default:
588 		ip_off = 0;
589 		break;
590 	}
591 
592 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
593 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
594 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
595 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
596 	}
597 
598 	return (ip_off);
599 }
600 
601 /*
602  * xgell_rx_hcksum_assoc
603  *
604  * Determine the packet type and then call hcksum_assoc() to associate
605  * h/w checksum information.
606  */
607 static inline void
608 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
609     xge_hal_dtr_info_t *ext_info)
610 {
611 	int cksum_flags = 0;
612 
613 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
614 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
615 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
616 				cksum_flags |= HCK_IPV4_HDRCKSUM;
617 			}
618 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
619 				cksum_flags |= HCK_FULLCKSUM_OK;
620 			}
621 			if (cksum_flags) {
622 				cksum_flags |= HCK_FULLCKSUM;
623 				(void) hcksum_assoc(mp, NULL, NULL, 0,
624 				    0, 0, 0, cksum_flags, 0);
625 			}
626 		}
627 	} else if (ext_info->proto &
628 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
629 		/*
630 		 * Just pass the partial cksum up to IP.
631 		 */
632 		int ip_off = xgell_get_ip_offset(ext_info);
633 		int start, end = pkt_length - ip_off;
634 
635 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
636 			struct ip *ip =
637 			    (struct ip *)(vaddr + ip_off);
638 			start = ip->ip_hl * 4 + ip_off;
639 		} else {
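			/*
			 * IPv6: the base header is a fixed 40 bytes; any
			 * extension headers are not accounted for here.
			 */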
640 			start = ip_off + 40;
641 		}
642 		cksum_flags |= HCK_PARTIALCKSUM;
643 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
644 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
645 		    0);
646 	}
647 }
648 
649 /*
650  * xgell_rx_1b_msg_alloc
651  *
652  * Allocate a message header for the data buffer, and decide whether to copy
653  * the packet to a new buffer so the big rx_buffer can be released to save memory.
654  *
655  * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a
656  * new message and copy the payload in.
657  */
658 static mblk_t *
659 xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
660     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
661 {
662 	xgelldev_t *lldev = ring->lldev;
663 	mblk_t *mp;
664 	char *vaddr;
665 
666 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
667 	/*
668 	 * Copy the packet into a newly allocated message buffer if pkt_length
669 	 * is less than XGELL_RX_DMA_LOWAT (or if the caller requested a copy).
670 	 */
671 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
672 		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
673 			return (NULL);
674 		}
675 		mp->b_rptr += HEADROOM;
676 		bcopy(vaddr, mp->b_rptr, pkt_length);
677 		mp->b_wptr = mp->b_rptr + pkt_length;
678 		*copyit = B_TRUE;
679 		return (mp);
680 	}
681 
682 	/*
683 	 * Just allocate mblk for current data buffer
684 	 */
685 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
686 	    &rx_buffer->frtn)) == NULL) {
687 		/* Drop it */
688 		return (NULL);
689 	}
690 	/*
691 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
692 	 */
693 	mp->b_wptr += pkt_length;
694 
695 	return (mp);
696 }
697 
698 /*
699  * xgell_rx_1b_callback
700  *
701  * This function is called if the interrupt is due to a received frame or if
702  * the receive ring contains fresh, as yet unprocessed frames.
703  */
704 static xge_hal_status_e
705 xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
706     void *userdata)
707 {
708 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
709 	xgelldev_t *lldev = ring->lldev;
710 	xgell_rx_buffer_t *rx_buffer;
711 	mblk_t *mp_head = NULL;
712 	mblk_t *mp_end  = NULL;
713 	int pkt_burst = 0;
714 
715 	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);
716 
717 	mutex_enter(&ring->bf_pool.pool_lock);
718 	do {
719 		int pkt_length;
720 		dma_addr_t dma_data;
721 		mblk_t *mp;
722 		boolean_t copyit = B_FALSE;
723 
724 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
725 		    xge_hal_ring_dtr_private(channelh, dtr));
726 		xge_hal_dtr_info_t ext_info;
727 
728 		rx_buffer = rxd_priv->rx_buffer;
729 
730 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
731 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
732 
733 		xge_assert(dma_data == rx_buffer->dma_addr);
734 
735 		if (t_code != 0) {
736 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
737 			    " completed due to error t_code %01x", XGELL_IFNAME,
738 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
739 
740 			(void) xge_hal_device_handle_tcode(channelh, dtr,
741 			    t_code);
742 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
743 			xgell_rx_buffer_release(rx_buffer);
744 			continue;
745 		}
746 
747 		/*
748 		 * Sync the DMA memory
749 		 */
750 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
751 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
752 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
753 			    XGELL_IFNAME, lldev->instance);
754 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
755 			xgell_rx_buffer_release(rx_buffer);
756 			continue;
757 		}
758 
759 		/*
760 		 * Allocate message for the packet.
761 		 */
762 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
763 			copyit = B_TRUE;
764 		} else {
765 			copyit = B_FALSE;
766 		}
767 
768 		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
769 		    &ext_info, &copyit);
770 
771 		xge_hal_ring_dtr_free(channelh, dtr);
772 
773 		/*
774 		 * Release the buffer and recycle it later
775 		 */
776 		if ((mp == NULL) || copyit) {
777 			xgell_rx_buffer_release(rx_buffer);
778 		} else {
779 			/*
780 			 * Count it, since the buffer is being loaned upstream.
781 			 */
782 			ring->bf_pool.post++;
783 		}
784 		if (mp == NULL) {
785 			xge_debug_ll(XGE_ERR,
786 			    "%s%d: rx: can not allocate mp mblk",
787 			    XGELL_IFNAME, lldev->instance);
788 			continue;
789 		}
790 
791 		/*
792 		 * Associate cksum_flags per packet type and h/w
793 		 * cksum flags.
794 		 */
795 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
796 		    pkt_length, &ext_info);
797 
798 		ring->received_bytes += pkt_length;
799 
800 		if (mp_head == NULL) {
801 			mp_head = mp;
802 			mp_end = mp;
803 		} else {
804 			mp_end->b_next = mp;
805 			mp_end = mp;
806 		}
807 
808 		/*
809 		 * Inline implementation of the polling path.
810 		 */
811 		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
812 			ring->poll_mp = mp_head;
813 		}
814 		if (ring->poll_mp != NULL) {
815 			if ((ring->poll_bytes -= pkt_length) <= 0) {
816 				/* have polled enough packets. */
817 				break;
818 			} else {
819 				/* continue polling packets. */
820 				continue;
821 			}
822 		}
823 
824 		/*
825 		 * We're not in polling mode, so try to chain more messages
826 		 * or send the chain up according to pkt_burst.
827 		 */
828 		if (++pkt_burst < lldev->config.rx_pkt_burst)
829 			continue;
830 
831 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
832 			/* Replenish rx buffers */
833 			xgell_rx_buffer_replenish_all(ring);
834 		}
835 		mutex_exit(&ring->bf_pool.pool_lock);
836 		if (mp_head != NULL) {
837 			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
838 			    ring->ring_gen_num);
839 		}
840 		mp_head = mp_end  = NULL;
841 		pkt_burst = 0;
842 		mutex_enter(&ring->bf_pool.pool_lock);
843 
844 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
845 	    XGE_HAL_OK);
846 
847 	/*
848 	 * Always call replenish_all to recycle rx_buffers.
849 	 */
850 	xgell_rx_buffer_replenish_all(ring);
851 	mutex_exit(&ring->bf_pool.pool_lock);
852 
853 	/*
854 	 * If we're not in a polling cycle, call mac_rx_ring(); otherwise
855 	 * just return while leaving packets chained to ring->poll_mp.
856 	 */
857 	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
858 		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
859 		    ring->ring_gen_num);
860 	}
861 
862 	return (XGE_HAL_OK);
863 }
864 
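/*
 * xgell_rx_poll
 *
 * Polling entry point exported to the MAC layer through mri_poll. It asks the
 * HAL to process completed descriptors on this ring; xgell_rx_1b_callback()
 * then chains up to bytes_to_pickup bytes of packets onto ring->poll_mp
 * instead of delivering them, and the chain is returned to the caller here.
 */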
865 mblk_t *
866 xgell_rx_poll(void *arg, int bytes_to_pickup)
867 {
868 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
869 	int got_rx = 0;
870 	mblk_t *mp;
871 
872 	xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
873 
874 	ring->poll_mp = NULL;
875 	ring->poll_bytes = bytes_to_pickup;
876 	(void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
877 
878 	mp = ring->poll_mp;
879 	ring->poll_bytes = -1;
880 	ring->polled_bytes += got_rx;
881 	ring->poll_mp = NULL;
882 
883 	return (mp);
884 }
885 
886 /*
887  * xgell_xmit_compl
888  *
889  * This function is called when an interrupt indicates DMA completion of a Tx
890  * packet. It identifies the last TxD whose buffer was
891  * freed and frees all mblks whose data have already been DMA'ed into the NIC's
892  * internal memory.
893  */
894 static xge_hal_status_e
895 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
896     void *userdata)
897 {
898 	xgell_tx_ring_t *ring = userdata;
899 	xgelldev_t *lldev = ring->lldev;
900 
901 	do {
902 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
903 		    xge_hal_fifo_dtr_private(dtr));
904 		int i;
905 
906 		if (t_code) {
907 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
908 			    " completed due to error t_code %01x", XGELL_IFNAME,
909 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
910 
911 			(void) xge_hal_device_handle_tcode(channelh, dtr,
912 			    t_code);
913 		}
914 
915 		for (i = 0; i < txd_priv->handle_cnt; i++) {
916 			if (txd_priv->dma_handles[i] != NULL) {
917 				xge_assert(txd_priv->dma_handles[i]);
918 				(void) ddi_dma_unbind_handle(
919 				    txd_priv->dma_handles[i]);
920 				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
921 				txd_priv->dma_handles[i] = 0;
922 			}
923 		}
924 		txd_priv->handle_cnt = 0;
925 
926 		xge_hal_fifo_dtr_free(channelh, dtr);
927 
928 		if (txd_priv->mblk != NULL) {
929 			freemsg(txd_priv->mblk);
930 			txd_priv->mblk = NULL;
931 		}
932 
933 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
934 	    XGE_HAL_OK);
935 
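	/*
	 * If xgell_ring_tx() previously returned the mblk to signal that it
	 * ran short of descriptors (setting need_resched), tell the MAC layer
	 * that this ring can accept transmits again.
	 */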
936 	if (ring->need_resched)
937 		mac_tx_ring_update(lldev->mh, ring->ring_handle);
938 
939 	return (XGE_HAL_OK);
940 }
941 
942 mblk_t *
943 xgell_ring_tx(void *arg, mblk_t *mp)
944 {
945 	xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
946 	mblk_t *bp;
947 	xgelldev_t *lldev = ring->lldev;
948 	xge_hal_device_t *hldev = lldev->devh;
949 	xge_hal_status_e status;
950 	xge_hal_dtr_h dtr;
951 	xgell_txd_priv_t *txd_priv;
952 	uint32_t hckflags;
953 	uint32_t lsoflags;
954 	uint32_t mss;
955 	int handle_cnt, frag_cnt, ret, i, copied;
956 	boolean_t used_copy;
957 
958 _begin:
959 	handle_cnt = frag_cnt = 0;
960 
961 	if (!lldev->is_initialized || lldev->in_reset)
962 		return (mp);
963 
964 	/*
965 	 * If the free Tx dtr count reaches the lower threshold,
966 	 * return the mblk to tell the MAC layer to stop sending more packets
967 	 * until the free dtr count rises again. The driver notifies the MAC
968 	 * layer through mac_tx_ring_update() in xgell_xmit_compl() once
969 	 * descriptors have been freed.
970 	 */
971 	if (xge_hal_channel_dtr_count(ring->channelh)
972 	    <= XGELL_TX_LEVEL_LOW) {
973 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit, "
974 		    "free descriptors count at low threshold %d",
975 		    XGELL_IFNAME, lldev->instance,
976 		    ((xge_hal_channel_t *)ring->channelh)->post_qid,
977 		    XGELL_TX_LEVEL_LOW);
978 		goto _exit;
979 	}
980 
981 	status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
982 	if (status != XGE_HAL_OK) {
983 		switch (status) {
984 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
985 			xge_debug_ll(XGE_ERR,
986 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
987 			    lldev->instance,
988 			    ((xge_hal_channel_t *)
989 			    ring->channelh)->post_qid);
990 			goto _exit;
991 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
992 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
993 			    " out of descriptors.", XGELL_IFNAME,
994 			    lldev->instance,
995 			    ((xge_hal_channel_t *)
996 			    ring->channelh)->post_qid);
997 			goto _exit;
998 		default:
999 			return (mp);
1000 		}
1001 	}
1002 
1003 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1004 	txd_priv->mblk = mp;
1005 
1006 	/*
1007 	 * The VLAN tag should be passed down along with the MAC header, so the h/w
1008 	 * needn't do insertion.
1009 	 *
1010 	 * For a NIC driver that has to strip and re-insert the VLAN tag, an example
1011 	 * is the other implementation for xge. The driver can simply bcopy() the
1012 	 * ether_vlan_header to overwrite the VLAN tag and let the h/w insert the tag
1013 	 * automatically, since it's impossible that GLD sends down mp(s) with a
1014 	 * split ether_vlan_header.
1015 	 *
1016 	 * struct ether_vlan_header *evhp;
1017 	 * uint16_t tci;
1018 	 *
1019 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1020 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1021 	 *	tci = ntohs(evhp->ether_tci);
1022 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1023 	 *	    2 * ETHERADDRL);
1024 	 *	mp->b_rptr += VLAN_TAGSZ;
1025 	 *
1026 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1027 	 * }
1028 	 */
1029 
1030 	copied = 0;
1031 	used_copy = B_FALSE;
1032 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1033 		int mblen;
1034 		uint_t ncookies;
1035 		ddi_dma_cookie_t dma_cookie;
1036 		ddi_dma_handle_t dma_handle;
1037 
1038 		/* skip zero-length message blocks */
1039 		mblen = MBLKL(bp);
1040 		if (mblen == 0) {
1041 			continue;
1042 		}
1043 
1044 		ring->sent_bytes += mblen;
1045 
1046 		/*
1047 		 * Check the message length to decide to DMA or bcopy() data
1048 		 * to tx descriptor(s).
1049 		 */
1050 		if (mblen < lldev->config.tx_dma_lowat &&
1051 		    (copied + mblen) < lldev->tx_copied_max) {
1052 			xge_hal_status_e rc;
1053 			rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
1054 			    dtr, bp->b_rptr, mblen);
1055 			if (rc == XGE_HAL_OK) {
1056 				used_copy = B_TRUE;
1057 				copied += mblen;
1058 				continue;
1059 			} else if (used_copy) {
1060 				xge_hal_fifo_dtr_buffer_finalize(
1061 				    ring->channelh, dtr, frag_cnt++);
1062 				used_copy = B_FALSE;
1063 			}
1064 		} else if (used_copy) {
1065 			xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
1066 			    dtr, frag_cnt++);
1067 			used_copy = B_FALSE;
1068 		}
1069 
1070 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1071 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1072 		if (ret != DDI_SUCCESS) {
1073 			xge_debug_ll(XGE_ERR,
1074 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1075 			    lldev->instance);
1076 			goto _exit_cleanup;
1077 		}
1078 
1079 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1080 		    (caddr_t)bp->b_rptr, mblen,
1081 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1082 		    &dma_cookie, &ncookies);
1083 
1084 		switch (ret) {
1085 		case DDI_DMA_MAPPED:
1086 			/* everything's fine */
1087 			break;
1088 
1089 		case DDI_DMA_NORESOURCES:
1090 			xge_debug_ll(XGE_ERR,
1091 			    "%s%d: can not bind dma address",
1092 			    XGELL_IFNAME, lldev->instance);
1093 			ddi_dma_free_handle(&dma_handle);
1094 			goto _exit_cleanup;
1095 
1096 		case DDI_DMA_NOMAPPING:
1097 		case DDI_DMA_INUSE:
1098 		case DDI_DMA_TOOBIG:
1099 		default:
1100 			/* drop packet, don't retry */
1101 			xge_debug_ll(XGE_ERR,
1102 			    "%s%d: can not map message buffer",
1103 			    XGELL_IFNAME, lldev->instance);
1104 			ddi_dma_free_handle(&dma_handle);
1105 			goto _exit_cleanup;
1106 		}
1107 
1108 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1109 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1110 			    "requested c:%d+f:%d", XGELL_IFNAME,
1111 			    lldev->instance, ncookies, frag_cnt);
1112 			(void) ddi_dma_unbind_handle(dma_handle);
1113 			ddi_dma_free_handle(&dma_handle);
1114 			goto _exit_cleanup;
1115 		}
1116 
1117 		/* setup the descriptors for this data buffer */
1118 		while (ncookies) {
1119 			xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
1120 			    frag_cnt++, dma_cookie.dmac_laddress,
1121 			    dma_cookie.dmac_size);
1122 			if (--ncookies) {
1123 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1124 			}
1125 
1126 		}
1127 
1128 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1129 
1130 		if (bp->b_cont &&
1131 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1132 		    hldev->config.fifo.max_frags)) {
1133 			mblk_t *nmp;
1134 
1135 			xge_debug_ll(XGE_TRACE,
1136 			    "too many FRAGs [%d], pull up them", frag_cnt);
1137 
1138 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1139 				/* Drop packet, don't retry */
1140 				xge_debug_ll(XGE_ERR,
1141 				    "%s%d: can not pullup message buffer",
1142 				    XGELL_IFNAME, lldev->instance);
1143 				goto _exit_cleanup;
1144 			}
1145 			freemsg(bp->b_cont);
1146 			bp->b_cont = nmp;
1147 		}
1148 	}
1149 
1150 	/* finalize unfinished copies */
1151 	if (used_copy) {
1152 		xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
1153 		    frag_cnt++);
1154 	}
1155 
1156 	txd_priv->handle_cnt = handle_cnt;
1157 
1158 	/*
1159 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1160 	 * do all necessary work.
1161 	 */
1162 	lso_info_get(mp, &mss, &lsoflags);
1163 
1164 	if (lsoflags & HW_LSO) {
1165 		xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
1166 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1167 	}
1168 
1169 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &hckflags);
1170 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1171 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1172 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1173 	}
1174 	if (hckflags & HCK_FULLCKSUM) {
1175 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1176 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1177 	}
1178 
1179 	xge_hal_fifo_dtr_post(ring->channelh, dtr);
1180 
1181 	return (NULL);
1182 
1183 _exit_cleanup:
1184 	/*
1185 	 * Could not transmit successfully, but the message has already been changed,
1186 	 * so just free it and return NULL
1187 	 */
1188 	for (i = 0; i < handle_cnt; i++) {
1189 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1190 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1191 		txd_priv->dma_handles[i] = 0;
1192 	}
1193 
1194 	xge_hal_fifo_dtr_free(ring->channelh, dtr);
1195 
1196 	freemsg(mp);
1197 	return (NULL);
1198 
1199 _exit:
1200 	ring->need_resched = B_TRUE;
1201 	return (mp);
1202 }
1203 
1204 /*
1205  * xgell_rx_ring_maddr_init
1206  */
1207 static void
1208 xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
1209 {
1210 	int i;
1211 	xgelldev_t *lldev = ring->lldev;
1212 	xge_hal_device_t *hldev = lldev->devh;
1213 	int slot_start;
1214 
1215 	xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
1216 
1217 	ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
1218 	ring->mmac.naddrfree = ring->mmac.naddr;
1219 
1220 	/*
1221 	 * For the default rx ring, the first MAC address is the factory one.
1222 	 * This will be set by the framework, so we need to clear it for now.
1223 	 */
1224 	(void) xge_hal_device_macaddr_clear(hldev, 0);
1225 
1226 	/*
1227 	 * Read the MAC address Configuration Memory from HAL.
1228 	 * The first slot will hold a factory MAC address; the contents of the other
1229 	 * slots will be FF:FF:FF:FF:FF:FF.
1230 	 */
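	/*
	 * Each rx ring (group) presumably owns a contiguous 32-entry section
	 * of the MAC address configuration memory, hence the index * 32 base.
	 */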
1231 	slot_start = ring->index * 32;
1232 	for (i = 0; i < ring->mmac.naddr; i++) {
1233 		(void) xge_hal_device_macaddr_get(hldev, slot_start + i,
1234 		    ring->mmac.mac_addr + i);
1235 		ring->mmac.mac_addr_set[i] = B_FALSE;
1236 	}
1237 }
1238 
1239 static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
1240 
1241 static int
1242 xgell_addmac(void *arg, const uint8_t *mac_addr)
1243 {
1244 	xgell_rx_ring_t *ring = arg;
1245 	xgelldev_t *lldev = ring->lldev;
1246 	xge_hal_device_t *hldev = lldev->devh;
1247 	int slot;
1248 	int slot_start;
1249 
1250 	xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");
1251 
1252 	mutex_enter(&lldev->genlock);
1253 
1254 	if (ring->mmac.naddrfree == 0) {
1255 		mutex_exit(&lldev->genlock);
1256 		return (ENOSPC);
1257 	}
1258 
1259 	/* First slot is for factory MAC address */
1260 	for (slot = 0; slot < ring->mmac.naddr; slot++) {
1261 		if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
1262 			break;
1263 		}
1264 	}
1265 
1266 	ASSERT(slot < ring->mmac.naddr);
1267 
1268 	slot_start = ring->index * 32;
1269 
1270 	if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
1271 	    0) {
1272 		mutex_exit(&lldev->genlock);
1273 		return (EIO);
1274 	}
1275 
1276 	/* Simply enable RTS for the whole section. */
1277 	(void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
1278 
1279 	/*
1280 	 * Read back the MAC address from HAL to keep the array up to date.
1281 	 */
1282 	if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
1283 	    ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
1284 		(void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
		mutex_exit(&lldev->genlock);
1285 		return (EIO);
1286 	}
1287 
1288 	ring->mmac.mac_addr_set[slot] = B_TRUE;
1289 	ring->mmac.naddrfree--;
1290 
1291 	mutex_exit(&lldev->genlock);
1292 
1293 	return (0);
1294 }
1295 
1296 static int
1297 xgell_remmac(void *arg, const uint8_t *mac_addr)
1298 {
1299 	xgell_rx_ring_t *ring = arg;
1300 	xgelldev_t *lldev = ring->lldev;
1301 	xge_hal_device_t *hldev = lldev->devh;
1302 	xge_hal_status_e status;
1303 	int slot;
1304 	int slot_start;
1305 
1306 	xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");
1307 
1308 	slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
1309 	if (slot == -1)
1310 		return (EINVAL);
1311 
1312 	slot_start = ring->index * 32;
1313 
1314 	/*
1315 	 * Adjust slot to the offset in the MAC array of this ring (group).
1316 	 */
1317 	slot -= slot_start;
1318 
1319 	/*
1320 	 * Only a previously set MAC address for this ring (group) can be removed.
1321 	 */
1322 	if (slot < 0 || slot >= ring->mmac.naddr)
1323 		return (EINVAL);
1324 
1325 
1326 	xge_assert(ring->mmac.mac_addr_set[slot]);
1327 
1328 	mutex_enter(&lldev->genlock);
1329 	if (!ring->mmac.mac_addr_set[slot]) {
1330 		mutex_exit(&lldev->genlock);
1331 		/*
1332 		 * Reaching here is unexpected; the result could be incorrect. WARNING!
1333 		 */
1334 		xge_debug_ll(XGE_ERR,
1335 		    "%s%d: caller is trying to remove an unset MAC address",
1336 		    XGELL_IFNAME, lldev->instance);
1337 		return (ENXIO);
1338 	}
1339 
1340 	status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1341 	if (status != XGE_HAL_OK) {
1342 		mutex_exit(&lldev->genlock);
1343 		return (EIO);
1344 	}
1345 
1346 	ring->mmac.mac_addr_set[slot] = B_FALSE;
1347 	ring->mmac.naddrfree++;
1348 
1349 	/*
1350 	 * TODO: Disable MAC RTS if all addresses have been cleared.
1351 	 */
1352 
1353 	/*
1354 	 * Read back the MAC address from HAL to keep the array up to date.
1355 	 */
1356 	(void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
1357 	    ring->mmac.mac_addr + slot);
1358 	mutex_exit(&lldev->genlock);
1359 
1360 	return (0);
1361 }
1362 
1363 /*
1364  * These temporarily call HAL functions directly.
1365  *
1366  * With an MSI-X implementation no lock would be needed, so interrupt
1367  * handling could be faster.
1368  */
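/*
 * Note the inversion: "enabling" ring interrupts disables HAL polling mode on
 * the rx channel, and "disabling" them re-enables polling, matching the MAC
 * layer's interrupt/poll switching model.
 */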
1369 int
1370 xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
1371 {
1372 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1373 
1374 	mutex_enter(&ring->ring_lock);
1375 	xge_hal_device_rx_channel_disable_polling(ring->channelh);
1376 	mutex_exit(&ring->ring_lock);
1377 
1378 	return (0);
1379 }
1380 
1381 int
1382 xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
1383 {
1384 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1385 
1386 	mutex_enter(&ring->ring_lock);
1387 	xge_hal_device_rx_channel_enable_polling(ring->channelh);
1388 	mutex_exit(&ring->ring_lock);
1389 
1390 	return (0);
1391 }
1392 
1393 static int
1394 xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1395 {
1396 	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
1397 
1398 	rx_ring->ring_gen_num = mr_gen_num;
1399 
1400 	return (0);
1401 }
1402 
1403 /*ARGSUSED*/
1404 static void
1405 xgell_rx_ring_stop(mac_ring_driver_t rh)
1406 {
1407 }
1408 
1409 /*ARGSUSED*/
1410 static int
1411 xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
1412 {
1413 	return (0);
1414 }
1415 
1416 /*ARGSUSED*/
1417 static void
1418 xgell_tx_ring_stop(mac_ring_driver_t rh)
1419 {
1420 }
1421 
1422 /*
1423  * Callback function for the MAC layer to register all rings.
1424  *
1425  * Xframe hardware doesn't support grouping explicitly, so the driver needs
1426  * to pretend to have resource groups. We may also optionally group all 8 rx
1427  * rings into a single group for increased scalability on CMT architectures,
1428  * or group one rx ring per group for maximum virtualization.
1429  *
1430  * TX grouping is actually done by the framework, so just register all TX
1431  * resources without grouping them.
1432  */
1433 void
1434 xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1435     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1436 {
1437 	xgelldev_t *lldev = (xgelldev_t *)arg;
1438 	mac_intr_t *mintr;
1439 
1440 	switch (rtype) {
1441 	case MAC_RING_TYPE_RX: {
1442 		xgell_rx_ring_t *rx_ring;
1443 
1444 		xge_assert(index < lldev->init_rx_rings);
1445 		xge_assert(rg_index < lldev->init_rx_groups);
1446 
1447 		/*
1448 		 * Performance vs. Virtualization
1449 		 */
1450 		if (lldev->init_rx_rings == lldev->init_rx_groups)
1451 			rx_ring = lldev->rx_ring + rg_index;
1452 		else
1453 			rx_ring = lldev->rx_ring + index;
1454 
1455 		rx_ring->ring_handle = rh;
1456 
1457 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
1458 		infop->mri_start = xgell_rx_ring_start;
1459 		infop->mri_stop = xgell_rx_ring_stop;
1460 		infop->mri_poll = xgell_rx_poll;
1461 
1462 		mintr = &infop->mri_intr;
1463 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1464 		mintr->mi_enable = xgell_rx_ring_intr_enable;
1465 		mintr->mi_disable = xgell_rx_ring_intr_disable;
1466 
1467 		break;
1468 	}
1469 	case MAC_RING_TYPE_TX: {
1470 		xgell_tx_ring_t *tx_ring;
1471 
1472 		xge_assert(rg_index == -1);
1473 
1474 		xge_assert((index >= 0) && (index < lldev->init_tx_rings));
1475 
1476 		tx_ring = lldev->tx_ring + index;
1477 		tx_ring->ring_handle = rh;
1478 
1479 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
1480 		infop->mri_start = xgell_tx_ring_start;
1481 		infop->mri_stop = xgell_tx_ring_stop;
1482 		infop->mri_tx = xgell_ring_tx;
1483 
1484 		break;
1485 	}
1486 	default:
1487 		break;
1488 	}
1489 }
1490 
1491 void
1492 xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
1493     mac_group_info_t *infop, mac_group_handle_t gh)
1494 {
1495 	xgelldev_t *lldev = (xgelldev_t *)arg;
1496 
1497 	switch (rtype) {
1498 	case MAC_RING_TYPE_RX: {
1499 		xgell_rx_ring_t *rx_ring;
1500 
1501 		xge_assert(index < lldev->init_rx_groups);
1502 
1503 		rx_ring = lldev->rx_ring + index;
1504 
1505 		rx_ring->group_handle = gh;
1506 
1507 		infop->mgi_driver = (mac_group_driver_t)rx_ring;
1508 		infop->mgi_start = NULL;
1509 		infop->mgi_stop = NULL;
1510 		infop->mgi_addmac = xgell_addmac;
1511 		infop->mgi_remmac = xgell_remmac;
1512 		infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
1513 
1514 		break;
1515 	}
1516 	case MAC_RING_TYPE_TX:
1517 		xge_assert(0);
1518 		break;
1519 	default:
1520 		break;
1521 	}
1522 }
1523 
1524 /*
1525  * xgell_maddr_set
1526  */
1527 static int
1528 xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
1529 {
1530 	xge_hal_device_t *hldev = lldev->devh;
1531 	xge_hal_status_e status;
1532 
1533 	xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
1534 
1535 	xge_debug_ll(XGE_TRACE,
1536 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1537 	    macaddr[0], macaddr[1], macaddr[2],
1538 	    macaddr[3], macaddr[4], macaddr[5]);
1539 
1540 	status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
1541 
1542 	if (status != XGE_HAL_OK) {
1543 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1544 		    XGELL_IFNAME, lldev->instance);
1545 		return (EIO);
1546 	}
1547 
1548 	return (0);
1549 }
1550 
1551 /*
1552  * xgell_rx_dtr_term
1553  *
1554  * This function will be called by HAL to terminate all DTRs for
1555  * Ring-type channels.
1556  */
1557 static void
1558 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1559     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1560 {
1561 	xgell_rxd_priv_t *rxd_priv =
1562 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1563 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1564 
1565 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1566 		xgell_rx_ring_t *ring = rx_buffer->ring;
1567 
1568 		mutex_enter(&ring->bf_pool.pool_lock);
1569 		xge_hal_ring_dtr_free(channelh, dtrh);
1570 		xgell_rx_buffer_release(rx_buffer);
1571 		mutex_exit(&ring->bf_pool.pool_lock);
1572 	}
1573 }
1574 
1575 /*
1576  * To open a rx ring.
1577  */
1578 static boolean_t
1579 xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
1580 {
1581 	xge_hal_status_e status;
1582 	xge_hal_channel_attr_t attr;
1583 	xgelldev_t *lldev = rx_ring->lldev;
1584 	xge_hal_device_t *hldev = lldev->devh;
1585 
1586 	if (rx_ring->live)
1587 		return (B_TRUE);
1588 
1589 	/* Create the buffer pool first */
1590 	if (!xgell_rx_create_buffer_pool(rx_ring)) {
1591 		xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
1592 		    rx_ring->index);
1593 		return (B_FALSE);
1594 	}
1595 
1596 	/* Default ring initialization */
1597 	attr.post_qid		= rx_ring->index;
1598 	attr.compl_qid		= 0;
1599 	attr.callback		= xgell_rx_1b_callback;
1600 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1601 	attr.flags		= 0;
1602 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1603 	attr.dtr_init		= xgell_rx_dtr_replenish;
1604 	attr.dtr_term		= xgell_rx_dtr_term;
1605 	attr.userdata		= rx_ring;
1606 
1607 	status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
1608 	    XGE_HAL_CHANNEL_OC_NORMAL);
1609 	if (status != XGE_HAL_OK) {
1610 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, got status "
1611 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1612 		(void) xgell_rx_destroy_buffer_pool(rx_ring);
1613 		return (B_FALSE);
1614 	}
1615 
1616 	xgell_rx_ring_maddr_init(rx_ring);
1617 
1618 	mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
1619 	    DDI_INTR_PRI(hldev->irqh));
1620 
1621 	rx_ring->received_bytes = 0;
1622 	rx_ring->poll_bytes = -1;
1623 	rx_ring->polled_bytes = 0;
1624 	rx_ring->poll_mp = NULL;
1625 	rx_ring->live = B_TRUE;
1626 
1627 	xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
1628 	    rx_ring->index);
1629 
1630 	return (B_TRUE);
1631 }
1632 
1633 static void
1634 xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
1635 {
1636 	if (!rx_ring->live)
1637 		return;
1638 	xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1639 	rx_ring->channelh = NULL;
1640 	/* This may not clean up all used buffers, driver will handle it */
1641 	if (xgell_rx_destroy_buffer_pool(rx_ring))
1642 		rx_ring->live = B_FALSE;
1643 
1644 	mutex_destroy(&rx_ring->ring_lock);
1645 }
1646 
1647 /*
1648  * xgell_rx_open
1649  * @lldev: the link layer object
1650  *
1651  * Initialize and open all RX channels.
1652  */
1653 static boolean_t
1654 xgell_rx_open(xgelldev_t *lldev)
1655 {
1656 	xgell_rx_ring_t *rx_ring;
1657 	int i;
1658 
1659 	if (lldev->live_rx_rings != 0)
1660 		return (B_TRUE);
1661 
1662 	lldev->live_rx_rings = 0;
1663 
1664 	/*
1665 	 * Initialize all rings
1666 	 */
1667 	for (i = 0; i < lldev->init_rx_rings; i++) {
1668 		rx_ring = &lldev->rx_ring[i];
1669 		rx_ring->index = i;
1670 		rx_ring->lldev = lldev;
1671 		rx_ring->live = B_FALSE;
1672 
1673 		if (!xgell_rx_ring_open(rx_ring))
1674 			return (B_FALSE);
1675 
1676 		lldev->live_rx_rings++;
1677 	}
1678 
1679 	return (B_TRUE);
1680 }
1681 
1682 static void
1683 xgell_rx_close(xgelldev_t *lldev)
1684 {
1685 	xgell_rx_ring_t *rx_ring;
1686 	int i;
1687 
1688 	if (lldev->live_rx_rings == 0)
1689 		return;
1690 
1691 	/*
1692 	 * Close all rx rings
1693 	 */
1694 	for (i = 0; i < lldev->init_rx_rings; i++) {
1695 		rx_ring = &lldev->rx_ring[i];
1696 
1697 		if (rx_ring->live) {
1698 			xgell_rx_ring_close(rx_ring);
1699 			lldev->live_rx_rings--;
1700 		}
1701 	}
1702 
1703 	xge_assert(lldev->live_rx_rings == 0);
1704 }
1705 
1706 /*
1707  * xgell_tx_term
1708  *
1709  * This function will be called by HAL to terminate all DTRs for
1710  * Fifo-type channels.
1711  */
1712 static void
1713 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1714     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1715 {
1716 	xgell_txd_priv_t *txd_priv =
1717 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1718 	mblk_t *mp = txd_priv->mblk;
1719 	int i;
1720 
1721 	/*
1722 	 * for Tx we must clean up the DTR *only* if it has been
1723 	 * posted!
1724 	 */
1725 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1726 		return;
1727 	}
1728 
1729 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1730 		xge_assert(txd_priv->dma_handles[i]);
1731 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1732 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1733 		txd_priv->dma_handles[i] = 0;
1734 	}
1735 
1736 	xge_hal_fifo_dtr_free(channelh, dtrh);
1737 
1738 	if (mp) {
1739 		txd_priv->mblk = NULL;
1740 		freemsg(mp);
1741 	}
1742 }
1743 
1744 static boolean_t
1745 xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
1746 {
1747 	xge_hal_status_e status;
1748 	xge_hal_channel_attr_t attr;
1749 	xgelldev_t *lldev = tx_ring->lldev;
1750 
1751 	if (tx_ring->live)
1752 		return (B_TRUE);
1753 
1754 	attr.post_qid		= tx_ring->index;
1755 	attr.compl_qid		= 0;
1756 	attr.callback		= xgell_xmit_compl;
1757 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1758 	attr.flags		= 0;
1759 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1760 	attr.dtr_init		= NULL;
1761 	attr.dtr_term		= xgell_tx_term;
1762 	attr.userdata		= tx_ring;
1763 
1764 	status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
1765 	    XGE_HAL_CHANNEL_OC_NORMAL);
1766 	if (status != XGE_HAL_OK) {
1767 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel, got status "
1768 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1769 		return (B_FALSE);
1770 	}
1771 
1772 	tx_ring->sent_bytes = 0;
1773 	tx_ring->live = B_TRUE;
1774 
1775 	return (B_TRUE);
1776 }
1777 
1778 static void
1779 xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
1780 {
1781 	if (!tx_ring->live)
1782 		return;
1783 	xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1784 	tx_ring->live = B_FALSE;
1785 }
1786 
1787 /*
1788  * xgell_tx_open
1789  * @lldev: the link layer object
1790  *
1791  * Initialize and open all TX channels.
1792  */
1793 static boolean_t
1794 xgell_tx_open(xgelldev_t *lldev)
1795 {
1796 	xgell_tx_ring_t *tx_ring;
1797 	int i;
1798 
1799 	if (lldev->live_tx_rings != 0)
1800 		return (B_TRUE);
1801 
1802 	lldev->live_tx_rings = 0;
1803 
1804 	/*
1805 	 * Open the rings in reservation order so that they match the h/w sequence.
1806 	 */
1807 	for (i = 0; i < lldev->init_tx_rings; i++) {
1808 		tx_ring = &lldev->tx_ring[i];
1809 		tx_ring->index = i;
1810 		tx_ring->lldev = lldev;
1811 		tx_ring->live = B_FALSE;
1812 
1813 		if (!xgell_tx_ring_open(tx_ring))
1814 			return (B_FALSE);
1815 
1816 		lldev->live_tx_rings++;
1817 	}
1818 
1819 	return (B_TRUE);
1820 }
1821 
1822 static void
1823 xgell_tx_close(xgelldev_t *lldev)
1824 {
1825 	xgell_tx_ring_t *tx_ring;
1826 	int i;
1827 
1828 	if (lldev->live_tx_rings == 0)
1829 		return;
1830 
1831 	/*
1832 	 * Close all tx rings.
1833 	 */
1834 	for (i = 0; i < lldev->init_tx_rings; i++) {
1835 		tx_ring = &lldev->tx_ring[i];
1836 		if (tx_ring->live) {
1837 			xgell_tx_ring_close(tx_ring);
1838 			lldev->live_tx_rings--;
1839 		}
1840 	}
1841 }
1842 
1843 static int
1844 xgell_initiate_start(xgelldev_t *lldev)
1845 {
1846 	xge_hal_status_e status;
1847 	xge_hal_device_t *hldev = lldev->devh;
1848 	int maxpkt = hldev->config.mtu;
1849 
1850 	/* check initial mtu before enabling the device */
1851 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1852 	if (status != XGE_HAL_OK) {
1853 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1854 		    XGELL_IFNAME, lldev->instance, maxpkt);
1855 		return (EINVAL);
1856 	}
1857 
1858 	/* set initial mtu before enabling the device */
1859 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1860 	if (status != XGE_HAL_OK) {
1861 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1862 		    XGELL_IFNAME, lldev->instance, maxpkt);
1863 		return (EIO);
1864 	}
1865 
1866 	/* tune jumbo/normal frame UFC counters */
1867 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
1868 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1869 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1870 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1871 
1872 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
1873 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1874 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1875 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1876 
1877 	/* now, enable the device */
1878 	status = xge_hal_device_enable(lldev->devh);
1879 	if (status != XGE_HAL_OK) {
1880 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1881 		    XGELL_IFNAME, lldev->instance);
1882 		return (EIO);
1883 	}
1884 
1885 	if (!xgell_rx_open(lldev)) {
1886 		status = xge_hal_device_disable(lldev->devh);
1887 		if (status != XGE_HAL_OK) {
1888 			u64 adapter_status;
1889 			(void) xge_hal_device_status(lldev->devh,
1890 			    &adapter_status);
1891 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1892 			    "the device. adaper status 0x%"PRIx64
1893 			    " returned status %d",
1894 			    XGELL_IFNAME, lldev->instance,
1895 			    (uint64_t)adapter_status, status);
1896 		}
1897 		xgell_rx_close(lldev);
1898 		xge_os_mdelay(1500);
1899 		return (ENOMEM);
1900 	}
1901 
1902 	if (!xgell_tx_open(lldev)) {
1903 		status = xge_hal_device_disable(lldev->devh);
1904 		if (status != XGE_HAL_OK) {
1905 			u64 adapter_status;
1906 			(void) xge_hal_device_status(lldev->devh,
1907 			    &adapter_status);
1908 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1909 			    "the device. adaper status 0x%"PRIx64
1910 			    " returned status %d",
1911 			    XGELL_IFNAME, lldev->instance,
1912 			    (uint64_t)adapter_status, status);
1913 		}
1914 		xgell_tx_close(lldev);
1915 		xgell_rx_close(lldev);
1916 		xge_os_mdelay(1500);
1917 		return (ENOMEM);
1918 	}
1919 
1920 	/* time to enable interrupts */
1921 	(void) xge_enable_intrs(lldev);
1922 	xge_hal_device_intr_enable(lldev->devh);
1923 
1924 	lldev->is_initialized = 1;
1925 
1926 	return (0);
1927 }
1928 
1929 static void
1930 xgell_initiate_stop(xgelldev_t *lldev)
1931 {
1932 	xge_hal_status_e status;
1933 
1934 	lldev->is_initialized = 0;
1935 
1936 	status = xge_hal_device_disable(lldev->devh);
1937 	if (status != XGE_HAL_OK) {
1938 		u64 adapter_status;
1939 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1940 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1941 		    "the device. adaper status 0x%"PRIx64" returned status %d",
1942 		    XGELL_IFNAME, lldev->instance,
1943 		    (uint64_t)adapter_status, status);
1944 	}
1945 	xge_hal_device_intr_disable(lldev->devh);
1946 	/* disable OS ISR's */
1947 	xge_disable_intrs(lldev);
1948 
1949 	xge_debug_ll(XGE_TRACE, "%s",
1950 	    "waiting for device irq to become quiescent...");
1951 	xge_os_mdelay(1500);
1952 
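	/*
	 * Flush events still pending on the HAL event queue before
	 * closing the rings.
	 */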
1953 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1954 
1955 	xgell_rx_close(lldev);
1956 	xgell_tx_close(lldev);
1957 }
1958 
1959 /*
1960  * xgell_m_start
1961  * @arg: pointer to device private structure (lldev)
1962  *
1963  * This function is called by MAC Layer to enable the XFRAME
1964  * firmware to generate interrupts and also prepare the
1965  * driver to call mac_rx for delivering receive packets
1966  * to MAC Layer.
1967  */
1968 static int
1969 xgell_m_start(void *arg)
1970 {
1971 	xgelldev_t *lldev = arg;
1972 	xge_hal_device_t *hldev = lldev->devh;
1973 	int ret;
1974 
1975 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1976 	    lldev->instance);
1977 
1978 	mutex_enter(&lldev->genlock);
1979 
1980 	if (lldev->is_initialized) {
1981 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1982 		    XGELL_IFNAME, lldev->instance);
1983 		mutex_exit(&lldev->genlock);
1984 		return (EINVAL);
1985 	}
1986 
1987 	hldev->terminating = 0;
1988 	if ((ret = xgell_initiate_start(lldev)) != 0) {
1989 		mutex_exit(&lldev->genlock);
1990 		return (ret);
1991 	}
1992 
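	/* Schedule xge_device_poll() to run after XGE_DEV_POLL_TICKS ticks. */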
1993 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1994 
1995 	mutex_exit(&lldev->genlock);
1996 
1997 	return (0);
1998 }
1999 
2000 /*
2001  * xgell_m_stop
2002  * @arg: pointer to device private data (lldev)
2003  *
2004  * This function is called by the MAC Layer to disable
2005  * the XFRAME firmware for generating any interrupts and
2006  * also stop the driver from calling mac_rx() for
2007  * delivering data packets to the MAC Layer.
2008  */
2009 static void
2010 xgell_m_stop(void *arg)
2011 {
2012 	xgelldev_t *lldev = arg;
2013 	xge_hal_device_t *hldev = lldev->devh;
2014 
2015 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
2016 
2017 	mutex_enter(&lldev->genlock);
2018 	if (!lldev->is_initialized) {
2019 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
2020 		mutex_exit(&lldev->genlock);
2021 		return;
2022 	}
2023 
2024 	xge_hal_device_terminating(hldev);
2025 	xgell_initiate_stop(lldev);
2026 
2027 	/* reset device */
2028 	(void) xge_hal_device_reset(lldev->devh);
2029 
2030 	mutex_exit(&lldev->genlock);
2031 
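	/* Cancel the device poll timeout armed in xgell_m_start(). */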
2032 	if (lldev->timeout_id != 0) {
2033 		(void) untimeout(lldev->timeout_id);
2034 	}
2035 
2036 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
2037 }
2038 
2039 /*
2040  * xgell_onerr_reset
2041  * @lldev: pointer to xgelldev_t structure
2042  *
2043  * This function is called by the HAL event framework to reset the h/w.
2044  * It must be called with genlock taken.
2045  */
2046 int
2047 xgell_onerr_reset(xgelldev_t *lldev)
2048 {
2049 	int rc = 0;
2050 
2051 	if (!lldev->is_initialized) {
2052 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
2053 		    XGELL_IFNAME, lldev->instance);
2054 		return (rc);
2055 	}
2056 
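	/*
	 * Recover by running a full stop / h/w reset / start cycle.
	 */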
2057 	lldev->in_reset = 1;
2058 	xgell_initiate_stop(lldev);
2059 
2060 	/* reset device */
2061 	(void) xge_hal_device_reset(lldev->devh);
2062 
2063 	rc = xgell_initiate_start(lldev);
2064 	lldev->in_reset = 0;
2065 
2066 	return (rc);
2067 }
2068 
2069 /*
2070  * xgell_m_multicst
2071  * @arg: pointer to device private structure (lldev)
2072  * @add: B_TRUE to enable, B_FALSE to disable multicast reception
2073  * @mc_addr: multicast MAC address (currently unused, see FIXME below)
2074  *
2075  * This function is called by MAC Layer to enable or
2076  * disable device-level reception of specific multicast addresses.
2077  */
2078 static int
2079 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
2080 {
2081 	xge_hal_status_e status;
2082 	xgelldev_t *lldev = (xgelldev_t *)arg;
2083 	xge_hal_device_t *hldev = lldev->devh;
2084 
2085 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
2086 
2087 	mutex_enter(&lldev->genlock);
2088 
2089 	if (!lldev->is_initialized) {
2090 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
2091 		    XGELL_IFNAME, lldev->instance);
2092 		mutex_exit(&lldev->genlock);
2093 		return (EIO);
2094 	}
2095 
2096 	/* FIXME: missing HAL functionality: enable_one() */
2097 
2098 	status = (add) ?
2099 	    xge_hal_device_mcast_enable(hldev) :
2100 	    xge_hal_device_mcast_disable(hldev);
2101 
2102 	if (status != XGE_HAL_OK) {
2103 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
2104 		    add ? "enable" : "disable", status);
2105 		mutex_exit(&lldev->genlock);
2106 		return (EIO);
2107 	}
2108 
2109 	mutex_exit(&lldev->genlock);
2110 
2111 	return (0);
2112 }
2113 
2114 
2115 /*
2116  * xgell_m_promisc
2117  * @arg: pointer to device private structure (lldev)
2118  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
2119  *
2120  * This function is called by MAC Layer to enable or
2121  * disable the reception of all the packets on the medium
2122  */
2123 static int
2124 xgell_m_promisc(void *arg, boolean_t on)
2125 {
2126 	xgelldev_t *lldev = (xgelldev_t *)arg;
2127 	xge_hal_device_t *hldev = lldev->devh;
2128 
2129 	mutex_enter(&lldev->genlock);
2130 
2131 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
2132 
2133 	if (!lldev->is_initialized) {
2134 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
2135 		    XGELL_IFNAME, lldev->instance);
2136 		mutex_exit(&lldev->genlock);
2137 		return (EIO);
2138 	}
2139 
2140 	if (on) {
2141 		xge_hal_device_promisc_enable(hldev);
2142 	} else {
2143 		xge_hal_device_promisc_disable(hldev);
2144 	}
2145 
2146 	mutex_exit(&lldev->genlock);
2147 
2148 	return (0);
2149 }
2150 
2151 /*
2152  * xgell_m_stat
2153  * @arg: pointer to device private structure (lldev)
2154  *
2155  * This function is called by MAC Layer to get network statistics
2156  * from the driver.
2157  */
2158 static int
2159 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
2160 {
2161 	xge_hal_stats_hw_info_t *hw_info;
2162 	xgelldev_t *lldev = (xgelldev_t *)arg;
2163 	xge_hal_device_t *hldev = lldev->devh;
2164 
2165 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
2166 
2167 	mutex_enter(&lldev->genlock);
2168 
2169 	if (!lldev->is_initialized) {
2170 		mutex_exit(&lldev->genlock);
2171 		return (EAGAIN);
2172 	}
2173 
2174 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
2175 		mutex_exit(&lldev->genlock);
2176 		return (EAGAIN);
2177 	}
2178 
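	/*
	 * Most Xframe counters are kept as a 32-bit value plus a 32-bit
	 * overflow counterpart; merge the two halves into the single
	 * 64-bit value expected by the MAC layer.
	 */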
2179 	switch (stat) {
2180 	case MAC_STAT_IFSPEED:
2181 		*val = 10000000000ull; /* 10G */
2182 		break;
2183 
2184 	case MAC_STAT_MULTIRCV:
2185 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
2186 		    hw_info->rmac_vld_mcst_frms;
2187 		break;
2188 
2189 	case MAC_STAT_BRDCSTRCV:
2190 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
2191 		    hw_info->rmac_vld_bcst_frms;
2192 		break;
2193 
2194 	case MAC_STAT_MULTIXMT:
2195 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
2196 		    hw_info->tmac_mcst_frms;
2197 		break;
2198 
2199 	case MAC_STAT_BRDCSTXMT:
2200 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
2201 		    hw_info->tmac_bcst_frms;
2202 		break;
2203 
2204 	case MAC_STAT_RBYTES:
2205 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
2206 		    hw_info->rmac_ttl_octets;
2207 		break;
2208 
2209 	case MAC_STAT_NORCVBUF:
2210 		*val = hw_info->rmac_drop_frms;
2211 		break;
2212 
2213 	case MAC_STAT_IERRORS:
2214 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
2215 		    hw_info->rmac_discarded_frms;
2216 		break;
2217 
2218 	case MAC_STAT_OBYTES:
2219 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
2220 		    hw_info->tmac_ttl_octets;
2221 		break;
2222 
2223 	case MAC_STAT_NOXMTBUF:
2224 		*val = hw_info->tmac_drop_frms;
2225 		break;
2226 
2227 	case MAC_STAT_OERRORS:
2228 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
2229 		    hw_info->tmac_any_err_frms;
2230 		break;
2231 
2232 	case MAC_STAT_IPACKETS:
2233 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
2234 		    hw_info->rmac_vld_frms;
2235 		break;
2236 
2237 	case MAC_STAT_OPACKETS:
2238 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
2239 		    hw_info->tmac_frms;
2240 		break;
2241 
2242 	case ETHER_STAT_FCS_ERRORS:
2243 		*val = hw_info->rmac_fcs_err_frms;
2244 		break;
2245 
2246 	case ETHER_STAT_TOOLONG_ERRORS:
2247 		*val = hw_info->rmac_long_frms;
2248 		break;
2249 
2250 	case ETHER_STAT_LINK_DUPLEX:
2251 		*val = LINK_DUPLEX_FULL;
2252 		break;
2253 
2254 	default:
2255 		mutex_exit(&lldev->genlock);
2256 		return (ENOTSUP);
2257 	}
2258 
2259 	mutex_exit(&lldev->genlock);
2260 
2261 	return (0);
2262 }
2263 
2264 /*
2265  * xgell_device_alloc - Allocate new LL device
2266  */
2267 int
2268 xgell_device_alloc(xge_hal_device_h devh,
2269     dev_info_t *dev_info, xgelldev_t **lldev_out)
2270 {
2271 	xgelldev_t *lldev;
2272 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2273 	int instance = ddi_get_instance(dev_info);
2274 
2275 	*lldev_out = NULL;
2276 
2277 	xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
2278 	    XGELL_IFNAME, instance);
2279 
2280 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
2281 
2282 	lldev->devh = hldev;
2283 	lldev->instance = instance;
2284 	lldev->dev_info = dev_info;
2285 
2286 	*lldev_out = lldev;
2287 
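	/* Stash the HAL device handle as the instance's private data. */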
2288 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
2289 
2290 	return (DDI_SUCCESS);
2291 }
2292 
2293 /*
2294  * xgell_device_free
2295  */
2296 void
2297 xgell_device_free(xgelldev_t *lldev)
2298 {
2299 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
2300 	    XGELL_IFNAME, lldev->instance);
2301 
2302 	kmem_free(lldev, sizeof (xgelldev_t));
2303 }
2304 
2305 /*
2306  * xgell_ioctl
2307  */
2308 static void
2309 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2310 {
2311 	xgelldev_t *lldev = arg;
2312 	struct iocblk *iocp;
2313 	int err = 0;
2314 	int cmd;
2315 	int need_privilege = 1;
2316 	int ret = 0;
2317 
2318 
2319 	iocp = (struct iocblk *)mp->b_rptr;
2320 	iocp->ioc_error = 0;
2321 	cmd = iocp->ioc_cmd;
2322 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
2323 	switch (cmd) {
2324 	case ND_GET:
2325 		need_privilege = 0;
2326 		/* FALLTHRU */
2327 	case ND_SET:
2328 		break;
2329 	default:
2330 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
2331 		miocnak(wq, mp, 0, EINVAL);
2332 		return;
2333 	}
2334 
2335 	if (need_privilege) {
2336 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2337 		if (err != 0) {
2338 			xge_debug_ll(XGE_ERR,
2339 			    "drv_priv(): rejected cmd 0x%x, err %d",
2340 			    cmd, err);
2341 			miocnak(wq, mp, 0, err);
2342 			return;
2343 		}
2344 	}
2345 
2346 	switch (cmd) {
2347 	case ND_GET:
2348 		/*
2349 		 * If nd_getset() returns B_FALSE, the command was
2350 		 * not valid (e.g. unknown name), so we just tell the
2351 		 * top-level ioctl code to send a NAK (with code EINVAL).
2352 		 *
2353 		 * Otherwise, nd_getset() will have built the reply to
2354 		 * be sent (but not actually sent it), so we tell the
2355 		 * caller to send the prepared reply.
2356 		 */
2357 		ret = nd_getset(wq, lldev->ndp, mp);
2358 		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
2359 		break;
2360 
2361 	case ND_SET:
2362 		ret = nd_getset(wq, lldev->ndp, mp);
2363 		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
2364 		break;
2365 
2366 	default:
2367 		break;
2368 	}
2369 
2370 	if (ret == B_FALSE) {
2371 		xge_debug_ll(XGE_ERR,
2372 		    "nd_getset(): rejected cmd 0x%x, err %d",
2373 		    cmd, err);
2374 		miocnak(wq, mp, 0, EINVAL);
2375 	} else {
2376 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2377 		    M_IOCACK : M_IOCNAK;
2378 		qreply(wq, mp);
2379 	}
2380 }
2381 
2382 
2383 static boolean_t
2384 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2385 {
2386 	xgelldev_t *lldev = arg;
2387 
2388 	xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
2389 
2390 	switch (cap) {
2391 	case MAC_CAPAB_HCKSUM: {
2392 		uint32_t *hcksum_txflags = cap_data;
2393 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2394 		    HCKSUM_IPHDRCKSUM;
2395 		break;
2396 	}
2397 	case MAC_CAPAB_LSO: {
2398 		mac_capab_lso_t *cap_lso = cap_data;
2399 
2400 		if (lldev->config.lso_enable) {
2401 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2402 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
2403 			break;
2404 		} else {
2405 			return (B_FALSE);
2406 		}
2407 	}
2408 	case MAC_CAPAB_RINGS: {
2409 		mac_capab_rings_t *cap_rings = cap_data;
2410 
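		/*
		 * Expose the statically grouped rx/tx rings configured at
		 * attach time to the MAC layer.
		 */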
2411 		switch (cap_rings->mr_type) {
2412 		case MAC_RING_TYPE_RX:
2413 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2414 			cap_rings->mr_rnum = lldev->init_rx_rings;
2415 			cap_rings->mr_gnum = lldev->init_rx_groups;
2416 			cap_rings->mr_rget = xgell_fill_ring;
2417 			cap_rings->mr_gget = xgell_fill_group;
2418 			break;
2419 		case MAC_RING_TYPE_TX:
2420 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2421 			cap_rings->mr_rnum = lldev->init_tx_rings;
2422 			cap_rings->mr_gnum = 0;
2423 			cap_rings->mr_rget = xgell_fill_ring;
2424 			cap_rings->mr_gget = NULL;
2425 			break;
2426 		default:
2427 			break;
2428 		}
2429 		break;
2430 	}
2431 	default:
2432 		return (B_FALSE);
2433 	}
2434 	return (B_TRUE);
2435 }
2436 
2437 static int
2438 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2439 {
2440 	xgelldev_t *lldev = (xgelldev_t *)cp;
2441 	xge_hal_status_e status;
2442 	int count = 0, retsize;
2443 	char *buf;
2444 
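	/*
	 * Concatenate the tmac, rmac, pci, sw_dev and hal statistics
	 * dumps into a single text buffer for the "stats" ndd parameter.
	 */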
2445 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2446 	if (buf == NULL) {
2447 		return (ENOSPC);
2448 	}
2449 
2450 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2451 	    buf, &retsize);
2452 	if (status != XGE_HAL_OK) {
2453 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2454 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2455 		return (EINVAL);
2456 	}
2457 	count += retsize;
2458 
2459 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2460 	    XGELL_STATS_BUFSIZE - count,
2461 	    buf+count, &retsize);
2462 	if (status != XGE_HAL_OK) {
2463 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2464 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2465 		return (EINVAL);
2466 	}
2467 	count += retsize;
2468 
2469 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2470 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2471 	if (status != XGE_HAL_OK) {
2472 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2473 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2474 		return (EINVAL);
2475 	}
2476 	count += retsize;
2477 
2478 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2479 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2480 	if (status != XGE_HAL_OK) {
2481 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2482 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2483 		return (EINVAL);
2484 	}
2485 	count += retsize;
2486 
2487 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2488 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2489 	if (status != XGE_HAL_OK) {
2490 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2491 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2492 		return (EINVAL);
2493 	}
2494 	count += retsize;
2495 
2496 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2497 	(void) mi_mpprintf(mp, "%s", buf);
2498 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2499 
2500 	return (0);
2501 }
2502 
2503 static int
2504 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2505 {
2506 	xgelldev_t *lldev = (xgelldev_t *)cp;
2507 	xge_hal_status_e status;
2508 	int retsize;
2509 	char *buf;
2510 
2511 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2512 	if (buf == NULL) {
2513 		return (ENOSPC);
2514 	}
2515 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2516 	    buf, &retsize);
2517 	if (status != XGE_HAL_OK) {
2518 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2519 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2520 		return (EINVAL);
2521 	}
2522 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2523 	(void) mi_mpprintf(mp, "%s", buf);
2524 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2525 
2526 	return (0);
2527 }
2528 
2529 static int
2530 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2531 {
2532 	xgelldev_t *lldev = (xgelldev_t *)cp;
2533 	xge_hal_status_e status;
2534 	int retsize;
2535 	char *buf;
2536 
2537 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2538 	if (buf == NULL) {
2539 		return (ENOSPC);
2540 	}
2541 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2542 	    buf, &retsize);
2543 	if (status != XGE_HAL_OK) {
2544 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2545 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2546 		return (EINVAL);
2547 	}
2548 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2549 	(void) mi_mpprintf(mp, "%s", buf);
2550 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2551 
2552 	return (0);
2553 }
2554 
2555 static unsigned long bar0_offset = 0x110; /* adapter_control */
2556 
2557 static int
2558 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2559 {
2560 	xgelldev_t *lldev = (xgelldev_t *)cp;
2561 	xge_hal_status_e status;
2562 	int retsize;
2563 	char *buf;
2564 
2565 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2566 	if (buf == NULL) {
2567 		return (ENOSPC);
2568 	}
2569 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2570 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2571 	if (status != XGE_HAL_OK) {
2572 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2573 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2574 		return (EINVAL);
2575 	}
2576 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2577 	(void) mi_mpprintf(mp, "%s", buf);
2578 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2579 
2580 	return (0);
2581 }
2582 
2583 static int
2584 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2585 {
2586 	unsigned long old_offset = bar0_offset;
2587 	char *end;
2588 
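	/* Skip an optional leading 0x/0X before parsing the offset as hex. */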
2589 	if (value && *value == '0' &&
2590 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2591 		value += 2;
2592 	}
2593 
2594 	bar0_offset = mi_strtol(value, &end, 16);
2595 	if (end == value) {
2596 		bar0_offset = old_offset;
2597 		return (EINVAL);
2598 	}
2599 
2600 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2601 
2602 	return (0);
2603 }
2604 
2605 static int
2606 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2607 {
2608 	char *buf;
2609 
2610 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2611 	if (buf == NULL) {
2612 		return (ENOSPC);
2613 	}
2614 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2615 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2616 
2617 	return (0);
2618 }
2619 
2620 static int
2621 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2622     cred_t *credp)
2623 {
2624 	int level;
2625 	char *end;
2626 
2627 	level = mi_strtol(value, &end, 10);
2628 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2629 		return (EINVAL);
2630 	}
2631 
2632 	xge_hal_driver_debug_level_set(level);
2633 
2634 	return (0);
2635 }
2636 
2637 static int
2638 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2639 {
2640 	char *buf;
2641 
2642 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2643 	if (buf == NULL) {
2644 		return (ENOSPC);
2645 	}
2646 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2647 	    xge_hal_driver_debug_module_mask());
2648 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2649 
2650 	return (0);
2651 }
2652 
2653 static int
2654 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2655     cred_t *credp)
2656 {
2657 	u32 mask;
2658 	char *end;
2659 
2660 	if (value && *value == '0' &&
2661 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2662 		value += 2;
2663 	}
2664 
2665 	mask = mi_strtol(value, &end, 16);
2666 	if (end == value) {
2667 		return (EINVAL);
2668 	}
2669 
2670 	xge_hal_driver_debug_module_mask_set(mask);
2671 
2672 	return (0);
2673 }
2674 
2675 static int
2676 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2677 {
2678 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2679 	xge_hal_status_e status;
2680 	int retsize;
2681 	char *buf;
2682 
2683 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2684 	if (buf == NULL) {
2685 		return (ENOSPC);
2686 	}
2687 	status = xge_hal_aux_device_config_read(lldev->devh,
2688 	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2689 	if (status != XGE_HAL_OK) {
2690 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2691 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2692 		    status);
2693 		return (EINVAL);
2694 	}
2695 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2696 	(void) mi_mpprintf(mp, "%s", buf);
2697 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2698 
2699 	return (0);
2700 }
2701 
2702 /*
2703  * xgell_device_register
2704  * @lldev: pointer to valid LL device
2705  * @config: pointer to this network device's configuration
2706  *
2707  * This function will set up the NDD parameters and register the
2708  * network device with the MAC layer.
2709  */
2710 int
2711 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2712 {
2713 	mac_register_t *macp = NULL;
2714 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2715 
2716 	/*
2717 	 * Initialize some NDD interface for internal debug.
2718 	 */
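	/*
	 * These parameters are meant to be read (and, where a set routine
	 * is provided, written) through the ndd(1M) interface, for example
	 * something like "ndd /dev/xge stats"; the exact device node name
	 * depends on how the instance is attached.
	 */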
2719 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2720 	    (caddr_t)lldev) == B_FALSE)
2721 		goto xgell_ndd_fail;
2722 
2723 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2724 	    (caddr_t)lldev) == B_FALSE)
2725 		goto xgell_ndd_fail;
2726 
2727 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2728 	    (caddr_t)lldev) == B_FALSE)
2729 		goto xgell_ndd_fail;
2730 
2731 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2732 	    (caddr_t)lldev) == B_FALSE)
2733 		goto xgell_ndd_fail;
2734 
2735 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2736 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2737 		goto xgell_ndd_fail;
2738 
2739 	if (nd_load(&lldev->ndp, "debug_module_mask",
2740 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2741 	    (caddr_t)lldev) == B_FALSE)
2742 		goto xgell_ndd_fail;
2743 
2744 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2745 	    (caddr_t)lldev) == B_FALSE)
2746 		goto xgell_ndd_fail;
2747 
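	/* Keep a private copy of the link-layer configuration. */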
2748 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2749 
2750 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
2751 	    DDI_INTR_PRI(hldev->irqh));
2752 
2753 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2754 		goto xgell_register_fail;
2755 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2756 	macp->m_driver = lldev;
2757 	macp->m_dip = lldev->dev_info;
2758 	macp->m_src_addr = hldev->macaddr[0];
2759 	macp->m_callbacks = &xgell_m_callbacks;
2760 	macp->m_min_sdu = 0;
2761 	macp->m_max_sdu = hldev->config.mtu;
2762 	macp->m_margin = VLAN_TAGSZ;
2763 	macp->m_v12n = MAC_VIRT_LEVEL1;
2764 
2765 	/*
2766 	 * MAC Registration.
2767 	 */
2768 	if (mac_register(macp, &lldev->mh) != 0)
2769 		goto xgell_register_fail;
2770 
2771 	/* Always free the macp after register */
2772 	if (macp != NULL)
2773 		mac_free(macp);
2774 
2775 	/* Calculate tx_copied_max here ??? */
2776 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2777 	    hldev->config.fifo.alignment_size *
2778 	    hldev->config.fifo.max_aligned_frags;
2779 
2780 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
2781 	    XGELL_IFNAME, lldev->instance);
2782 
2783 	return (DDI_SUCCESS);
2784 
2785 xgell_ndd_fail:
2786 	nd_free(&lldev->ndp);
2787 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2788 	return (DDI_FAILURE);
2789 
2790 xgell_register_fail:
2791 	if (macp != NULL)
2792 		mac_free(macp);
2793 	nd_free(&lldev->ndp);
2794 	mutex_destroy(&lldev->genlock);
2795 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2796 	return (DDI_FAILURE);
2797 }
2798 
2799 /*
2800  * xgell_device_unregister
2801  * @lldev: pointer to valid LL device.
2802  *
2803  * This function will unregister the network device from the MAC layer;
2804  * the LL device itself is freed separately by xgell_device_free().
2805  */
2806 int
2807 xgell_device_unregister(xgelldev_t *lldev)
2808 {
2809 	if (mac_unregister(lldev->mh) != 0) {
2810 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2811 		    XGELL_IFNAME, lldev->instance);
2812 		return (DDI_FAILURE);
2813 	}
2814 
2815 	mutex_destroy(&lldev->genlock);
2816 
2817 	nd_free(&lldev->ndp);
2818 
2819 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",
2820 	    XGELL_IFNAME, lldev->instance);
2821 
2822 	return (DDI_SUCCESS);
2823 }
2824