xref: /titanic_52/usr/src/uts/common/io/xge/drv/xgell.c (revision b6c3f7863936abeae522e48a13887dddeb691a45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
31  *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 #include <netinet/udp.h>
44 
45 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
46     sizeof (struct ether_vlan_header))
47 
48 #define	HEADROOM		2	/* for DIX-only packets */
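/*
 * The 2-byte HEADROOM places the 14-byte DIX header at offset 2, so the IP
 * header that follows it starts on a 4-byte boundary.
 */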
49 
50 void header_free_func(void *arg) { }
51 frtn_t header_frtn = {header_free_func, NULL};
52 
53 /* DMA attributes used for Tx side */
54 static struct ddi_dma_attr tx_dma_attr = {
55 	DMA_ATTR_V0,			/* dma_attr_version */
56 	0x0ULL,				/* dma_attr_addr_lo */
57 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
58 	0xFFFFFFFFULL,			/* dma_attr_count_max */
59 	0x1ULL,				/* dma_attr_align */
60 	0xFFF,				/* dma_attr_burstsizes */
61 	1,				/* dma_attr_minxfer */
62 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
63 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
64 	18,				/* dma_attr_sgllen */
65 	1,				/* dma_attr_granular */
66 	0				/* dma_attr_flags */
67 };
68 
69 /* Aligned DMA attributes used for Tx side */
70 struct ddi_dma_attr tx_dma_attr_align = {
71 	DMA_ATTR_V0,			/* dma_attr_version */
72 	0x0ULL,				/* dma_attr_addr_lo */
73 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
74 	0xFFFFFFFFULL,			/* dma_attr_count_max */
75 	4096,				/* dma_attr_align */
76 	0xFFF,				/* dma_attr_burstsizes */
77 	1,				/* dma_attr_minxfer */
78 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
79 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
80 	4,				/* dma_attr_sgllen */
81 	1,				/* dma_attr_granular */
82 	0				/* dma_attr_flags */
83 };
84 
85 /*
86  * DMA attributes used when using ddi_dma_mem_alloc to
87  * allocate HAL descriptors and Rx buffers during replenish
88  */
89 static struct ddi_dma_attr hal_dma_attr = {
90 	DMA_ATTR_V0,			/* dma_attr_version */
91 	0x0ULL,				/* dma_attr_addr_lo */
92 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
93 	0xFFFFFFFFULL,			/* dma_attr_count_max */
94 	0x1ULL,				/* dma_attr_align */
95 	0xFFF,				/* dma_attr_burstsizes */
96 	1,				/* dma_attr_minxfer */
97 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
98 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
99 	1,				/* dma_attr_sgllen */
100 	1,				/* dma_attr_granular */
101 	0				/* dma_attr_flags */
102 };
103 
104 /*
105  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
106  * allocate HAL descriptors and Rx buffers during replenish
107  */
108 struct ddi_dma_attr hal_dma_attr_aligned = {
109 	DMA_ATTR_V0,			/* dma_attr_version */
110 	0x0ULL,				/* dma_attr_addr_lo */
111 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
112 	0xFFFFFFFFULL,			/* dma_attr_count_max */
113 	4096,				/* dma_attr_align */
114 	0xFFF,				/* dma_attr_burstsizes */
115 	1,				/* dma_attr_minxfer */
116 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
117 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
118 	1,				/* dma_attr_sgllen */
119 	1,				/* dma_attr_granular */
120 	0				/* dma_attr_flags */
121 };
122 
123 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
124 struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
125 
126 static int		xgell_m_stat(void *, uint_t, uint64_t *);
127 static int		xgell_m_start(void *);
128 static void		xgell_m_stop(void *);
129 static int		xgell_m_promisc(void *, boolean_t);
130 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
131 static int		xgell_m_unicst(void *, const uint8_t *);
132 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
133 static mblk_t 		*xgell_m_tx(void *, mblk_t *);
134 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
135 
136 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
137 
138 static mac_callbacks_t xgell_m_callbacks = {
139 	XGELL_M_CALLBACK_FLAGS,
140 	xgell_m_stat,
141 	xgell_m_start,
142 	xgell_m_stop,
143 	xgell_m_promisc,
144 	xgell_m_multicst,
145 	xgell_m_unicst,
146 	xgell_m_tx,
147 	NULL,
148 	xgell_m_ioctl,
149 	xgell_m_getcapab
150 };
151 
152 /*
153  * xge_device_poll
154  *
155  * The poll cycle calls this function every second. xge_callback_event_queued()
156  * should also call it when a HAL event has been rescheduled.
157  */
158 /*ARGSUSED*/
159 void
160 xge_device_poll(void *data)
161 {
162 	xgelldev_t *lldev = xge_hal_device_private(data);
163 
164 	mutex_enter(&lldev->genlock);
165 	if (lldev->is_initialized) {
166 		xge_hal_device_poll(data);
167 		lldev->timeout_id = timeout(xge_device_poll, data,
168 		    XGE_DEV_POLL_TICKS);
169 	} else if (lldev->in_reset == 1) {
170 		lldev->timeout_id = timeout(xge_device_poll, data,
171 		    XGE_DEV_POLL_TICKS);
172 	} else {
173 		lldev->timeout_id = 0;
174 	}
175 	mutex_exit(&lldev->genlock);
176 }
177 
178 /*
179  * xge_device_poll_now
180  *
181  * Polls the device immediately via xge_hal_device_poll().
182  */
183 void
184 xge_device_poll_now(void *data)
185 {
186 	xgelldev_t *lldev = xge_hal_device_private(data);
187 
188 	mutex_enter(&lldev->genlock);
189 	if (lldev->is_initialized) {
190 		xge_hal_device_poll(data);
191 	}
192 	mutex_exit(&lldev->genlock);
193 }
194 
195 /*
196  * xgell_callback_link_up
197  *
198  * This function is called by the HAL to notify of a hardware link-up state change.
199  */
200 void
201 xgell_callback_link_up(void *userdata)
202 {
203 	xgelldev_t *lldev = (xgelldev_t *)userdata;
204 
205 	mac_link_update(lldev->mh, LINK_STATE_UP);
206 }
207 
208 /*
209  * xgell_callback_link_down
210  *
211  * This function is called by the HAL to notify of a hardware link-down state change.
212  */
213 void
214 xgell_callback_link_down(void *userdata)
215 {
216 	xgelldev_t *lldev = (xgelldev_t *)userdata;
217 
218 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
219 }
220 
221 /*
222  * xgell_rx_buffer_replenish_all
223  *
224  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
225  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
226  * Must be called with pool_lock held.
227  */
228 static void
229 xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
230 {
231 	xge_hal_dtr_h dtr;
232 	xgell_rx_buffer_t *rx_buffer;
233 	xgell_rxd_priv_t *rxd_priv;
234 
235 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
236 
237 	while ((lldev->bf_pool.free > 0) &&
238 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
239 	    XGE_HAL_OK)) {
240 		rx_buffer = lldev->bf_pool.head;
241 		lldev->bf_pool.head = rx_buffer->next;
242 		lldev->bf_pool.free--;
243 
244 		xge_assert(rx_buffer);
245 		xge_assert(rx_buffer->dma_addr);
246 
247 		rxd_priv = (xgell_rxd_priv_t *)
248 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
249 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
250 		    lldev->bf_pool.size);
251 
252 		rxd_priv->rx_buffer = rx_buffer;
253 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
254 	}
255 }
256 
257 /*
258  * xgell_rx_buffer_release
259  *
260  * The only thing done here is to put the buffer back to the pool.
261  * Calls to this function must be protected by the bf_pool.pool_lock mutex.
262  */
263 static void
264 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
265 {
266 	xgelldev_t *lldev = rx_buffer->lldev;
267 
268 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
269 
270 	/* Put the buffer back to pool */
271 	rx_buffer->next = lldev->bf_pool.head;
272 	lldev->bf_pool.head = rx_buffer;
273 
274 	lldev->bf_pool.free++;
275 }
276 
277 /*
278  * xgell_rx_buffer_recycle
279  *
280  * Registered via desballoc() as the rx buffer's free routine; called when the
281  * loaned-up mblk is freed. We will try to replenish all descriptors.
282  */
283 static void
284 xgell_rx_buffer_recycle(char *arg)
285 {
286 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
287 	xgelldev_t *lldev = rx_buffer->lldev;
288 
289 	mutex_enter(&lldev->bf_pool.pool_lock);
290 
291 	xgell_rx_buffer_release(rx_buffer);
292 	lldev->bf_pool.post--;
293 
294 	/*
295 	 * Before finding a good way to set this hiwat, just always call to
296 	 * replenish_all. *TODO*
297 	 */
298 	if (lldev->is_initialized != 0) {
299 		xgell_rx_buffer_replenish_all(lldev);
300 	}
301 
302 	mutex_exit(&lldev->bf_pool.pool_lock);
303 }
304 
305 /*
306  * xgell_rx_buffer_alloc
307  *
308  * Allocate one rx buffer and return a pointer to it.
309  * Return NULL on failure.
310  */
311 static xgell_rx_buffer_t *
312 xgell_rx_buffer_alloc(xgelldev_t *lldev)
313 {
314 	xge_hal_device_t *hldev;
315 	void *vaddr;
316 	ddi_dma_handle_t dma_handle;
317 	ddi_acc_handle_t dma_acch;
318 	dma_addr_t dma_addr;
319 	uint_t ncookies;
320 	ddi_dma_cookie_t dma_cookie;
321 	size_t real_size;
322 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
323 	xgell_rx_buffer_t *rx_buffer;
324 
325 	hldev = (xge_hal_device_t *)lldev->devh;
326 
327 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
328 	    0, &dma_handle) != DDI_SUCCESS) {
329 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
330 		    XGELL_IFNAME, lldev->instance);
331 		goto handle_failed;
332 	}
333 
334 	/* reserve some space at the end of the buffer for recycling */
335 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
336 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
337 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
338 	    DDI_SUCCESS) {
339 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
340 		    XGELL_IFNAME, lldev->instance);
341 		goto mem_failed;
342 	}
343 
344 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
345 	    real_size) {
346 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
347 		    XGELL_IFNAME, lldev->instance);
348 		goto bind_failed;
349 	}
350 
351 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
352 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
353 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
354 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
355 		    XGELL_IFNAME, lldev->instance);
356 		goto bind_failed;
357 	}
358 
359 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
360 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
361 		    XGELL_IFNAME, lldev->instance);
362 		goto check_failed;
363 	}
364 
365 	dma_addr = dma_cookie.dmac_laddress;
366 
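	/*
	 * The xgell_rx_buffer_t bookkeeping structure lives in the space
	 * reserved at the tail of the allocation, after HEADROOM plus
	 * bf_pool.size bytes of packet data.
	 */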
367 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
368 	    sizeof (xgell_rx_buffer_t));
369 	rx_buffer->next = NULL;
370 	rx_buffer->vaddr = vaddr;
371 	rx_buffer->dma_addr = dma_addr;
372 	rx_buffer->dma_handle = dma_handle;
373 	rx_buffer->dma_acch = dma_acch;
374 	rx_buffer->lldev = lldev;
375 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
376 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
377 
378 	return (rx_buffer);
379 
380 check_failed:
381 	(void) ddi_dma_unbind_handle(dma_handle);
382 bind_failed:
383 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
384 	ddi_dma_mem_free(&dma_acch);
385 mem_failed:
386 	ddi_dma_free_handle(&dma_handle);
387 handle_failed:
388 
389 	return (NULL);
390 }
391 
392 /*
393  * xgell_rx_destroy_buffer_pool
394  *
395  * Destroy the buffer pool. If any buffers are still held by the upper layer,
396  * as recorded by bf_pool.post, return DDI_FAILURE to reject the unload.
397  */
398 static int
399 xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
400 {
401 	xgell_rx_buffer_t *rx_buffer;
402 	ddi_dma_handle_t  dma_handle;
403 	ddi_acc_handle_t  dma_acch;
404 	int i;
405 
406 	/*
407 	 * If any buffers are still posted, the driver must refuse to be
408 	 * detached; the upper layer needs to release them first.
409 	 */
410 	if (lldev->bf_pool.post != 0) {
411 		xge_debug_ll(XGE_ERR,
412 		    "%s%d has some buffers not yet recycled, try later!",
413 		    XGELL_IFNAME, lldev->instance);
414 		return (DDI_FAILURE);
415 	}
416 
417 	/*
418 	 * Release buffers one by one.
419 	 */
420 	for (i = lldev->bf_pool.total; i > 0; i--) {
421 		rx_buffer = lldev->bf_pool.head;
422 		xge_assert(rx_buffer != NULL);
423 
424 		lldev->bf_pool.head = rx_buffer->next;
425 
426 		dma_handle = rx_buffer->dma_handle;
427 		dma_acch = rx_buffer->dma_acch;
428 
429 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
430 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
431 			lldev->bf_pool.head = rx_buffer;
432 			return (DDI_FAILURE);
433 		}
434 		ddi_dma_mem_free(&dma_acch);
435 		ddi_dma_free_handle(&dma_handle);
436 
437 		lldev->bf_pool.total--;
438 		lldev->bf_pool.free--;
439 	}
440 
441 	mutex_destroy(&lldev->bf_pool.pool_lock);
442 	return (DDI_SUCCESS);
443 }
444 
445 /*
446  * xgell_rx_create_buffer_pool
447  *
448  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
449  */
450 static int
451 xgell_rx_create_buffer_pool(xgelldev_t *lldev)
452 {
453 	xge_hal_device_t *hldev;
454 	xgell_rx_buffer_t *rx_buffer;
455 	int i;
456 
457 	hldev = (xge_hal_device_t *)lldev->devh;
458 
459 	lldev->bf_pool.total = 0;
460 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
461 	lldev->bf_pool.head = NULL;
462 	lldev->bf_pool.free = 0;
463 	lldev->bf_pool.post = 0;
464 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
465 
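	/*
	 * The pool lock is created with the HAL interrupt handle (hldev->irqh)
	 * as its interrupt block cookie, so it can be acquired from interrupt
	 * context (e.g. in the rx completion path).
	 */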
466 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
467 	    hldev->irqh);
468 
469 	/*
470 	 * Allocate buffers one by one. If any allocation fails, destroy the
471 	 * whole pool by calling xgell_rx_destroy_buffer_pool().
472 	 */
473 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
474 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
475 			(void) xgell_rx_destroy_buffer_pool(lldev);
476 			return (DDI_FAILURE);
477 		}
478 
479 		rx_buffer->next = lldev->bf_pool.head;
480 		lldev->bf_pool.head = rx_buffer;
481 
482 		lldev->bf_pool.total++;
483 		lldev->bf_pool.free++;
484 	}
485 
486 	return (DDI_SUCCESS);
487 }
488 
489 /*
490  * xgell_rx_dtr_replenish
491  *
492  * Replenish the descriptor with an rx_buffer from the RX buffer pool.
493  * The dtr should be posted right away.
494  */
495 xge_hal_status_e
496 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
497     void *userdata, xge_hal_channel_reopen_e reopen)
498 {
499 	xgell_ring_t *ring = userdata;
500 	xgelldev_t *lldev = ring->lldev;
501 	xgell_rx_buffer_t *rx_buffer;
502 	xgell_rxd_priv_t *rxd_priv;
503 
504 	if (lldev->bf_pool.head == NULL) {
505 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
506 		return (XGE_HAL_FAIL);
507 	}
508 	rx_buffer = lldev->bf_pool.head;
509 	lldev->bf_pool.head = rx_buffer->next;
510 	lldev->bf_pool.free--;
511 
512 	xge_assert(rx_buffer);
513 	xge_assert(rx_buffer->dma_addr);
514 
515 	rxd_priv = (xgell_rxd_priv_t *)
516 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
517 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
518 
519 	rxd_priv->rx_buffer = rx_buffer;
520 
521 	return (XGE_HAL_OK);
522 }
523 
524 /*
525  * xgell_get_ip_offset
526  *
527  * Calculate the offset to the IP header.
528  */
529 static inline int
530 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
531 {
532 	int ip_off;
533 
534 	/* get IP-header offset */
535 	switch (ext_info->frame) {
536 	case XGE_HAL_FRAME_TYPE_DIX:
537 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
538 		break;
539 	case XGE_HAL_FRAME_TYPE_IPX:
540 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
541 		    XGE_HAL_HEADER_802_2_SIZE +
542 		    XGE_HAL_HEADER_SNAP_SIZE);
543 		break;
544 	case XGE_HAL_FRAME_TYPE_LLC:
545 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
546 		    XGE_HAL_HEADER_802_2_SIZE);
547 		break;
548 	case XGE_HAL_FRAME_TYPE_SNAP:
549 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
550 		    XGE_HAL_HEADER_SNAP_SIZE);
551 		break;
552 	default:
553 		ip_off = 0;
554 		break;
555 	}
556 
557 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
558 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
559 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
560 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
561 	}
562 
563 	return (ip_off);
564 }
565 
566 /*
567  * xgell_rx_hcksum_assoc
568  *
569  * Determine the packet type and then call hcksum_assoc() to associate
570  * h/w checksum information.
571  */
572 static inline void
573 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
574     xge_hal_dtr_info_t *ext_info)
575 {
576 	int cksum_flags = 0;
577 
578 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
579 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
580 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
581 				cksum_flags |= HCK_IPV4_HDRCKSUM;
582 			}
583 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
584 				cksum_flags |= HCK_FULLCKSUM_OK;
585 			}
586 			if (cksum_flags) {
587 				cksum_flags |= HCK_FULLCKSUM;
588 				(void) hcksum_assoc(mp, NULL, NULL, 0,
589 				    0, 0, 0, cksum_flags, 0);
590 			}
591 		}
592 	} else if (ext_info->proto &
593 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
594 		/*
595 		 * Just pass the partial cksum up to IP.
596 		 */
597 		int ip_off = xgell_get_ip_offset(ext_info);
598 		int start, end = pkt_length - ip_off;
599 
600 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
601 			struct ip *ip =
602 			    (struct ip *)(vaddr + ip_off);
603 			start = ip->ip_hl * 4 + ip_off;
604 		} else {
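			/* IPv6: the fixed IPv6 header is 40 bytes long */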
605 			start = ip_off + 40;
606 		}
607 		cksum_flags |= HCK_PARTIALCKSUM;
608 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
609 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
610 		    0);
611 	}
612 }
613 
614 /*
615  * xgell_rx_1b_msg_alloc
616  *
617  * Allocate a message header for the data buffer, and decide whether to copy the
618  * packet into a new data buffer, releasing the big rx_buffer to save memory.
619  *
620  * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a
621  * new message and copy the payload in.
622  */
623 static mblk_t *
624 xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
625     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
626 {
627 	mblk_t *mp;
628 	char *vaddr;
629 
630 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
631 	/*
632 	 * Copy the packet into a newly allocated message buffer if a copy was
633 	 * requested or pkt_length is not greater than the configured rx_dma_lowat.
634 	 */
635 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
636 		if ((mp = allocb(pkt_length, 0)) == NULL) {
637 			return (NULL);
638 		}
639 		bcopy(vaddr, mp->b_rptr, pkt_length);
640 		mp->b_wptr = mp->b_rptr + pkt_length;
641 		*copyit = B_TRUE;
642 		return (mp);
643 	}
644 
645 	/*
646 	 * Just allocate mblk for current data buffer
647 	 */
648 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
649 	    &rx_buffer->frtn)) == NULL) {
650 		/* Drop it */
651 		return (NULL);
652 	}
653 	/*
654 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
655 	 */
656 	mp->b_wptr += pkt_length;
657 
658 	return (mp);
659 }
660 
661 /*
662  * xgell_rx_1b_compl
663  *
664  * This function is called if the interrupt is due to a received frame or if
665  * the receive ring contains fresh, as yet unprocessed frames.
666  */
667 static xge_hal_status_e
668 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
669     void *userdata)
670 {
671 	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
672 	xgell_rx_buffer_t *rx_buffer;
673 	mblk_t *mp_head = NULL;
674 	mblk_t *mp_end  = NULL;
675 	int pkt_burst = 0;
676 
677 	mutex_enter(&lldev->bf_pool.pool_lock);
678 
679 	do {
680 		int pkt_length;
681 		dma_addr_t dma_data;
682 		mblk_t *mp;
683 		boolean_t copyit = B_FALSE;
684 
685 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
686 		    xge_hal_ring_dtr_private(channelh, dtr));
687 		xge_hal_dtr_info_t ext_info;
688 
689 		rx_buffer = rxd_priv->rx_buffer;
690 
691 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
692 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
693 
694 		xge_assert(dma_data == rx_buffer->dma_addr);
695 
696 		if (t_code != 0) {
697 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
698 			    " completed due to error t_code %01x", XGELL_IFNAME,
699 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
700 
701 			(void) xge_hal_device_handle_tcode(channelh, dtr,
702 			    t_code);
703 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
704 			xgell_rx_buffer_release(rx_buffer);
705 			continue;
706 		}
707 
708 		/*
709 		 * Sync the DMA memory
710 		 */
711 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
712 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
713 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
714 			    XGELL_IFNAME, lldev->instance);
715 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
716 			xgell_rx_buffer_release(rx_buffer);
717 			continue;
718 		}
719 
720 		/*
721 		 * Allocate message for the packet.
722 		 */
723 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
724 			copyit = B_TRUE;
725 		} else {
726 			copyit = B_FALSE;
727 		}
728 
729 		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
730 		    &ext_info, &copyit);
731 
732 		xge_hal_ring_dtr_free(channelh, dtr);
733 
734 		/*
735 		 * Release the buffer and recycle it later
736 		 */
737 		if ((mp == NULL) || copyit) {
738 			xgell_rx_buffer_release(rx_buffer);
739 		} else {
740 			/*
741 			 * Count it since the buffer should be loaned up.
742 			 */
743 			lldev->bf_pool.post++;
744 		}
745 		if (mp == NULL) {
746 			xge_debug_ll(XGE_ERR,
747 			    "%s%d: rx: can not allocate mp mblk",
748 			    XGELL_IFNAME, lldev->instance);
749 			continue;
750 		}
751 
752 		/*
753 		 * Associate cksum_flags per packet type and h/w
754 		 * cksum flags.
755 		 */
756 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
757 		    HEADROOM, pkt_length, &ext_info);
758 
759 		if (mp_head == NULL) {
760 			mp_head = mp;
761 			mp_end = mp;
762 		} else {
763 			mp_end->b_next = mp;
764 			mp_end = mp;
765 		}
766 
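		/*
		 * Hand received packets up the stack in bursts of rx_pkt_burst;
		 * the pool lock is dropped across the mac_rx() upcall below and
		 * re-acquired afterwards.
		 */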
767 		if (++pkt_burst < lldev->config.rx_pkt_burst)
768 			continue;
769 
770 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
771 			/* Replenish rx buffers */
772 			xgell_rx_buffer_replenish_all(lldev);
773 		}
774 		mutex_exit(&lldev->bf_pool.pool_lock);
775 		if (mp_head != NULL) {
776 			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
777 			    mp_head);
778 		}
779 		mp_head = mp_end  = NULL;
780 		pkt_burst = 0;
781 		mutex_enter(&lldev->bf_pool.pool_lock);
782 
783 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
784 	    XGE_HAL_OK);
785 
786 	/*
787 	 * Always call replenish_all to recycle rx_buffers.
788 	 */
789 	xgell_rx_buffer_replenish_all(lldev);
790 	mutex_exit(&lldev->bf_pool.pool_lock);
791 
792 	if (mp_head != NULL) {
793 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
794 	}
795 
796 	return (XGE_HAL_OK);
797 }
798 
799 /*
800  * xgell_xmit_compl
801  *
802  * If an interrupt was raised to indicate DMA complete of the Tx packet,
803  * this function is called. It identifies the last TxD whose buffer was
804  * freed and frees all mblks whose data have already been DMA'ed into the
805  * NIC's internal memory.
806  */
807 static xge_hal_status_e
808 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
809     void *userdata)
810 {
811 	xgelldev_t *lldev = userdata;
812 
813 	do {
814 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
815 		    xge_hal_fifo_dtr_private(dtr));
816 		mblk_t *mp = txd_priv->mblk;
817 		int i;
818 
819 		if (t_code) {
820 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
821 			    " completed due to error t_code %01x", XGELL_IFNAME,
822 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
823 
824 			(void) xge_hal_device_handle_tcode(channelh, dtr,
825 			    t_code);
826 		}
827 
828 		for (i = 0; i < txd_priv->handle_cnt; i++) {
829 			xge_assert(txd_priv->dma_handles[i]);
830 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
831 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
832 			txd_priv->dma_handles[i] = 0;
833 		}
834 
835 		xge_hal_fifo_dtr_free(channelh, dtr);
836 
837 		freemsg(mp);
838 		lldev->resched_avail++;
839 
840 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
841 	    XGE_HAL_OK);
842 
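	/*
	 * If a transmit was deferred earlier because descriptors ran out
	 * (resched_retry), produce an XGELL_EVENT_RESCHED_NEEDED event so the
	 * blocked send path can be rescheduled now that descriptors have been
	 * freed.
	 */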
843 	if (lldev->resched_retry &&
844 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
845 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
846 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
847 		    XGELL_IFNAME, lldev->instance,
848 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
849 		lldev->resched_send = lldev->resched_avail;
850 		lldev->resched_retry = 0;
851 	}
852 
853 	return (XGE_HAL_OK);
854 }
855 
856 /*
857  * xgell_send
858  * @lldev: pointer to xgelldev_t structure
859  * @mp: pointer to network buffer, i.e. mblk_t structure
860  *
861  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
862  * A pointer to an M_DATA message that contains the packet is passed to
863  * this routine.
864  */
865 static boolean_t
866 xgell_send(xgelldev_t *lldev, mblk_t *mp)
867 {
868 	mblk_t *bp;
869 	boolean_t retry;
870 	xge_hal_device_t *hldev = lldev->devh;
871 	xge_hal_status_e status;
872 	xge_hal_dtr_h dtr;
873 	xgell_txd_priv_t *txd_priv;
874 	uint32_t hckflags;
875 	uint32_t mss;
876 	int handle_cnt, frag_cnt, ret, i, copied;
877 	boolean_t used_copy;
878 
879 _begin:
880 	retry = B_FALSE;
881 	handle_cnt = frag_cnt = 0;
882 
883 	if (!lldev->is_initialized || lldev->in_reset)
884 		return (B_FALSE);
885 
886 	/*
887 	 * If the count of free Tx dtrs reaches the lower threshold,
888 	 * tell the MAC layer to stop sending more packets until the free
889 	 * dtr count exceeds the higher threshold. The driver signals this
890 	 * by producing an XGELL_EVENT_RESCHED_NEEDED event once enough
891 	 * dtrs have been freed (see xgell_xmit_compl()).
892 	 */
893 	if (xge_hal_channel_dtr_count(lldev->fifo_channel)
894 	    <= XGELL_TX_LEVEL_LOW) {
895 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
896 		    " free descriptors count at low threshold %d",
897 		    XGELL_IFNAME, lldev->instance,
898 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
899 		    XGELL_TX_LEVEL_LOW);
900 		retry = B_TRUE;
901 		goto _exit;
902 	}
903 
904 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
905 	if (status != XGE_HAL_OK) {
906 		switch (status) {
907 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
908 			xge_debug_ll(XGE_ERR,
909 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
910 			    lldev->instance,
911 			    ((xge_hal_channel_t *)
912 			    lldev->fifo_channel)->post_qid);
913 			retry = B_TRUE;
914 			goto _exit;
915 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
916 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
917 			    " out of descriptors.", XGELL_IFNAME,
918 			    lldev->instance,
919 			    ((xge_hal_channel_t *)
920 			    lldev->fifo_channel)->post_qid);
921 			retry = B_TRUE;
922 			goto _exit;
923 		default:
924 			return (B_FALSE);
925 		}
926 	}
927 
928 	txd_priv = xge_hal_fifo_dtr_private(dtr);
929 	txd_priv->mblk = mp;
930 
931 	/*
932 	 * The VLAN tag should be passed down along with the MAC header, so the
933 	 * h/w needn't do the insertion.
934 	 *
935 	 * For a NIC driver that has to strip and re-insert the VLAN tag, an
936 	 * example is the other implementation for xge. The driver can simply
937 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let the
938 	 * h/w insert the tag automatically, since GLD never sends down mp(s)
939 	 * with a split ether_vlan_header.
940 	 *
941 	 * struct ether_vlan_header *evhp;
942 	 * uint16_t tci;
943 	 *
944 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
945 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
946 	 *	tci = ntohs(evhp->ether_tci);
947 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
948 	 *	    2 * ETHERADDRL);
949 	 *	mp->b_rptr += VLAN_TAGSZ;
950 	 *
951 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
952 	 * }
953 	 */
954 
955 	copied = 0;
956 	used_copy = B_FALSE;
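	/*
	 * Walk the mblk chain: fragments smaller than tx_dma_lowat are copied
	 * into the descriptor via xge_hal_fifo_dtr_buffer_append(), up to
	 * tx_copied_max bytes in total; larger fragments are bound for DMA
	 * with their own DMA handles.
	 */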
957 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
958 		int mblen;
959 		uint_t ncookies;
960 		ddi_dma_cookie_t dma_cookie;
961 		ddi_dma_handle_t dma_handle;
962 
963 		/* skip zero-length message blocks */
964 		mblen = MBLKL(bp);
965 		if (mblen == 0) {
966 			continue;
967 		}
968 
969 		/*
970 		 * Check the message length to decide whether to DMA or bcopy() data
971 		 * to tx descriptor(s).
972 		 */
973 		if (mblen < lldev->config.tx_dma_lowat &&
974 		    (copied + mblen) < lldev->tx_copied_max) {
975 			xge_hal_status_e rc;
976 			rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
977 			    dtr, bp->b_rptr, mblen);
978 			if (rc == XGE_HAL_OK) {
979 				used_copy = B_TRUE;
980 				copied += mblen;
981 				continue;
982 			} else if (used_copy) {
983 				xge_hal_fifo_dtr_buffer_finalize(
984 					lldev->fifo_channel, dtr, frag_cnt++);
985 				used_copy = B_FALSE;
986 			}
987 		} else if (used_copy) {
988 			xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
989 			    dtr, frag_cnt++);
990 			used_copy = B_FALSE;
991 		}
992 
993 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
994 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
995 		if (ret != DDI_SUCCESS) {
996 			xge_debug_ll(XGE_ERR,
997 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
998 			    lldev->instance);
999 			goto _exit_cleanup;
1000 		}
1001 
1002 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1003 		    (caddr_t)bp->b_rptr, mblen,
1004 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1005 		    &dma_cookie, &ncookies);
1006 
1007 		switch (ret) {
1008 		case DDI_DMA_MAPPED:
1009 			/* everything's fine */
1010 			break;
1011 
1012 		case DDI_DMA_NORESOURCES:
1013 			xge_debug_ll(XGE_ERR,
1014 			    "%s%d: can not bind dma address",
1015 			    XGELL_IFNAME, lldev->instance);
1016 			ddi_dma_free_handle(&dma_handle);
1017 			goto _exit_cleanup;
1018 
1019 		case DDI_DMA_NOMAPPING:
1020 		case DDI_DMA_INUSE:
1021 		case DDI_DMA_TOOBIG:
1022 		default:
1023 			/* drop packet, don't retry */
1024 			xge_debug_ll(XGE_ERR,
1025 			    "%s%d: can not map message buffer",
1026 			    XGELL_IFNAME, lldev->instance);
1027 			ddi_dma_free_handle(&dma_handle);
1028 			goto _exit_cleanup;
1029 		}
1030 
1031 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1032 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1033 			    "requested c:%d+f:%d", XGELL_IFNAME,
1034 			    lldev->instance, ncookies, frag_cnt);
1035 			(void) ddi_dma_unbind_handle(dma_handle);
1036 			ddi_dma_free_handle(&dma_handle);
1037 			goto _exit_cleanup;
1038 		}
1039 
1040 		/* setup the descriptors for this data buffer */
1041 		while (ncookies) {
1042 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1043 			    frag_cnt++, dma_cookie.dmac_laddress,
1044 			    dma_cookie.dmac_size);
1045 			if (--ncookies) {
1046 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1047 			}
1048 
1049 		}
1050 
1051 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1052 
1053 		if (bp->b_cont &&
1054 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1055 			hldev->config.fifo.max_frags)) {
1056 			mblk_t *nmp;
1057 
1058 			xge_debug_ll(XGE_TRACE,
1059 			    "too many FRAGs [%d], pulling them up", frag_cnt);
1060 
1061 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1062 				/* Drop packet, don't retry */
1063 				xge_debug_ll(XGE_ERR,
1064 				    "%s%d: can not pullup message buffer",
1065 				    XGELL_IFNAME, lldev->instance);
1066 				goto _exit_cleanup;
1067 			}
1068 			freemsg(bp->b_cont);
1069 			bp->b_cont = nmp;
1070 		}
1071 	}
1072 
1073 	/* finalize unfinished copies */
1074 	if (used_copy) {
1075 		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
1076 		    frag_cnt++);
1077 	}
1078 
1079 	txd_priv->handle_cnt = handle_cnt;
1080 
1081 	/*
1082 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1083 	 * do all necessary work.
1084 	 */
1085 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
1086 	if ((hckflags & HW_LSO) && (mss != 0)) {
1087 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1088 	}
1089 
1090 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1091 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1092 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1093 	}
1094 	if (hckflags & HCK_FULLCKSUM) {
1095 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1096 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1097 	}
1098 
1099 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1100 
1101 	return (B_TRUE);
1102 
1103 _exit_cleanup:
1104 
1105 	for (i = 0; i < handle_cnt; i++) {
1106 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1107 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1108 		txd_priv->dma_handles[i] = 0;
1109 	}
1110 
1111 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1112 
1113 _exit:
1114 	if (retry) {
1115 		if (lldev->resched_avail != lldev->resched_send &&
1116 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1117 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1118 			lldev->resched_send = lldev->resched_avail;
1119 			return (B_FALSE);
1120 		} else {
1121 			lldev->resched_retry = 1;
1122 		}
1123 	}
1124 
1125 	freemsg(mp);
1126 	return (B_TRUE);
1127 }
1128 
1129 /*
1130  * xgell_m_tx
1131  * @arg: pointer to the xgelldev_t structure
1132  * @mp: pointer to the message buffer chain
1133  *
1134  * Called by the MAC layer to send a chain of packets; returns any
1135  * unsent remainder of the chain.
1136  */
1137 static mblk_t *
1138 xgell_m_tx(void *arg, mblk_t *mp)
1139 {
1140 	xgelldev_t *lldev = arg;
1141 	mblk_t *next;
1142 
1143 	while (mp != NULL) {
1144 		next = mp->b_next;
1145 		mp->b_next = NULL;
1146 
1147 		if (!xgell_send(lldev, mp)) {
1148 			mp->b_next = next;
1149 			break;
1150 		}
1151 		mp = next;
1152 	}
1153 
1154 	return (mp);
1155 }
1156 
1157 /*
1158  * xgell_rx_dtr_term
1159  *
1160  * Function will be called by HAL to terminate all DTRs for
1161  * Ring(s) type of channels.
1162  */
1163 static void
1164 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1165     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1166 {
1167 	xgell_rxd_priv_t *rxd_priv =
1168 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1169 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1170 
1171 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1172 		xgelldev_t *lldev = rx_buffer->lldev;
1173 
1174 		mutex_enter(&lldev->bf_pool.pool_lock);
1175 		xge_hal_ring_dtr_free(channelh, dtrh);
1176 		xgell_rx_buffer_release(rx_buffer);
1177 		mutex_exit(&lldev->bf_pool.pool_lock);
1178 	}
1179 }
1180 
1181 /*
1182  * xgell_tx_term
1183  *
1184  * This function will be called by the HAL to terminate all DTRs for
1185  * Fifo-type channels.
1186  */
1187 static void
1188 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1189     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1190 {
1191 	xgell_txd_priv_t *txd_priv =
1192 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1193 	mblk_t *mp = txd_priv->mblk;
1194 	int i;
1195 
1196 	/*
1197 	 * for Tx we must clean up the DTR *only* if it has been
1198 	 * posted!
1199 	 */
1200 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1201 		return;
1202 	}
1203 
1204 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1205 		xge_assert(txd_priv->dma_handles[i]);
1206 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1207 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1208 		txd_priv->dma_handles[i] = 0;
1209 	}
1210 
1211 	xge_hal_fifo_dtr_free(channelh, dtrh);
1212 
1213 	freemsg(mp);
1214 }
1215 
1216 /*
1217  * xgell_tx_open
1218  * @lldev: the link layer object
1219  *
1220  * Initialize and open all Tx channels;
1221  */
1222 static boolean_t
1223 xgell_tx_open(xgelldev_t *lldev)
1224 {
1225 	xge_hal_status_e status;
1226 	u64 adapter_status;
1227 	xge_hal_channel_attr_t attr;
1228 
1229 	attr.post_qid		= 0;
1230 	attr.compl_qid		= 0;
1231 	attr.callback		= xgell_xmit_compl;
1232 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1233 	attr.flags		= 0;
1234 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1235 	attr.userdata		= lldev;
1236 	attr.dtr_init		= NULL;
1237 	attr.dtr_term		= xgell_tx_term;
1238 
1239 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1240 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1241 		    "adapter status reads 0x%"PRIx64, XGELL_IFNAME,
1242 		    lldev->instance, (uint64_t)adapter_status);
1243 		return (B_FALSE);
1244 	}
1245 
1246 	status = xge_hal_channel_open(lldev->devh, &attr,
1247 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1248 	if (status != XGE_HAL_OK) {
1249 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1250 		    "got status code %d", XGELL_IFNAME,
1251 		    lldev->instance, status);
1252 		return (B_FALSE);
1253 	}
1254 
1255 	return (B_TRUE);
1256 }
1257 
1258 /*
1259  * xgell_rx_open
1260  * @lldev: the link layer object
1261  *
1262  * Initialize and open all Rx channels;
1263  */
1264 static boolean_t
1265 xgell_rx_open(xgelldev_t *lldev)
1266 {
1267 	xge_hal_status_e status;
1268 	u64 adapter_status;
1269 	xge_hal_channel_attr_t attr;
1270 
1271 	attr.post_qid		= XGELL_RING_MAIN_QID;
1272 	attr.compl_qid		= 0;
1273 	attr.callback		= xgell_rx_1b_compl;
1274 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1275 	attr.flags		= 0;
1276 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1277 	attr.dtr_init		= xgell_rx_dtr_replenish;
1278 	attr.dtr_term		= xgell_rx_dtr_term;
1279 
1280 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1281 		xge_debug_ll(XGE_ERR,
1282 		    "%s%d: device is not ready adapter status reads 0x%"PRIx64,
1283 		    XGELL_IFNAME, lldev->instance,
1284 		    (uint64_t)adapter_status);
1285 		return (B_FALSE);
1286 	}
1287 
1288 	lldev->ring_main.lldev = lldev;
1289 	attr.userdata = &lldev->ring_main;
1290 
1291 	status = xge_hal_channel_open(lldev->devh, &attr,
1292 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1293 	if (status != XGE_HAL_OK) {
1294 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1295 		    " code %d", XGELL_IFNAME, lldev->instance, status);
1296 		return (B_FALSE);
1297 	}
1298 
1299 	return (B_TRUE);
1300 }
1301 
1302 static int
1303 xgell_initiate_start(xgelldev_t *lldev)
1304 {
1305 	xge_hal_status_e status;
1306 	xge_hal_device_t *hldev = lldev->devh;
1307 	int maxpkt = hldev->config.mtu;
1308 
1309 	/* check initial mtu before enabling the device */
1310 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1311 	if (status != XGE_HAL_OK) {
1312 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1313 		    XGELL_IFNAME, lldev->instance, maxpkt);
1314 		return (EINVAL);
1315 	}
1316 
1317 	/* set initial mtu before enabling the device */
1318 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1319 	if (status != XGE_HAL_OK) {
1320 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1321 		    XGELL_IFNAME, lldev->instance, maxpkt);
1322 		return (EIO);
1323 	}
1324 
1325 	/* tune jumbo/normal frame UFC counters */
1326 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b =
1327 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1328 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1329 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1330 
1331 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c =
1332 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1333 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1334 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1335 
1336 	/* now, enable the device */
1337 	status = xge_hal_device_enable(lldev->devh);
1338 	if (status != XGE_HAL_OK) {
1339 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1340 		    XGELL_IFNAME, lldev->instance);
1341 		return (EIO);
1342 	}
1343 
1344 	if (!xgell_rx_open(lldev)) {
1345 		status = xge_hal_device_disable(lldev->devh);
1346 		if (status != XGE_HAL_OK) {
1347 			u64 adapter_status;
1348 			(void) xge_hal_device_status(lldev->devh,
1349 			    &adapter_status);
1350 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1351 			    "the device. adapter status 0x%"PRIx64
1352 			    " returned status %d",
1353 			    XGELL_IFNAME, lldev->instance,
1354 			    (uint64_t)adapter_status, status);
1355 		}
1356 		xge_os_mdelay(1500);
1357 		return (ENOMEM);
1358 	}
1359 
1360 	if (!xgell_tx_open(lldev)) {
1361 		status = xge_hal_device_disable(lldev->devh);
1362 		if (status != XGE_HAL_OK) {
1363 			u64 adapter_status;
1364 			(void) xge_hal_device_status(lldev->devh,
1365 			    &adapter_status);
1366 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1367 			    "the device. adapter status 0x%"PRIx64
1368 			    " returned status %d",
1369 			    XGELL_IFNAME, lldev->instance,
1370 			    (uint64_t)adapter_status, status);
1371 		}
1372 		xge_os_mdelay(1500);
1373 		xge_hal_channel_close(lldev->ring_main.channelh,
1374 		    XGE_HAL_CHANNEL_OC_NORMAL);
1375 		return (ENOMEM);
1376 	}
1377 
1378 	/* time to enable interrupts */
1379 	xge_hal_device_intr_enable(lldev->devh);
1380 
1381 	lldev->is_initialized = 1;
1382 
1383 	return (0);
1384 }
1385 
1386 static void
1387 xgell_initiate_stop(xgelldev_t *lldev)
1388 {
1389 	xge_hal_status_e status;
1390 
1391 	lldev->is_initialized = 0;
1392 
1393 	status = xge_hal_device_disable(lldev->devh);
1394 	if (status != XGE_HAL_OK) {
1395 		u64 adapter_status;
1396 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1397 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1398 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1399 		    XGELL_IFNAME, lldev->instance,
1400 		    (uint64_t)adapter_status, status);
1401 	}
1402 	xge_hal_device_intr_disable(lldev->devh);
1403 
1404 	xge_debug_ll(XGE_TRACE, "%s",
1405 	    "waiting for device irq to become quiescent...");
1406 	xge_os_mdelay(1500);
1407 
1408 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1409 
1410 	xge_hal_channel_close(lldev->ring_main.channelh,
1411 	    XGE_HAL_CHANNEL_OC_NORMAL);
1412 
1413 	xge_hal_channel_close(lldev->fifo_channel,
1414 	    XGE_HAL_CHANNEL_OC_NORMAL);
1415 }
1416 
1417 /*
1418  * xgell_m_start
1419  * @arg: pointer to the device private structure (lldev)
1420  *
1421  * This function is called by the MAC layer to enable the XFRAME
1422  * firmware to generate interrupts and also to prepare the
1423  * driver to call mac_rx() for delivering received packets
1424  * to the MAC layer.
1425  */
1426 static int
1427 xgell_m_start(void *arg)
1428 {
1429 	xgelldev_t *lldev = arg;
1430 	xge_hal_device_t *hldev = lldev->devh;
1431 	int ret;
1432 
1433 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1434 	    lldev->instance);
1435 
1436 	mutex_enter(&lldev->genlock);
1437 
1438 	if (lldev->is_initialized) {
1439 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1440 		    XGELL_IFNAME, lldev->instance);
1441 		mutex_exit(&lldev->genlock);
1442 		return (EINVAL);
1443 	}
1444 
1445 	hldev->terminating = 0;
1446 	if (ret = xgell_initiate_start(lldev)) {
1447 		mutex_exit(&lldev->genlock);
1448 		return (ret);
1449 	}
1450 
1451 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1452 
1453 	mutex_exit(&lldev->genlock);
1454 
1455 	return (0);
1456 }
1457 
1458 /*
1459  * xgell_m_stop
1460  * @arg: pointer to device private data (lldev)
1461  *
1462  * This function is called by the MAC Layer to disable
1463  * the XFRAME firmware for generating any interrupts and
1464  * also stop the driver from calling mac_rx() for
1465  * delivering data packets to the MAC Layer.
1466  */
1467 static void
1468 xgell_m_stop(void *arg)
1469 {
1470 	xgelldev_t *lldev = arg;
1471 	xge_hal_device_t *hldev = lldev->devh;
1472 
1473 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1474 
1475 	mutex_enter(&lldev->genlock);
1476 	if (!lldev->is_initialized) {
1477 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1478 		mutex_exit(&lldev->genlock);
1479 		return;
1480 	}
1481 
1482 	xge_hal_device_terminating(hldev);
1483 	xgell_initiate_stop(lldev);
1484 
1485 	/* reset device */
1486 	(void) xge_hal_device_reset(lldev->devh);
1487 
1488 	mutex_exit(&lldev->genlock);
1489 
1490 	if (lldev->timeout_id != 0) {
1491 		(void) untimeout(lldev->timeout_id);
1492 	}
1493 
1494 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1495 }
1496 
1497 /*
1498  * xgell_onerr_reset
1499  * @lldev: pointer to xgelldev_t structure
1500  *
1501  * This function is called by the HAL event framework to reset the HW.
1502  * It must be called with genlock held.
1503  */
1504 int
1505 xgell_onerr_reset(xgelldev_t *lldev)
1506 {
1507 	int rc = 0;
1508 
1509 	if (!lldev->is_initialized) {
1510 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1511 		    XGELL_IFNAME, lldev->instance);
1512 		return (rc);
1513 	}
1514 
1515 	lldev->in_reset = 1;
1516 	xgell_initiate_stop(lldev);
1517 
1518 	/* reset device */
1519 	(void) xge_hal_device_reset(lldev->devh);
1520 
1521 	rc = xgell_initiate_start(lldev);
1522 	lldev->in_reset = 0;
1523 
1524 	return (rc);
1525 }
1526 
1527 
1528 /*
1529  * xgell_m_unicst
1530  * @arg: pointer to the device private structure (lldev)
1531  * @macaddr: the MAC address to program into the device
1532  *
1533  * This function is called by the MAC layer to set the physical address
1534  * of the XFRAME firmware.
1535  */
1536 static int
1537 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1538 {
1539 	xge_hal_status_e status;
1540 	xgelldev_t *lldev = (xgelldev_t *)arg;
1541 	xge_hal_device_t *hldev = lldev->devh;
1542 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1543 
1544 	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1545 
1546 	mutex_enter(&lldev->genlock);
1547 
1548 	xge_debug_ll(XGE_TRACE,
1549 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1550 	    macaddr[0], macaddr[1], macaddr[2],
1551 	    macaddr[3], macaddr[4], macaddr[5]);
1552 
1553 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1554 	if (status != XGE_HAL_OK) {
1555 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1556 		    XGELL_IFNAME, lldev->instance);
1557 		mutex_exit(&lldev->genlock);
1558 		return (EIO);
1559 	}
1560 
1561 	mutex_exit(&lldev->genlock);
1562 
1563 	return (0);
1564 }
1565 
1566 
1567 /*
1568  * xgell_m_multicst
1569  * @arg: pointer to the device private structure (lldev)
1570  * @add: B_TRUE to enable reception, B_FALSE to disable it
1571  * @mc_addr: the multicast address
1572  *
1573  * This function is called by MAC Layer to enable or
1574  * disable device-level reception of specific multicast addresses.
1575  */
1576 static int
1577 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1578 {
1579 	xge_hal_status_e status;
1580 	xgelldev_t *lldev = (xgelldev_t *)arg;
1581 	xge_hal_device_t *hldev = lldev->devh;
1582 
1583 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1584 
1585 	mutex_enter(&lldev->genlock);
1586 
1587 	if (!lldev->is_initialized) {
1588 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1589 		    XGELL_IFNAME, lldev->instance);
1590 		mutex_exit(&lldev->genlock);
1591 		return (EIO);
1592 	}
1593 
1594 	/* FIXME: missing HAL functionality: enable_one() */
1595 
1596 	status = (add) ?
1597 	    xge_hal_device_mcast_enable(hldev) :
1598 	    xge_hal_device_mcast_disable(hldev);
1599 
1600 	if (status != XGE_HAL_OK) {
1601 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1602 		    add ? "enable" : "disable", status);
1603 		mutex_exit(&lldev->genlock);
1604 		return (EIO);
1605 	}
1606 
1607 	mutex_exit(&lldev->genlock);
1608 
1609 	return (0);
1610 }
1611 
1612 
1613 /*
1614  * xgell_m_promisc
1615  * @arg: pointer to the device private structure (lldev)
1616  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
1617  *
1618  * This function is called by MAC Layer to enable or
1619  * disable the reception of all the packets on the medium
1620  */
1621 static int
1622 xgell_m_promisc(void *arg, boolean_t on)
1623 {
1624 	xgelldev_t *lldev = (xgelldev_t *)arg;
1625 	xge_hal_device_t *hldev = lldev->devh;
1626 
1627 	mutex_enter(&lldev->genlock);
1628 
1629 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1630 
1631 	if (!lldev->is_initialized) {
1632 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1633 		    XGELL_IFNAME, lldev->instance);
1634 		mutex_exit(&lldev->genlock);
1635 		return (EIO);
1636 	}
1637 
1638 	if (on) {
1639 		xge_hal_device_promisc_enable(hldev);
1640 	} else {
1641 		xge_hal_device_promisc_disable(hldev);
1642 	}
1643 
1644 	mutex_exit(&lldev->genlock);
1645 
1646 	return (0);
1647 }
1648 
1649 /*
1650  * xgell_m_stat
1651  * @arg: pointer to the device private structure (lldev)
1652  *
1653  * This function is called by MAC Layer to get network statistics
1654  * from the driver.
1655  */
1656 static int
1657 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1658 {
1659 	xge_hal_stats_hw_info_t *hw_info;
1660 	xgelldev_t *lldev = (xgelldev_t *)arg;
1661 	xge_hal_device_t *hldev = lldev->devh;
1662 
1663 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1664 
1665 	if (!mutex_tryenter(&lldev->genlock))
1666 		return (EAGAIN);
1667 
1668 	if (!lldev->is_initialized) {
1669 		mutex_exit(&lldev->genlock);
1670 		return (EAGAIN);
1671 	}
1672 
1673 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1674 		mutex_exit(&lldev->genlock);
1675 		return (EAGAIN);
1676 	}
1677 
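	/*
	 * Most of the h/w counters below are 32 bits wide and have a separate
	 * overflow register; the two are combined here into one 64-bit value.
	 */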
1678 	switch (stat) {
1679 	case MAC_STAT_IFSPEED:
1680 		*val = 10000000000ull; /* 10G */
1681 		break;
1682 
1683 	case MAC_STAT_MULTIRCV:
1684 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
1685 		    hw_info->rmac_vld_mcst_frms;
1686 		break;
1687 
1688 	case MAC_STAT_BRDCSTRCV:
1689 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
1690 		    hw_info->rmac_vld_bcst_frms;
1691 		break;
1692 
1693 	case MAC_STAT_MULTIXMT:
1694 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
1695 		    hw_info->tmac_mcst_frms;
1696 		break;
1697 
1698 	case MAC_STAT_BRDCSTXMT:
1699 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
1700 		    hw_info->tmac_bcst_frms;
1701 		break;
1702 
1703 	case MAC_STAT_RBYTES:
1704 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
1705 		    hw_info->rmac_ttl_octets;
1706 		break;
1707 
1708 	case MAC_STAT_NORCVBUF:
1709 		*val = hw_info->rmac_drop_frms;
1710 		break;
1711 
1712 	case MAC_STAT_IERRORS:
1713 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
1714 		    hw_info->rmac_discarded_frms;
1715 		break;
1716 
1717 	case MAC_STAT_OBYTES:
1718 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
1719 		    hw_info->tmac_ttl_octets;
1720 		break;
1721 
1722 	case MAC_STAT_NOXMTBUF:
1723 		*val = hw_info->tmac_drop_frms;
1724 		break;
1725 
1726 	case MAC_STAT_OERRORS:
1727 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
1728 		    hw_info->tmac_any_err_frms;
1729 		break;
1730 
1731 	case MAC_STAT_IPACKETS:
1732 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
1733 		    hw_info->rmac_vld_frms;
1734 		break;
1735 
1736 	case MAC_STAT_OPACKETS:
1737 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
1738 		    hw_info->tmac_frms;
1739 		break;
1740 
1741 	case ETHER_STAT_FCS_ERRORS:
1742 		*val = hw_info->rmac_fcs_err_frms;
1743 		break;
1744 
1745 	case ETHER_STAT_TOOLONG_ERRORS:
1746 		*val = hw_info->rmac_long_frms;
1747 		break;
1748 
1749 	case ETHER_STAT_LINK_DUPLEX:
1750 		*val = LINK_DUPLEX_FULL;
1751 		break;
1752 
1753 	default:
1754 		mutex_exit(&lldev->genlock);
1755 		return (ENOTSUP);
1756 	}
1757 
1758 	mutex_exit(&lldev->genlock);
1759 
1760 	return (0);
1761 }
1762 
1763 /*
1764  * xgell_device_alloc - Allocate new LL device
1765  */
1766 int
1767 xgell_device_alloc(xge_hal_device_h devh,
1768     dev_info_t *dev_info, xgelldev_t **lldev_out)
1769 {
1770 	xgelldev_t *lldev;
1771 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1772 	int instance = ddi_get_instance(dev_info);
1773 
1774 	*lldev_out = NULL;
1775 
1776 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
1777 	    XGELL_IFNAME, instance);
1778 
1779 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1780 
1781 	lldev->devh = hldev;
1782 	lldev->instance = instance;
1783 	lldev->dev_info = dev_info;
1784 
1785 	*lldev_out = lldev;
1786 
1787 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1788 
1789 	return (DDI_SUCCESS);
1790 }
1791 
1792 /*
1793  * xgell_device_free
1794  */
1795 void
1796 xgell_device_free(xgelldev_t *lldev)
1797 {
1798 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1799 	    XGELL_IFNAME, lldev->instance);
1800 
1801 	kmem_free(lldev, sizeof (xgelldev_t));
1802 }
1803 
1804 /*
1805  * xgell_ioctl
1806  */
1807 static void
1808 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1809 {
1810 	xgelldev_t *lldev = arg;
1811 	struct iocblk *iocp;
1812 	int err = 0;
1813 	int cmd;
1814 	int need_privilege = 1;
1815 	int ret = 0;
1816 
1817 
1818 	iocp = (struct iocblk *)mp->b_rptr;
1819 	iocp->ioc_error = 0;
1820 	cmd = iocp->ioc_cmd;
1821 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1822 	switch (cmd) {
1823 	case ND_GET:
1824 		need_privilege = 0;
1825 		/* FALLTHRU */
1826 	case ND_SET:
1827 		break;
1828 	default:
1829 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1830 		miocnak(wq, mp, 0, EINVAL);
1831 		return;
1832 	}
1833 
1834 	if (need_privilege) {
1835 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1836 		if (err != 0) {
1837 			xge_debug_ll(XGE_ERR,
1838 			    "drv_priv(): rejected cmd 0x%x, err %d",
1839 			    cmd, err);
1840 			miocnak(wq, mp, 0, err);
1841 			return;
1842 		}
1843 	}
1844 
1845 	switch (cmd) {
1846 	case ND_GET:
1847 		/*
1848 		 * If nd_getset() returns B_FALSE, the command was
1849 		 * not valid (e.g. unknown name), so we just tell the
1850 		 * top-level ioctl code to send a NAK (with code EINVAL).
1851 		 *
1852 		 * Otherwise, nd_getset() will have built the reply to
1853 		 * be sent (but not actually sent it), so we tell the
1854 		 * caller to send the prepared reply.
1855 		 */
1856 		ret = nd_getset(wq, lldev->ndp, mp);
1857 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1858 		break;
1859 
1860 	case ND_SET:
1861 		ret = nd_getset(wq, lldev->ndp, mp);
1862 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1863 		break;
1864 
1865 	default:
1866 		break;
1867 	}
1868 
1869 	if (ret == B_FALSE) {
1870 		xge_debug_ll(XGE_ERR,
1871 		    "nd_getset(): rejected cmd 0x%x, err %d",
1872 		    cmd, err);
1873 		miocnak(wq, mp, 0, EINVAL);
1874 	} else {
1875 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1876 		    M_IOCACK : M_IOCNAK;
1877 		qreply(wq, mp);
1878 	}
1879 }
1880 
1881 /* ARGSUSED */
1882 static boolean_t
1883 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1884 {
1885 	xgelldev_t *lldev = arg;
1886 
1887 	switch (cap) {
1888 	case MAC_CAPAB_HCKSUM: {
1889 		uint32_t *hcksum_txflags = cap_data;
1890 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
1891 		    HCKSUM_IPHDRCKSUM;
1892 		break;
1893 	}
1894 	case MAC_CAPAB_LSO: {
1895 		mac_capab_lso_t *cap_lso = cap_data;
1896 
1897 		if (lldev->config.lso_enable) {
1898 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1899 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
1900 			break;
1901 		} else {
1902 			return (B_FALSE);
1903 		}
1904 	}
1905 	default:
1906 		return (B_FALSE);
1907 	}
1908 	return (B_TRUE);
1909 }
1910 
1911 static int
1912 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1913 {
1914 	xgelldev_t *lldev = (xgelldev_t *)cp;
1915 	xge_hal_status_e status;
1916 	int count = 0, retsize;
1917 	char *buf;
1918 
1919 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
1920 	if (buf == NULL) {
1921 		return (ENOSPC);
1922 	}
1923 
1924 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
1925 	    buf, &retsize);
1926 	if (status != XGE_HAL_OK) {
1927 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1928 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
1929 		return (EINVAL);
1930 	}
1931 	count += retsize;
1932 
1933 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
1934 	    XGELL_STATS_BUFSIZE - count,
1935 	    buf+count, &retsize);
1936 	if (status != XGE_HAL_OK) {
1937 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1938 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
1939 		return (EINVAL);
1940 	}
1941 	count += retsize;
1942 
1943 	status = xge_hal_aux_stats_pci_read(lldev->devh,
1944 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1945 	if (status != XGE_HAL_OK) {
1946 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1947 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
1948 		return (EINVAL);
1949 	}
1950 	count += retsize;
1951 
1952 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
1953 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1954 	if (status != XGE_HAL_OK) {
1955 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1956 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
1957 		return (EINVAL);
1958 	}
1959 	count += retsize;
1960 
1961 	status = xge_hal_aux_stats_hal_read(lldev->devh,
1962 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1963 	if (status != XGE_HAL_OK) {
1964 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1965 		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
1966 		return (EINVAL);
1967 	}
1968 	count += retsize;
1969 
1970 	*(buf + count - 1) = '\0'; /* remove last '\n' */
1971 	(void) mi_mpprintf(mp, "%s", buf);
1972 	kmem_free(buf, XGELL_STATS_BUFSIZE);
1973 
1974 	return (0);
1975 }
1976 
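/*
 * xgell_pciconf_get
 *
 * ndd "pciconf" read handler: dumps the adapter's PCI configuration
 * space as formatted by the HAL.
 */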
1977 static int
1978 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1979 {
1980 	xgelldev_t *lldev = (xgelldev_t *)cp;
1981 	xge_hal_status_e status;
1982 	int retsize;
1983 	char *buf;
1984 
1985 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
1986 	if (buf == NULL) {
1987 		return (ENOSPC);
1988 	}
1989 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
1990 	    buf, &retsize);
1991 	if (status != XGE_HAL_OK) {
1992 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
1993 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
1994 		return (EINVAL);
1995 	}
1996 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
1997 	(void) mi_mpprintf(mp, "%s", buf);
1998 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
1999 
2000 	return (0);
2001 }
2002 
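/*
 * xgell_about_get
 *
 * ndd "about" read handler: prints the adapter and driver
 * identification information provided by xge_hal_aux_about_read().
 */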
2003 static int
2004 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2005 {
2006 	xgelldev_t *lldev = (xgelldev_t *)cp;
2007 	xge_hal_status_e status;
2008 	int retsize;
2009 	char *buf;
2010 
2011 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2012 	if (buf == NULL) {
2013 		return (ENOSPC);
2014 	}
2015 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2016 	    buf, &retsize);
2017 	if (status != XGE_HAL_OK) {
2018 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2019 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2020 		return (EINVAL);
2021 	}
2022 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2023 	(void) mi_mpprintf(mp, "%s", buf);
2024 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2025 
2026 	return (0);
2027 }
2028 
2029 static unsigned long bar0_offset = 0x110; /* adapter_control */
2030 
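/*
 * xgell_bar0_get
 *
 * ndd "bar0" read handler: dumps the BAR0 register at the offset
 * currently selected through xgell_bar0_set() (bar0_offset).
 */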
2031 static int
2032 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2033 {
2034 	xgelldev_t *lldev = (xgelldev_t *)cp;
2035 	xge_hal_status_e status;
2036 	int retsize;
2037 	char *buf;
2038 
2039 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2040 	if (buf == NULL) {
2041 		return (ENOSPC);
2042 	}
2043 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2044 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2045 	if (status != XGE_HAL_OK) {
2046 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2047 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2048 		return (EINVAL);
2049 	}
2050 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2051 	(void) mi_mpprintf(mp, "%s", buf);
2052 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2053 
2054 	return (0);
2055 }
2056 
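/*
 * xgell_bar0_set
 *
 * ndd "bar0" write handler: parses the supplied value as a hex
 * register offset (with or without a leading "0x") and remembers it
 * in bar0_offset for subsequent reads.
 */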
2057 static int
2058 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2059 {
2060 	unsigned long old_offset = bar0_offset;
2061 	char *end;
2062 
2063 	if (value && *value == '0' &&
2064 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2065 		value += 2;
2066 	}
2067 
2068 	bar0_offset = mi_strtol(value, &end, 16);
2069 	if (end == value) {
2070 		bar0_offset = old_offset;
2071 		return (EINVAL);
2072 	}
2073 
2074 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2075 
2076 	return (0);
2077 }
2078 
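/*
 * xgell_debug_level_get
 *
 * ndd "debug_level" read handler: reports the current HAL debug
 * tracing level.
 */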
2079 static int
2080 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2081 {
2082 	char *buf;
2083 
2084 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2085 	if (buf == NULL) {
2086 		return (ENOSPC);
2087 	}
2088 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2089 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2090 
2091 	return (0);
2092 }
2093 
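/*
 * xgell_debug_level_set
 *
 * ndd "debug_level" write handler: accepts a decimal level between
 * XGE_NONE and XGE_ERR and passes it to the HAL.
 */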
2094 static int
2095 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2096     cred_t *credp)
2097 {
2098 	int level;
2099 	char *end;
2100 
2101 	level = mi_strtol(value, &end, 10);
2102 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2103 		return (EINVAL);
2104 	}
2105 
2106 	xge_hal_driver_debug_level_set(level);
2107 
2108 	return (0);
2109 }
2110 
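/*
 * xgell_debug_module_mask_get
 *
 * ndd "debug_module_mask" read handler: reports the HAL debug module
 * mask as a hex value.
 */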
2111 static int
2112 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2113 {
2114 	char *buf;
2115 
2116 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2117 	if (buf == NULL) {
2118 		return (ENOSPC);
2119 	}
2120 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2121 	    xge_hal_driver_debug_module_mask());
2122 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2123 
2124 	return (0);
2125 }
2126 
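/*
 * xgell_debug_module_mask_set
 *
 * ndd "debug_module_mask" write handler: parses a hex mask (with or
 * without a leading "0x") and passes it to the HAL.
 */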
2127 static int
2128 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2129     cred_t *credp)
2130 {
2131 	u32 mask;
2132 	char *end;
2133 
2134 	if (value && *value == '0' &&
2135 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2136 		value += 2;
2137 	}
2138 
2139 	mask = mi_strtol(value, &end, 16);
2140 	if (end == value) {
2141 		return (EINVAL);
2142 	}
2143 
2144 	xge_hal_driver_debug_module_mask_set(mask);
2145 
2146 	return (0);
2147 }
2148 
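/*
 * xgell_devconfig_get
 *
 * ndd "devconfig" read handler: dumps the current HAL device
 * configuration.
 */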
2149 static int
2150 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2151 {
2152 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2153 	xge_hal_status_e status;
2154 	int retsize;
2155 	char *buf;
2156 
2157 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2158 	if (buf == NULL) {
2159 		return (ENOSPC);
2160 	}
2161 	status = xge_hal_aux_device_config_read(lldev->devh,
2162 	    XGELL_DEVCONF_BUFSIZE,
2163 	    buf, &retsize);
2164 	if (status != XGE_HAL_OK) {
2165 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2166 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2167 		    status);
2168 		return (EINVAL);
2169 	}
2170 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2171 	(void) mi_mpprintf(mp, "%s", buf);
2172 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2173 
2174 	return (0);
2175 }
2176 
2177 /*
2178  * xgell_device_register
2179  * @lldev: pointer to valid LL device.
2180  * @config: pointer to this network device's configuration.
2181  *
2182  * This function loads the ndd parameters and registers the network
2183  * device with the MAC layer.
2184  */
2185 int
2186 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2187 {
2188 	mac_register_t *macp = NULL;
2189 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2190 
2191 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2192 	    (caddr_t)lldev) == B_FALSE)
2193 		goto xgell_ndd_fail;
2194 
2195 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2196 	    (caddr_t)lldev) == B_FALSE)
2197 		goto xgell_ndd_fail;
2198 
2199 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2200 	    (caddr_t)lldev) == B_FALSE)
2201 		goto xgell_ndd_fail;
2202 
2203 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2204 	    (caddr_t)lldev) == B_FALSE)
2205 		goto xgell_ndd_fail;
2206 
2207 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2208 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2209 		goto xgell_ndd_fail;
2210 
2211 	if (nd_load(&lldev->ndp, "debug_module_mask",
2212 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2213 	    (caddr_t)lldev) == B_FALSE)
2214 		goto xgell_ndd_fail;
2215 
2216 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2217 	    (caddr_t)lldev) == B_FALSE)
2218 		goto xgell_ndd_fail;
2219 
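	/*
	 * The parameters loaded above are serviced by xgell_m_ioctl()
	 * through ND_GET/ND_SET and are typically inspected with the
	 * ndd(1M) utility, for example (device path illustrative only,
	 * it depends on how the node is named on a given system):
	 *
	 *	ndd /dev/xge stats
	 *	ndd /dev/xge devconfig
	 */
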
2220 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2221 
2222 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2223 		nd_free(&lldev->ndp);
2224 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2225 		return (DDI_FAILURE);
2226 	}
2227 
2228 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2229 
2230 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2231 		goto xgell_register_fail;
2232 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2233 	macp->m_driver = lldev;
2234 	macp->m_dip = lldev->dev_info;
2235 	macp->m_src_addr = hldev->macaddr[0];
2236 	macp->m_callbacks = &xgell_m_callbacks;
2237 	macp->m_min_sdu = 0;
2238 	macp->m_max_sdu = hldev->config.mtu;
2239 	macp->m_margin = VLAN_TAGSZ;
2240 	/*
2241 	 * Finally, we're ready to register ourselves with the Nemo
2242 	 * interface; if this succeeds, we're all ready to start()
2243 	 */
2244 
2245 	if (mac_register(macp, &lldev->mh) != 0)
2246 		goto xgell_register_fail;
2247 
2248 	/* Always free the macp after register */
2249 	if (macp != NULL)
2250 		mac_free(macp);
2251 
2252 	/* Max bytes the Tx path may copy into its aligned DMA buffers */
2253 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2254 	    hldev->config.fifo.alignment_size *
2255 	    hldev->config.fifo.max_aligned_frags;
2256 
2257 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2258 	    XGELL_IFNAME, lldev->instance);
2259 
2260 	return (DDI_SUCCESS);
2261 
2262 xgell_ndd_fail:
2263 	nd_free(&lldev->ndp);
2264 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2265 	return (DDI_FAILURE);
2266 
2267 xgell_register_fail:
2268 	if (macp != NULL)
2269 		mac_free(macp);
2270 	nd_free(&lldev->ndp);
2271 	mutex_destroy(&lldev->genlock);
2272 	/* Ignore return value; Rx has not been started yet */
2273 	(void) xgell_rx_destroy_buffer_pool(lldev);
2274 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2275 	return (DDI_FAILURE);
2276 }
2277 
2278 /*
2279  * xgell_device_unregister
2280  * @lldev: pointer to valid LL device.
2281  *
2282  * This function unregisters the network device from the MAC layer
2283  * and releases its resources.
2284  */
2285 int
2286 xgell_device_unregister(xgelldev_t *lldev)
2287 {
2288 	/*
2289 	 * Destroy RX buffer pool.
2290 	 */
2291 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2292 		return (DDI_FAILURE);
2293 	}
2294 
2295 	if (mac_unregister(lldev->mh) != 0) {
2296 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2297 		    XGELL_IFNAME, lldev->instance);
2298 		return (DDI_FAILURE);
2299 	}
2300 
2301 	mutex_destroy(&lldev->genlock);
2302 
2303 	nd_free(&lldev->ndp);
2304 
2305 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2306 	    XGELL_IFNAME, lldev->instance);
2307 
2308 	return (DDI_SUCCESS);
2309 }
2310