1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
31  *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 #include <netinet/udp.h>
44 
45 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
46     sizeof (struct ether_vlan_header))
47 
48 #define	HEADROOM		2	/* for DIX-only packets */
49 
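/*
 * No-op free routine and its frtn_t descriptor for desballoc()'d header
 * mblks; there is nothing to reclaim when such an mblk is freed.
 */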
50 void header_free_func(void *arg) { }
51 frtn_t header_frtn = {header_free_func, NULL};
52 
53 /* DMA attributes used for Tx side */
54 static struct ddi_dma_attr tx_dma_attr = {
55 	DMA_ATTR_V0,			/* dma_attr_version */
56 	0x0ULL,				/* dma_attr_addr_lo */
57 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
58 	0xFFFFFFFFULL,			/* dma_attr_count_max */
59 	0x1ULL,				/* dma_attr_align */
60 	0xFFF,				/* dma_attr_burstsizes */
61 	1,				/* dma_attr_minxfer */
62 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
63 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
64 	18,				/* dma_attr_sgllen */
65 	1,				/* dma_attr_granular */
66 	0				/* dma_attr_flags */
67 };
68 
69 /* Aligned DMA attributes used for Tx side */
70 struct ddi_dma_attr tx_dma_attr_align = {
71 	DMA_ATTR_V0,			/* dma_attr_version */
72 	0x0ULL,				/* dma_attr_addr_lo */
73 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
74 	0xFFFFFFFFULL,			/* dma_attr_count_max */
75 	4096,				/* dma_attr_align */
76 	0xFFF,				/* dma_attr_burstsizes */
77 	1,				/* dma_attr_minxfer */
78 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
79 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
80 	4,				/* dma_attr_sgllen */
81 	1,				/* dma_attr_granular */
82 	0				/* dma_attr_flags */
83 };
84 
85 /*
86  * DMA attributes used when using ddi_dma_mem_alloc to
87  * allocate HAL descriptors and Rx buffers during replenish
88  */
89 static struct ddi_dma_attr hal_dma_attr = {
90 	DMA_ATTR_V0,			/* dma_attr_version */
91 	0x0ULL,				/* dma_attr_addr_lo */
92 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
93 	0xFFFFFFFFULL,			/* dma_attr_count_max */
94 	0x1ULL,				/* dma_attr_align */
95 	0xFFF,				/* dma_attr_burstsizes */
96 	1,				/* dma_attr_minxfer */
97 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
98 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
99 	1,				/* dma_attr_sgllen */
100 	1,				/* dma_attr_granular */
101 	0				/* dma_attr_flags */
102 };
103 
104 /*
105  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
106  * allocate HAL descriptors and Rx buffers during replenish
107  */
108 struct ddi_dma_attr hal_dma_attr_aligned = {
109 	DMA_ATTR_V0,			/* dma_attr_version */
110 	0x0ULL,				/* dma_attr_addr_lo */
111 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
112 	0xFFFFFFFFULL,			/* dma_attr_count_max */
113 	4096,				/* dma_attr_align */
114 	0xFFF,				/* dma_attr_burstsizes */
115 	1,				/* dma_attr_minxfer */
116 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
117 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
118 	1,				/* dma_attr_sgllen */
119 	1,				/* dma_attr_granular */
120 	0				/* dma_attr_flags */
121 };
122 
123 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
124 struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
125 
126 static int		xgell_m_stat(void *, uint_t, uint64_t *);
127 static int		xgell_m_start(void *);
128 static void		xgell_m_stop(void *);
129 static int		xgell_m_promisc(void *, boolean_t);
130 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
131 static int		xgell_m_unicst(void *, const uint8_t *);
132 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
133 static mblk_t 		*xgell_m_tx(void *, mblk_t *);
134 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
135 
136 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
137 
138 static mac_callbacks_t xgell_m_callbacks = {
139 	XGELL_M_CALLBACK_FLAGS,
140 	xgell_m_stat,
141 	xgell_m_start,
142 	xgell_m_stop,
143 	xgell_m_promisc,
144 	xgell_m_multicst,
145 	xgell_m_unicst,
146 	xgell_m_tx,
147 	NULL,
148 	xgell_m_ioctl,
149 	xgell_m_getcapab
150 };
151 
152 /*
153  * xge_device_poll
154  *
155  * Called by a cyclic every second, and by xge_callback_event_queued()
156  * when a HAL event has been rescheduled.
157  */
158 /*ARGSUSED*/
159 void
160 xge_device_poll(void *data)
161 {
162 	xgelldev_t *lldev = xge_hal_device_private(data);
163 
164 	mutex_enter(&lldev->genlock);
165 	if (lldev->is_initialized) {
166 		xge_hal_device_poll(data);
167 		lldev->timeout_id = timeout(xge_device_poll, data,
168 		    XGE_DEV_POLL_TICKS);
169 	} else if (lldev->in_reset == 1) {
170 		lldev->timeout_id = timeout(xge_device_poll, data,
171 		    XGE_DEV_POLL_TICKS);
172 	} else {
173 		lldev->timeout_id = 0;
174 	}
175 	mutex_exit(&lldev->genlock);
176 }
177 
178 /*
179  * xge_device_poll_now
180  *
181  * Calls xge_hal_device_poll() immediately, without waiting for the cyclic.
182  */
183 void
184 xge_device_poll_now(void *data)
185 {
186 	xgelldev_t *lldev = xge_hal_device_private(data);
187 
188 	mutex_enter(&lldev->genlock);
189 	if (lldev->is_initialized) {
190 		xge_hal_device_poll(data);
191 	}
192 	mutex_exit(&lldev->genlock);
193 }
194 
195 /*
196  * xgell_callback_link_up
197  *
198  * Called by HAL to notify the driver of a hardware link-up state change.
199  */
200 void
201 xgell_callback_link_up(void *userdata)
202 {
203 	xgelldev_t *lldev = (xgelldev_t *)userdata;
204 
205 	mac_link_update(lldev->mh, LINK_STATE_UP);
206 	/* Link states should be reported to user whenever it changes */
207 	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
208 	    XGELL_IFNAME, lldev->instance);
209 }
210 
211 /*
212  * xgell_callback_link_down
213  *
214  * Called by HAL to notify the driver of a hardware link-down state change.
215  */
216 void
217 xgell_callback_link_down(void *userdata)
218 {
219 	xgelldev_t *lldev = (xgelldev_t *)userdata;
220 
221 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
222 	/* Link states should be reported to user whenever it changes */
223 	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
224 	    lldev->instance);
225 }
226 
227 /*
228  * xgell_rx_buffer_replenish_all
229  *
230  * Replenish all freed dtr(s) with buffers from the free pool. Called by
231  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
232  * Must be called with pool_lock held.
233  */
234 static void
235 xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
236 {
237 	xge_hal_dtr_h dtr;
238 	xgell_rx_buffer_t *rx_buffer;
239 	xgell_rxd_priv_t *rxd_priv;
240 
241 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
242 
243 	while ((lldev->bf_pool.free > 0) &&
244 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
245 	    XGE_HAL_OK)) {
246 		rx_buffer = lldev->bf_pool.head;
247 		lldev->bf_pool.head = rx_buffer->next;
248 		lldev->bf_pool.free--;
249 
250 		xge_assert(rx_buffer);
251 		xge_assert(rx_buffer->dma_addr);
252 
253 		rxd_priv = (xgell_rxd_priv_t *)
254 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
255 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
256 		    lldev->bf_pool.size);
257 
258 		rxd_priv->rx_buffer = rx_buffer;
259 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
260 	}
261 }
262 
263 /*
264  * xgell_rx_buffer_release
265  *
266  * The only thing done here is to put the buffer back into the pool.
267  * Callers must hold the bf_pool.pool_lock mutex.
268  */
269 static void
270 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
271 {
272 	xgelldev_t *lldev = rx_buffer->lldev;
273 
274 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
275 
276 	/* Put the buffer back to pool */
277 	rx_buffer->next = lldev->bf_pool.head;
278 	lldev->bf_pool.head = rx_buffer;
279 
280 	lldev->bf_pool.free++;
281 }
282 
283 /*
284  * xgell_rx_buffer_recycle
285  *
286  * Free routine registered via desballoc(); runs when the loaned-up
287  * buffer is freed, and tries to replenish all descriptors.
288  */
289 static void
290 xgell_rx_buffer_recycle(char *arg)
291 {
292 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
293 	xgelldev_t *lldev = rx_buffer->lldev;
294 
295 	mutex_enter(&lldev->bf_pool.pool_lock);
296 
297 	xgell_rx_buffer_release(rx_buffer);
298 	lldev->bf_pool.post--;
299 
300 	/*
301 	 * Before finding a good way to set this hiwat, just always call to
302 	 * replenish_all. *TODO*
303 	 */
304 	if (lldev->is_initialized != 0) {
305 		xgell_rx_buffer_replenish_all(lldev);
306 	}
307 
308 	mutex_exit(&lldev->bf_pool.pool_lock);
309 }
310 
311 /*
312  * xgell_rx_buffer_alloc
313  *
314  * Allocate one rx buffer and return a pointer to it.
315  * Returns NULL on failure.
316  */
317 static xgell_rx_buffer_t *
318 xgell_rx_buffer_alloc(xgelldev_t *lldev)
319 {
320 	xge_hal_device_t *hldev;
321 	void *vaddr;
322 	ddi_dma_handle_t dma_handle;
323 	ddi_acc_handle_t dma_acch;
324 	dma_addr_t dma_addr;
325 	uint_t ncookies;
326 	ddi_dma_cookie_t dma_cookie;
327 	size_t real_size;
328 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
329 	xgell_rx_buffer_t *rx_buffer;
330 
331 	hldev = (xge_hal_device_t *)lldev->devh;
332 
333 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
334 	    0, &dma_handle) != DDI_SUCCESS) {
335 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
336 		    XGELL_IFNAME, lldev->instance);
337 		goto handle_failed;
338 	}
339 
340 	/* reserve some space at the end of the buffer for recycling */
341 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
342 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
343 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
344 	    DDI_SUCCESS) {
345 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
346 		    XGELL_IFNAME, lldev->instance);
347 		goto mem_failed;
348 	}
349 
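	/*
	 * ddi_dma_mem_alloc() may round the allocation up, so verify that
	 * the headroom, the receive buffer and the trailing bookkeeping
	 * structure all fit in what was actually allocated.
	 */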
350 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
351 	    real_size) {
352 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
353 		    XGELL_IFNAME, lldev->instance);
354 		goto bind_failed;
355 	}
356 
357 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
358 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
359 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
360 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
361 		    XGELL_IFNAME, lldev->instance);
362 		goto bind_failed;
363 	}
364 
365 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
366 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
367 		    XGELL_IFNAME, lldev->instance);
368 		goto check_failed;
369 	}
370 
371 	dma_addr = dma_cookie.dmac_laddress;
372 
373 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
374 	    sizeof (xgell_rx_buffer_t));
375 	rx_buffer->next = NULL;
376 	rx_buffer->vaddr = vaddr;
377 	rx_buffer->dma_addr = dma_addr;
378 	rx_buffer->dma_handle = dma_handle;
379 	rx_buffer->dma_acch = dma_acch;
380 	rx_buffer->lldev = lldev;
381 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
382 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
383 
384 	return (rx_buffer);
385 
386 check_failed:
387 	(void) ddi_dma_unbind_handle(dma_handle);
388 bind_failed:
389 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
390 	ddi_dma_mem_free(&dma_acch);
391 mem_failed:
392 	ddi_dma_free_handle(&dma_handle);
393 handle_failed:
394 
395 	return (NULL);
396 }
397 
398 /*
399  * xgell_rx_destroy_buffer_pool
400  *
401  * Destroy the buffer pool. If any buffer is still held by the upper layer,
402  * as recorded by bf_pool.post, return DDI_FAILURE to reject the unload.
403  */
404 static int
405 xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
406 {
407 	xgell_rx_buffer_t *rx_buffer;
408 	ddi_dma_handle_t  dma_handle;
409 	ddi_acc_handle_t  dma_acch;
410 	int i;
411 
412 	/*
413 	 * If any buffers are still posted, the driver must refuse to be
414 	 * detached; the upper layer needs to release them first.
415 	 */
416 	if (lldev->bf_pool.post != 0) {
417 		xge_debug_ll(XGE_ERR,
418 		    "%s%d has some buffers not be recycled, try later!",
419 		    XGELL_IFNAME, lldev->instance);
420 		return (DDI_FAILURE);
421 	}
422 
423 	/*
424 	 * Release buffers one by one.
425 	 */
426 	for (i = lldev->bf_pool.total; i > 0; i--) {
427 		rx_buffer = lldev->bf_pool.head;
428 		xge_assert(rx_buffer != NULL);
429 
430 		lldev->bf_pool.head = rx_buffer->next;
431 
432 		dma_handle = rx_buffer->dma_handle;
433 		dma_acch = rx_buffer->dma_acch;
434 
435 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
436 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
437 			lldev->bf_pool.head = rx_buffer;
438 			return (DDI_FAILURE);
439 		}
440 		ddi_dma_mem_free(&dma_acch);
441 		ddi_dma_free_handle(&dma_handle);
442 
443 		lldev->bf_pool.total--;
444 		lldev->bf_pool.free--;
445 	}
446 
447 	mutex_destroy(&lldev->bf_pool.pool_lock);
448 	return (DDI_SUCCESS);
449 }
450 
451 /*
452  * xgell_rx_create_buffer_pool
453  *
454  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
455  */
456 static int
457 xgell_rx_create_buffer_pool(xgelldev_t *lldev)
458 {
459 	xge_hal_device_t *hldev;
460 	xgell_rx_buffer_t *rx_buffer;
461 	int i;
462 
463 	hldev = (xge_hal_device_t *)lldev->devh;
464 
465 	lldev->bf_pool.total = 0;
466 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
467 	lldev->bf_pool.head = NULL;
468 	lldev->bf_pool.free = 0;
469 	lldev->bf_pool.post = 0;
470 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
471 
472 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
473 	    hldev->irqh);
474 
475 	/*
476 	 * Allocate buffers one by one. If one fails, destroy the whole pool
477 	 * by calling xgell_rx_destroy_buffer_pool().
478 	 */
479 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
480 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
481 			(void) xgell_rx_destroy_buffer_pool(lldev);
482 			return (DDI_FAILURE);
483 		}
484 
485 		rx_buffer->next = lldev->bf_pool.head;
486 		lldev->bf_pool.head = rx_buffer;
487 
488 		lldev->bf_pool.total++;
489 		lldev->bf_pool.free++;
490 	}
491 
492 	return (DDI_SUCCESS);
493 }
494 
495 /*
496  * xgell_rx_dtr_replenish
497  *
498  * Replenish a descriptor with an rx_buffer from the RX buffer pool.
499  * The dtr will be posted right away.
500  */
501 xge_hal_status_e
502 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
503     void *userdata, xge_hal_channel_reopen_e reopen)
504 {
505 	xgell_ring_t *ring = userdata;
506 	xgelldev_t *lldev = ring->lldev;
507 	xgell_rx_buffer_t *rx_buffer;
508 	xgell_rxd_priv_t *rxd_priv;
509 
510 	if (lldev->bf_pool.head == NULL) {
511 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
512 		return (XGE_HAL_FAIL);
513 	}
514 	rx_buffer = lldev->bf_pool.head;
515 	lldev->bf_pool.head = rx_buffer->next;
516 	lldev->bf_pool.free--;
517 
518 	xge_assert(rx_buffer);
519 	xge_assert(rx_buffer->dma_addr);
520 
521 	rxd_priv = (xgell_rxd_priv_t *)
522 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
523 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
524 
525 	rxd_priv->rx_buffer = rx_buffer;
526 
527 	return (XGE_HAL_OK);
528 }
529 
530 /*
531  * xgell_get_ip_offset
532  *
533  * Calculate the offset to the IP header.
534  */
535 static inline int
536 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
537 {
538 	int ip_off;
539 
540 	/* get IP-header offset */
541 	switch (ext_info->frame) {
542 	case XGE_HAL_FRAME_TYPE_DIX:
543 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
544 		break;
545 	case XGE_HAL_FRAME_TYPE_IPX:
546 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
547 		    XGE_HAL_HEADER_802_2_SIZE +
548 		    XGE_HAL_HEADER_SNAP_SIZE);
549 		break;
550 	case XGE_HAL_FRAME_TYPE_LLC:
551 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
552 		    XGE_HAL_HEADER_802_2_SIZE);
553 		break;
554 	case XGE_HAL_FRAME_TYPE_SNAP:
555 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
556 		    XGE_HAL_HEADER_SNAP_SIZE);
557 		break;
558 	default:
559 		ip_off = 0;
560 		break;
561 	}
562 
563 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
564 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
565 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
566 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
567 	}
568 
569 	return (ip_off);
570 }
571 
572 /*
573  * xgell_rx_hcksum_assoc
574  *
575  * Determine the packet type, then call hcksum_assoc() to associate
576  * h/w checksum information with the packet.
577  */
578 static inline void
579 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
580     xge_hal_dtr_info_t *ext_info)
581 {
582 	int cksum_flags = 0;
583 
584 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
585 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
586 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
587 				cksum_flags |= HCK_IPV4_HDRCKSUM;
588 			}
589 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
590 				cksum_flags |= HCK_FULLCKSUM_OK;
591 			}
592 			if (cksum_flags) {
593 				cksum_flags |= HCK_FULLCKSUM;
594 				(void) hcksum_assoc(mp, NULL, NULL, 0,
595 				    0, 0, 0, cksum_flags, 0);
596 			}
597 		}
598 	} else if (ext_info->proto &
599 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
600 		/*
601 		 * Just pass the partial cksum up to IP.
602 		 */
603 		int ip_off = xgell_get_ip_offset(ext_info);
604 		int start, end = pkt_length - ip_off;
605 
606 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
607 			struct ip *ip =
608 			    (struct ip *)(vaddr + ip_off);
609 			start = ip->ip_hl * 4 + ip_off;
610 		} else {
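			/* IPv6: fixed 40-byte base header, no options */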
611 			start = ip_off + 40;
612 		}
613 		cksum_flags |= HCK_PARTIALCKSUM;
614 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
615 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
616 		    0);
617 	}
618 }
619 
620 /*
621  * xgell_rx_1b_msg_alloc
622  *
623  * Allocate a message header for the data buffer, deciding whether to copy
624  * the packet into a new data buffer so the big rx_buffer can be released.
625  *
626  * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a
627  * new message and copy the payload in.
628  */
629 static mblk_t *
630 xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
631     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
632 {
633 	mblk_t *mp;
634 	char *vaddr;
635 
636 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
637 	/*
638 	 * Copy the packet into a newly allocated message buffer if a copy was
639 	 * requested or pkt_length is less than XGELL_RX_DMA_LOWAT.
640 	 */
641 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
642 		if ((mp = allocb(pkt_length, 0)) == NULL) {
643 			return (NULL);
644 		}
645 		bcopy(vaddr, mp->b_rptr, pkt_length);
646 		mp->b_wptr = mp->b_rptr + pkt_length;
647 		*copyit = B_TRUE;
648 		return (mp);
649 	}
650 
651 	/*
652 	 * Just allocate an mblk that points at the current data buffer.
653 	 */
654 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
655 	    &rx_buffer->frtn)) == NULL) {
656 		/* Drop it */
657 		return (NULL);
658 	}
659 	/*
660 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
661 	 */
662 	mp->b_wptr += pkt_length;
663 
664 	return (mp);
665 }
666 
667 /*
668  * xgell_rx_1b_compl
669  *
670  * Called when an interrupt indicates a received frame, or when the receive
671  * ring contains fresh, as yet unprocessed frames.
672  */
673 static xge_hal_status_e
674 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
675     void *userdata)
676 {
677 	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
678 	xgell_rx_buffer_t *rx_buffer;
679 	mblk_t *mp_head = NULL;
680 	mblk_t *mp_end  = NULL;
681 	int pkt_burst = 0;
682 
683 	mutex_enter(&lldev->bf_pool.pool_lock);
684 
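	/*
	 * Walk the completed descriptors, building an mblk chain and
	 * handing it to the MAC layer every rx_pkt_burst packets.
	 */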
685 	do {
686 		int pkt_length;
687 		dma_addr_t dma_data;
688 		mblk_t *mp;
689 		boolean_t copyit = B_FALSE;
690 
691 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
692 		    xge_hal_ring_dtr_private(channelh, dtr));
693 		xge_hal_dtr_info_t ext_info;
694 
695 		rx_buffer = rxd_priv->rx_buffer;
696 
697 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
698 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
699 
700 		xge_assert(dma_data == rx_buffer->dma_addr);
701 
702 		if (t_code != 0) {
703 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
704 			    " completed due to error t_code %01x", XGELL_IFNAME,
705 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
706 
707 			(void) xge_hal_device_handle_tcode(channelh, dtr,
708 			    t_code);
709 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
710 			xgell_rx_buffer_release(rx_buffer);
711 			continue;
712 		}
713 
714 		/*
715 		 * Sync the DMA memory
716 		 */
717 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
718 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
719 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
720 			    XGELL_IFNAME, lldev->instance);
721 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
722 			xgell_rx_buffer_release(rx_buffer);
723 			continue;
724 		}
725 
726 		/*
727 		 * Allocate message for the packet.
728 		 */
729 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
730 			copyit = B_TRUE;
731 		} else {
732 			copyit = B_FALSE;
733 		}
734 
735 		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
736 		    &ext_info, &copyit);
737 
738 		xge_hal_ring_dtr_free(channelh, dtr);
739 
740 		/*
741 		 * Release the buffer and recycle it later
742 		 */
743 		if ((mp == NULL) || copyit) {
744 			xgell_rx_buffer_release(rx_buffer);
745 		} else {
746 			/*
747 			 * Count it, since the buffer is being loaned upstream.
748 			 */
749 			lldev->bf_pool.post++;
750 		}
751 		if (mp == NULL) {
752 			xge_debug_ll(XGE_ERR,
753 			    "%s%d: rx: can not allocate mp mblk",
754 			    XGELL_IFNAME, lldev->instance);
755 			continue;
756 		}
757 
758 		/*
759 		 * Associate cksum_flags per packet type and h/w
760 		 * cksum flags.
761 		 */
762 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
763 		    HEADROOM, pkt_length, &ext_info);
764 
765 		if (mp_head == NULL) {
766 			mp_head = mp;
767 			mp_end = mp;
768 		} else {
769 			mp_end->b_next = mp;
770 			mp_end = mp;
771 		}
772 
773 		if (++pkt_burst < lldev->config.rx_pkt_burst)
774 			continue;
775 
776 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
777 			/* Replenish rx buffers */
778 			xgell_rx_buffer_replenish_all(lldev);
779 		}
780 		mutex_exit(&lldev->bf_pool.pool_lock);
781 		if (mp_head != NULL) {
782 			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
783 			    mp_head);
784 		}
785 		mp_head = mp_end  = NULL;
786 		pkt_burst = 0;
787 		mutex_enter(&lldev->bf_pool.pool_lock);
788 
789 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
790 	    XGE_HAL_OK);
791 
792 	/*
793 	 * Always call replenish_all to recycle rx_buffers.
794 	 */
795 	xgell_rx_buffer_replenish_all(lldev);
796 	mutex_exit(&lldev->bf_pool.pool_lock);
797 
798 	if (mp_head != NULL) {
799 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
800 	}
801 
802 	return (XGE_HAL_OK);
803 }
804 
805 /*
806  * xgell_xmit_compl
807  *
808  * Called when an interrupt is raised to indicate DMA completion of a Tx
809  * packet. It identifies the last TxD whose buffer was freed and frees
810  * all mblks whose data have already been DMA'ed into the NIC's
811  * internal memory.
812  */
813 static xge_hal_status_e
814 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
815     void *userdata)
816 {
817 	xgelldev_t *lldev = userdata;
818 
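	/*
	 * For each completed descriptor: unbind and free its DMA handles,
	 * return the dtr to the fifo, and free the transmitted mblk.
	 */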
819 	do {
820 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
821 		    xge_hal_fifo_dtr_private(dtr));
822 		mblk_t *mp = txd_priv->mblk;
823 		int i;
824 
825 		if (t_code) {
826 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
827 			    " completed due to error t_code %01x", XGELL_IFNAME,
828 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
829 
830 			(void) xge_hal_device_handle_tcode(channelh, dtr,
831 			    t_code);
832 		}
833 
834 		for (i = 0; i < txd_priv->handle_cnt; i++) {
835 			xge_assert(txd_priv->dma_handles[i]);
836 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
837 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
838 			txd_priv->dma_handles[i] = 0;
839 		}
840 
841 		xge_hal_fifo_dtr_free(channelh, dtr);
842 
843 		freemsg(mp);
844 		lldev->resched_avail++;
845 
846 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
847 	    XGE_HAL_OK);
848 
849 	if (lldev->resched_retry &&
850 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
851 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
852 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
853 		    XGELL_IFNAME, lldev->instance,
854 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
855 		lldev->resched_send = lldev->resched_avail;
856 		lldev->resched_retry = 0;
857 	}
858 
859 	return (XGE_HAL_OK);
860 }
861 
862 /*
863  * xgell_send
864  * @lldev: pointer to xgelldev_t structure
865  * @mp: pointer to network buffer, i.e. mblk_t structure
866  *
867  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
868  * A pointer to an M_DATA message that contains the packet is passed to
869  * this routine.
870  */
871 static boolean_t
872 xgell_send(xgelldev_t *lldev, mblk_t *mp)
873 {
874 	mblk_t *bp;
875 	boolean_t retry;
876 	xge_hal_device_t *hldev = lldev->devh;
877 	xge_hal_status_e status;
878 	xge_hal_dtr_h dtr;
879 	xgell_txd_priv_t *txd_priv;
880 	uint32_t hckflags;
881 	uint32_t mss;
882 	int handle_cnt, frag_cnt, ret, i, copied;
883 	boolean_t used_copy;
884 
885 _begin:
886 	retry = B_FALSE;
887 	handle_cnt = frag_cnt = 0;
888 
889 	if (!lldev->is_initialized || lldev->in_reset)
890 		return (B_FALSE);
891 
892 	/*
893 	 * If the free Tx dtr count reaches the lower threshold, inform
894 	 * the GLD to stop sending more packets until the free dtr count
895 	 * exceeds the higher threshold. The driver notifies the GLD through
896 	 * a gld_sched call when the free dtr count exceeds the higher
897 	 * threshold.
898 	 */
899 	if (xge_hal_channel_dtr_count(lldev->fifo_channel)
900 	    <= XGELL_TX_LEVEL_LOW) {
901 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
902 		    "free descriptors count at low threshold %d",
903 		    XGELL_IFNAME, lldev->instance,
904 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
905 		    XGELL_TX_LEVEL_LOW);
906 		retry = B_TRUE;
907 		goto _exit;
908 	}
909 
910 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
911 	if (status != XGE_HAL_OK) {
912 		switch (status) {
913 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
914 			xge_debug_ll(XGE_ERR,
915 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
916 			    lldev->instance,
917 			    ((xge_hal_channel_t *)
918 			    lldev->fifo_channel)->post_qid);
919 			retry = B_TRUE;
920 			goto _exit;
921 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
922 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
923 			    " out of descriptors.", XGELL_IFNAME,
924 			    lldev->instance,
925 			    ((xge_hal_channel_t *)
926 			    lldev->fifo_channel)->post_qid);
927 			retry = B_TRUE;
928 			goto _exit;
929 		default:
930 			return (B_FALSE);
931 		}
932 	}
933 
934 	txd_priv = xge_hal_fifo_dtr_private(dtr);
935 	txd_priv->mblk = mp;
936 
937 	/*
938 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
939 	 * do insertion.
940 	 *
941 	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
942 	 * example is the other implementation for xge. The driver can simply
943 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let h/w
944 	 * insert the tag automatically, since GLD never sends down mp(s) with
945 	 * a split ether_vlan_header.
946 	 *
947 	 * struct ether_vlan_header *evhp;
948 	 * uint16_t tci;
949 	 *
950 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
951 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
952 	 *	tci = ntohs(evhp->ether_tci);
953 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
954 	 *	    2 * ETHERADDRL);
955 	 *	mp->b_rptr += VLAN_TAGSZ;
956 	 *
957 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
958 	 * }
959 	 */
960 
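	/*
	 * Track how many bytes are copied inline into the descriptor;
	 * fragments below tx_dma_lowat are coalesced by copying instead
	 * of being given their own DMA mapping.
	 */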
961 	copied = 0;
962 	used_copy = B_FALSE;
963 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
964 		int mblen;
965 		uint_t ncookies;
966 		ddi_dma_cookie_t dma_cookie;
967 		ddi_dma_handle_t dma_handle;
968 
969 		/* skip zero-length message blocks */
970 		mblen = MBLKL(bp);
971 		if (mblen == 0) {
972 			continue;
973 		}
974 
975 		/*
976 		 * Check the message length to decide whether to DMA or to
977 		 * bcopy() the data into the tx descriptor(s).
978 		 */
979 		if (mblen < lldev->config.tx_dma_lowat &&
980 		    (copied + mblen) < lldev->tx_copied_max) {
981 			xge_hal_status_e rc;
982 			rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
983 			    dtr, bp->b_rptr, mblen);
984 			if (rc == XGE_HAL_OK) {
985 				used_copy = B_TRUE;
986 				copied += mblen;
987 				continue;
988 			} else if (used_copy) {
989 				xge_hal_fifo_dtr_buffer_finalize(
990 				    lldev->fifo_channel, dtr, frag_cnt++);
991 				used_copy = B_FALSE;
992 			}
993 		} else if (used_copy) {
994 			xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
995 			    dtr, frag_cnt++);
996 			used_copy = B_FALSE;
997 		}
998 
999 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1000 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1001 		if (ret != DDI_SUCCESS) {
1002 			xge_debug_ll(XGE_ERR,
1003 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1004 			    lldev->instance);
1005 			goto _exit_cleanup;
1006 		}
1007 
1008 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1009 		    (caddr_t)bp->b_rptr, mblen,
1010 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1011 		    &dma_cookie, &ncookies);
1012 
1013 		switch (ret) {
1014 		case DDI_DMA_MAPPED:
1015 			/* everything's fine */
1016 			break;
1017 
1018 		case DDI_DMA_NORESOURCES:
1019 			xge_debug_ll(XGE_ERR,
1020 			    "%s%d: can not bind dma address",
1021 			    XGELL_IFNAME, lldev->instance);
1022 			ddi_dma_free_handle(&dma_handle);
1023 			goto _exit_cleanup;
1024 
1025 		case DDI_DMA_NOMAPPING:
1026 		case DDI_DMA_INUSE:
1027 		case DDI_DMA_TOOBIG:
1028 		default:
1029 			/* drop packet, don't retry */
1030 			xge_debug_ll(XGE_ERR,
1031 			    "%s%d: can not map message buffer",
1032 			    XGELL_IFNAME, lldev->instance);
1033 			ddi_dma_free_handle(&dma_handle);
1034 			goto _exit_cleanup;
1035 		}
1036 
1037 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1038 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1039 			    "requested c:%d+f:%d", XGELL_IFNAME,
1040 			    lldev->instance, ncookies, frag_cnt);
1041 			(void) ddi_dma_unbind_handle(dma_handle);
1042 			ddi_dma_free_handle(&dma_handle);
1043 			goto _exit_cleanup;
1044 		}
1045 
1046 		/* setup the descriptors for this data buffer */
1047 		while (ncookies) {
1048 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1049 			    frag_cnt++, dma_cookie.dmac_laddress,
1050 			    dma_cookie.dmac_size);
1051 			if (--ncookies) {
1052 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1053 			}
1054 
1055 		}
1056 
1057 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1058 
1059 		if (bp->b_cont &&
1060 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1061 		    hldev->config.fifo.max_frags)) {
1062 			mblk_t *nmp;
1063 
1064 			xge_debug_ll(XGE_TRACE,
1065 			    "too many FRAGs [%d], pull up them", frag_cnt);
1066 
1067 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1068 				/* Drop packet, don't retry */
1069 				xge_debug_ll(XGE_ERR,
1070 				    "%s%d: can not pullup message buffer",
1071 				    XGELL_IFNAME, lldev->instance);
1072 				goto _exit_cleanup;
1073 			}
1074 			freemsg(bp->b_cont);
1075 			bp->b_cont = nmp;
1076 		}
1077 	}
1078 
1079 	/* finalize unfinished copies */
1080 	if (used_copy) {
1081 		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
1082 		    frag_cnt++);
1083 	}
1084 
1085 	txd_priv->handle_cnt = handle_cnt;
1086 
1087 	/*
1088 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1089 	 * do all necessary work.
1090 	 */
1091 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
1092 	if ((hckflags & HW_LSO) && (mss != 0)) {
1093 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1094 	}
1095 
1096 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1097 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1098 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1099 	}
1100 	if (hckflags & HCK_FULLCKSUM) {
1101 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1102 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1103 	}
1104 
1105 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1106 
1107 	return (B_TRUE);
1108 
1109 _exit_cleanup:
1110 
1111 	for (i = 0; i < handle_cnt; i++) {
1112 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1113 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1114 		txd_priv->dma_handles[i] = 0;
1115 	}
1116 
1117 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1118 
1119 _exit:
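	/*
	 * On a transient shortage, try to post a RESCHED event so MAC can
	 * retry later; if the event cannot be queued now, set resched_retry
	 * so that xgell_xmit_compl() posts it once descriptors free up.
	 */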
1120 	if (retry) {
1121 		if (lldev->resched_avail != lldev->resched_send &&
1122 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1123 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1124 			lldev->resched_send = lldev->resched_avail;
1125 			return (B_FALSE);
1126 		} else {
1127 			lldev->resched_retry = 1;
1128 		}
1129 	}
1130 
1131 	freemsg(mp);
1132 	return (B_TRUE);
1133 }
1134 
1135 /*
1136  * xgell_m_tx
1137  * @arg: pointer to the xgelldev_t structure
1138  * @mp: pointer to the message buffer chain
1139  *
1140  * Called by the MAC Layer to send a chain of packets. Returns any
1141  * packets it could not send so that the MAC Layer can retry them.
1142  */
1143 static mblk_t *
1144 xgell_m_tx(void *arg, mblk_t *mp)
1145 {
1146 	xgelldev_t *lldev = arg;
1147 	mblk_t *next;
1148 
1149 	while (mp != NULL) {
1150 		next = mp->b_next;
1151 		mp->b_next = NULL;
1152 
1153 		if (!xgell_send(lldev, mp)) {
1154 			mp->b_next = next;
1155 			break;
1156 		}
1157 		mp = next;
1158 	}
1159 
1160 	return (mp);
1161 }
1162 
1163 /*
1164  * xgell_rx_dtr_term
1165  *
1166  * Called by HAL to terminate all DTRs for
1167  * Ring-type channels.
1168  */
1169 static void
1170 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1171     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1172 {
1173 	xgell_rxd_priv_t *rxd_priv =
1174 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1175 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1176 
1177 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1178 		xgelldev_t *lldev = rx_buffer->lldev;
1179 
1180 		mutex_enter(&lldev->bf_pool.pool_lock);
1181 		xge_hal_ring_dtr_free(channelh, dtrh);
1182 		xgell_rx_buffer_release(rx_buffer);
1183 		mutex_exit(&lldev->bf_pool.pool_lock);
1184 	}
1185 }
1186 
1187 /*
1188  * xgell_tx_term
1189  *
1190  * Called by HAL to terminate all DTRs for
1191  * Fifo-type channels.
1192  */
1193 static void
1194 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1195     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1196 {
1197 	xgell_txd_priv_t *txd_priv =
1198 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1199 	mblk_t *mp = txd_priv->mblk;
1200 	int i;
1201 
1202 	/*
1203 	 * for Tx we must clean up the DTR *only* if it has been
1204 	 * posted!
1205 	 */
1206 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1207 		return;
1208 	}
1209 
1210 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1211 		xge_assert(txd_priv->dma_handles[i]);
1212 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1213 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1214 		txd_priv->dma_handles[i] = 0;
1215 	}
1216 
1217 	xge_hal_fifo_dtr_free(channelh, dtrh);
1218 
1219 	freemsg(mp);
1220 }
1221 
1222 /*
1223  * xgell_tx_open
1224  * @lldev: the link layer object
1225  *
1226  * Initialize and open all Tx channels.
1227  */
1228 static boolean_t
1229 xgell_tx_open(xgelldev_t *lldev)
1230 {
1231 	xge_hal_status_e status;
1232 	u64 adapter_status;
1233 	xge_hal_channel_attr_t attr;
1234 
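	/*
	 * Describe the Tx fifo channel: the completion callback, per-dtr
	 * scratch space, and the termination handler used on close.
	 */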
1235 	attr.post_qid		= 0;
1236 	attr.compl_qid		= 0;
1237 	attr.callback		= xgell_xmit_compl;
1238 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1239 	attr.flags		= 0;
1240 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1241 	attr.userdata		= lldev;
1242 	attr.dtr_init		= NULL;
1243 	attr.dtr_term		= xgell_tx_term;
1244 
1245 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1246 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1247 		    "adaper status reads 0x%"PRIx64, XGELL_IFNAME,
1248 		    lldev->instance, (uint64_t)adapter_status);
1249 		return (B_FALSE);
1250 	}
1251 
1252 	status = xge_hal_channel_open(lldev->devh, &attr,
1253 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1254 	if (status != XGE_HAL_OK) {
1255 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1256 		    "got status code %d", XGELL_IFNAME,
1257 		    lldev->instance, status);
1258 		return (B_FALSE);
1259 	}
1260 
1261 	return (B_TRUE);
1262 }
1263 
1264 /*
1265  * xgell_rx_open
1266  * @lldev: the link layer object
1267  *
1268  * Initialize and open all Rx channels.
1269  */
1270 static boolean_t
1271 xgell_rx_open(xgelldev_t *lldev)
1272 {
1273 	xge_hal_status_e status;
1274 	u64 adapter_status;
1275 	xge_hal_channel_attr_t attr;
1276 
1277 	attr.post_qid		= XGELL_RING_MAIN_QID;
1278 	attr.compl_qid		= 0;
1279 	attr.callback		= xgell_rx_1b_compl;
1280 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1281 	attr.flags		= 0;
1282 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1283 	attr.dtr_init		= xgell_rx_dtr_replenish;
1284 	attr.dtr_term		= xgell_rx_dtr_term;
1285 
1286 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1287 		xge_debug_ll(XGE_ERR,
1288 		    "%s%d: device is not ready adaper status reads 0x%"PRIx64,
1289 		    XGELL_IFNAME, lldev->instance,
1290 		    (uint64_t)adapter_status);
1291 		return (B_FALSE);
1292 	}
1293 
1294 	lldev->ring_main.lldev = lldev;
1295 	attr.userdata = &lldev->ring_main;
1296 
1297 	status = xge_hal_channel_open(lldev->devh, &attr,
1298 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1299 	if (status != XGE_HAL_OK) {
1300 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1301 		    " code %d", XGELL_IFNAME, lldev->instance, status);
1302 		return (B_FALSE);
1303 	}
1304 
1305 	return (B_TRUE);
1306 }
1307 
1308 static int
1309 xgell_initiate_start(xgelldev_t *lldev)
1310 {
1311 	xge_hal_status_e status;
1312 	xge_hal_device_t *hldev = lldev->devh;
1313 	int maxpkt = hldev->config.mtu;
1314 
1315 	/* check initial mtu before enabling the device */
1316 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1317 	if (status != XGE_HAL_OK) {
1318 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1319 		    XGELL_IFNAME, lldev->instance, maxpkt);
1320 		return (EINVAL);
1321 	}
1322 
1323 	/* set initial mtu before enabling the device */
1324 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1325 	if (status != XGE_HAL_OK) {
1326 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1327 		    XGELL_IFNAME, lldev->instance, maxpkt);
1328 		return (EIO);
1329 	}
1330 
1331 	/* tune jumbo/normal frame UFC counters */
1332 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b =
1333 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1334 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1335 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1336 
1337 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c =
1338 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1339 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1340 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1341 
1342 	/* now, enable the device */
1343 	status = xge_hal_device_enable(lldev->devh);
1344 	if (status != XGE_HAL_OK) {
1345 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1346 		    XGELL_IFNAME, lldev->instance);
1347 		return (EIO);
1348 	}
1349 
1350 	if (!xgell_rx_open(lldev)) {
1351 		status = xge_hal_device_disable(lldev->devh);
1352 		if (status != XGE_HAL_OK) {
1353 			u64 adapter_status;
1354 			(void) xge_hal_device_status(lldev->devh,
1355 			    &adapter_status);
1356 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1357 			    "the device. adaper status 0x%"PRIx64
1358 			    " returned status %d",
1359 			    XGELL_IFNAME, lldev->instance,
1360 			    (uint64_t)adapter_status, status);
1361 		}
1362 		xge_os_mdelay(1500);
1363 		return (ENOMEM);
1364 	}
1365 
1366 	if (!xgell_tx_open(lldev)) {
1367 		status = xge_hal_device_disable(lldev->devh);
1368 		if (status != XGE_HAL_OK) {
1369 			u64 adapter_status;
1370 			(void) xge_hal_device_status(lldev->devh,
1371 			    &adapter_status);
1372 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1373 			    "the device. adaper status 0x%"PRIx64
1374 			    " returned status %d",
1375 			    XGELL_IFNAME, lldev->instance,
1376 			    (uint64_t)adapter_status, status);
1377 		}
1378 		xge_os_mdelay(1500);
1379 		xge_hal_channel_close(lldev->ring_main.channelh,
1380 		    XGE_HAL_CHANNEL_OC_NORMAL);
1381 		return (ENOMEM);
1382 	}
1383 
1384 	/* time to enable interrupts */
1385 	xge_hal_device_intr_enable(lldev->devh);
1386 
1387 	lldev->is_initialized = 1;
1388 
1389 	return (0);
1390 }
1391 
1392 static void
1393 xgell_initiate_stop(xgelldev_t *lldev)
1394 {
1395 	xge_hal_status_e status;
1396 
1397 	lldev->is_initialized = 0;
1398 
1399 	status = xge_hal_device_disable(lldev->devh);
1400 	if (status != XGE_HAL_OK) {
1401 		u64 adapter_status;
1402 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1403 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1404 		    "the device. adaper status 0x%"PRIx64" returned status %d",
1405 		    XGELL_IFNAME, lldev->instance,
1406 		    (uint64_t)adapter_status, status);
1407 	}
1408 	xge_hal_device_intr_disable(lldev->devh);
1409 
1410 	xge_debug_ll(XGE_TRACE, "%s",
1411 	    "waiting for device irq to become quiescent...");
1412 	xge_os_mdelay(1500);
1413 
1414 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1415 
1416 	xge_hal_channel_close(lldev->ring_main.channelh,
1417 	    XGE_HAL_CHANNEL_OC_NORMAL);
1418 
1419 	xge_hal_channel_close(lldev->fifo_channel,
1420 	    XGE_HAL_CHANNEL_OC_NORMAL);
1421 }
1422 
1423 /*
1424  * xgell_m_start
1425  * @arg: pointer to device private structure (lldev)
1426  *
1427  * This function is called by MAC Layer to enable the XFRAME
1428  * firmware to generate interrupts and also prepare the
1429  * driver to call mac_rx for delivering receive packets
1430  * to MAC Layer.
1431  */
1432 static int
1433 xgell_m_start(void *arg)
1434 {
1435 	xgelldev_t *lldev = arg;
1436 	xge_hal_device_t *hldev = lldev->devh;
1437 	int ret;
1438 
1439 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1440 	    lldev->instance);
1441 
1442 	mutex_enter(&lldev->genlock);
1443 
1444 	if (lldev->is_initialized) {
1445 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1446 		    XGELL_IFNAME, lldev->instance);
1447 		mutex_exit(&lldev->genlock);
1448 		return (EINVAL);
1449 	}
1450 
1451 	hldev->terminating = 0;
1452 	if ((ret = xgell_initiate_start(lldev)) != 0) {
1453 		mutex_exit(&lldev->genlock);
1454 		return (ret);
1455 	}
1456 
1457 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1458 
1459 	mutex_exit(&lldev->genlock);
1460 
1461 	return (0);
1462 }
1463 
1464 /*
1465  * xgell_m_stop
1466  * @arg: pointer to device private data (lldev)
1467  *
1468  * This function is called by the MAC Layer to disable
1469  * the XFRAME firmware for generating any interrupts and
1470  * also stop the driver from calling mac_rx() for
1471  * delivering data packets to the MAC Layer.
1472  */
1473 static void
1474 xgell_m_stop(void *arg)
1475 {
1476 	xgelldev_t *lldev = arg;
1477 	xge_hal_device_t *hldev = lldev->devh;
1478 
1479 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1480 
1481 	mutex_enter(&lldev->genlock);
1482 	if (!lldev->is_initialized) {
1483 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1484 		mutex_exit(&lldev->genlock);
1485 		return;
1486 	}
1487 
1488 	xge_hal_device_terminating(hldev);
1489 	xgell_initiate_stop(lldev);
1490 
1491 	/* reset device */
1492 	(void) xge_hal_device_reset(lldev->devh);
1493 
1494 	mutex_exit(&lldev->genlock);
1495 
1496 	if (lldev->timeout_id != 0) {
1497 		(void) untimeout(lldev->timeout_id);
1498 	}
1499 
1500 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1501 }
1502 
1503 /*
1504  * xgell_onerr_reset
1505  * @lldev: pointer to xgelldev_t structure
1506  *
1507  * Called by the HAL event framework to reset the hardware.
1508  * This function must be called with genlock held.
1509  */
1510 int
1511 xgell_onerr_reset(xgelldev_t *lldev)
1512 {
1513 	int rc = 0;
1514 
1515 	if (!lldev->is_initialized) {
1516 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1517 		    XGELL_IFNAME, lldev->instance);
1518 		return (rc);
1519 	}
1520 
1521 	lldev->in_reset = 1;
1522 	xgell_initiate_stop(lldev);
1523 
1524 	/* reset device */
1525 	(void) xge_hal_device_reset(lldev->devh);
1526 
1527 	rc = xgell_initiate_start(lldev);
1528 	lldev->in_reset = 0;
1529 
1530 	return (rc);
1531 }
1532 
1533 
1534 /*
1535  * xgell_m_unicst
1536  * @arg: pointer to device private structure (lldev)
1537  * @macaddr: the new MAC address to set
1538  *
1539  * This function is called by MAC Layer to set the physical address
1540  * of the XFRAME firmware.
1541  */
1542 static int
1543 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1544 {
1545 	xge_hal_status_e status;
1546 	xgelldev_t *lldev = (xgelldev_t *)arg;
1547 	xge_hal_device_t *hldev = lldev->devh;
1548 
1549 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1551 
1552 	mutex_enter(&lldev->genlock);
1553 
1554 	xge_debug_ll(XGE_TRACE,
1555 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1556 	    macaddr[0], macaddr[1], macaddr[2],
1557 	    macaddr[3], macaddr[4], macaddr[5]);
1558 
1559 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1560 	if (status != XGE_HAL_OK) {
1561 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1562 		    XGELL_IFNAME, lldev->instance);
1563 		mutex_exit(&lldev->genlock);
1564 		return (EIO);
1565 	}
1566 
1567 	mutex_exit(&lldev->genlock);
1568 
1569 	return (0);
1570 }
1571 
1572 
1573 /*
1574  * xgell_m_multicst
1575  * @arg: pointer to device private structure (lldev)
1576  * @add: B_TRUE to enable the address, B_FALSE to disable it
1577  * @mc_addr: the multicast address in question
1578  *
1579  * This function is called by MAC Layer to enable or
1580  * disable device-level reception of specific multicast addresses.
1581  */
1582 static int
1583 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1584 {
1585 	xge_hal_status_e status;
1586 	xgelldev_t *lldev = (xgelldev_t *)arg;
1587 	xge_hal_device_t *hldev = lldev->devh;
1588 
1589 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1590 
1591 	mutex_enter(&lldev->genlock);
1592 
1593 	if (!lldev->is_initialized) {
1594 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1595 		    XGELL_IFNAME, lldev->instance);
1596 		mutex_exit(&lldev->genlock);
1597 		return (EIO);
1598 	}
1599 
1600 	/* FIXME: missing HAL functionality: enable_one() */
1601 
1602 	status = (add) ?
1603 	    xge_hal_device_mcast_enable(hldev) :
1604 	    xge_hal_device_mcast_disable(hldev);
1605 
1606 	if (status != XGE_HAL_OK) {
1607 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1608 		    add ? "enable" : "disable", status);
1609 		mutex_exit(&lldev->genlock);
1610 		return (EIO);
1611 	}
1612 
1613 	mutex_exit(&lldev->genlock);
1614 
1615 	return (0);
1616 }
1617 
1618 
1619 /*
1620  * xgell_m_promisc
1621  * @arg: pointer to device private structure (lldev)
1622  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
1623  *
1624  * This function is called by MAC Layer to enable or
1625  * disable the reception of all the packets on the medium
1626  */
1627 static int
1628 xgell_m_promisc(void *arg, boolean_t on)
1629 {
1630 	xgelldev_t *lldev = (xgelldev_t *)arg;
1631 	xge_hal_device_t *hldev = lldev->devh;
1632 
1633 	mutex_enter(&lldev->genlock);
1634 
1635 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1636 
1637 	if (!lldev->is_initialized) {
1638 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1639 		    XGELL_IFNAME, lldev->instance);
1640 		mutex_exit(&lldev->genlock);
1641 		return (EIO);
1642 	}
1643 
1644 	if (on) {
1645 		xge_hal_device_promisc_enable(hldev);
1646 	} else {
1647 		xge_hal_device_promisc_disable(hldev);
1648 	}
1649 
1650 	mutex_exit(&lldev->genlock);
1651 
1652 	return (0);
1653 }
1654 
1655 /*
1656  * xgell_m_stat
1657  * @arg: pointer to device private structure (lldev)
1658  *
1659  * This function is called by MAC Layer to get network statistics
1660  * from the driver.
1661  */
1662 static int
1663 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1664 {
1665 	xge_hal_stats_hw_info_t *hw_info;
1666 	xgelldev_t *lldev = (xgelldev_t *)arg;
1667 	xge_hal_device_t *hldev = lldev->devh;
1668 
1669 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1670 
1671 	if (!mutex_tryenter(&lldev->genlock))
1672 		return (EAGAIN);
1673 
1674 	if (!lldev->is_initialized) {
1675 		mutex_exit(&lldev->genlock);
1676 		return (EAGAIN);
1677 	}
1678 
1679 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1680 		mutex_exit(&lldev->genlock);
1681 		return (EAGAIN);
1682 	}
1683 
1684 	switch (stat) {
1685 	case MAC_STAT_IFSPEED:
1686 		*val = 10000000000ull; /* 10G */
1687 		break;
1688 
1689 	case MAC_STAT_MULTIRCV:
1690 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
1691 		    hw_info->rmac_vld_mcst_frms;
1692 		break;
1693 
1694 	case MAC_STAT_BRDCSTRCV:
1695 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
1696 		    hw_info->rmac_vld_bcst_frms;
1697 		break;
1698 
1699 	case MAC_STAT_MULTIXMT:
1700 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
1701 		    hw_info->tmac_mcst_frms;
1702 		break;
1703 
1704 	case MAC_STAT_BRDCSTXMT:
1705 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
1706 		    hw_info->tmac_bcst_frms;
1707 		break;
1708 
1709 	case MAC_STAT_RBYTES:
1710 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
1711 		    hw_info->rmac_ttl_octets;
1712 		break;
1713 
1714 	case MAC_STAT_NORCVBUF:
1715 		*val = hw_info->rmac_drop_frms;
1716 		break;
1717 
1718 	case MAC_STAT_IERRORS:
1719 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
1720 		    hw_info->rmac_discarded_frms;
1721 		break;
1722 
1723 	case MAC_STAT_OBYTES:
1724 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
1725 		    hw_info->tmac_ttl_octets;
1726 		break;
1727 
1728 	case MAC_STAT_NOXMTBUF:
1729 		*val = hw_info->tmac_drop_frms;
1730 		break;
1731 
1732 	case MAC_STAT_OERRORS:
1733 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
1734 		    hw_info->tmac_any_err_frms;
1735 		break;
1736 
1737 	case MAC_STAT_IPACKETS:
1738 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
1739 		    hw_info->rmac_vld_frms;
1740 		break;
1741 
1742 	case MAC_STAT_OPACKETS:
1743 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
1744 		    hw_info->tmac_frms;
1745 		break;
1746 
1747 	case ETHER_STAT_FCS_ERRORS:
1748 		*val = hw_info->rmac_fcs_err_frms;
1749 		break;
1750 
1751 	case ETHER_STAT_TOOLONG_ERRORS:
1752 		*val = hw_info->rmac_long_frms;
1753 		break;
1754 
1755 	case ETHER_STAT_LINK_DUPLEX:
1756 		*val = LINK_DUPLEX_FULL;
1757 		break;
1758 
1759 	default:
1760 		mutex_exit(&lldev->genlock);
1761 		return (ENOTSUP);
1762 	}
1763 
1764 	mutex_exit(&lldev->genlock);
1765 
1766 	return (0);
1767 }
1768 
1769 /*
1770  * xgell_device_alloc - Allocate new LL device
1771  */
1772 int
1773 xgell_device_alloc(xge_hal_device_h devh,
1774     dev_info_t *dev_info, xgelldev_t **lldev_out)
1775 {
1776 	xgelldev_t *lldev;
1777 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1778 	int instance = ddi_get_instance(dev_info);
1779 
1780 	*lldev_out = NULL;
1781 
1782 	xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
1783 	    XGELL_IFNAME, instance);
1784 
1785 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1786 
1787 	lldev->devh = hldev;
1788 	lldev->instance = instance;
1789 	lldev->dev_info = dev_info;
1790 
1791 	*lldev_out = lldev;
1792 
1793 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1794 
1795 	return (DDI_SUCCESS);
1796 }
1797 
1798 /*
1799  * xgell_device_free
1800  */
1801 void
1802 xgell_device_free(xgelldev_t *lldev)
1803 {
1804 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1805 	    XGELL_IFNAME, lldev->instance);
1806 
1807 	kmem_free(lldev, sizeof (xgelldev_t));
1808 }
1809 
1810 /*
1811  * xgell_m_ioctl
1812  */
1813 static void
1814 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1815 {
1816 	xgelldev_t *lldev = arg;
1817 	struct iocblk *iocp;
1818 	int err = 0;
1819 	int cmd;
1820 	int need_privilege = 1;
1821 	int ret = 0;
1822 
1823 
1824 	iocp = (struct iocblk *)mp->b_rptr;
1825 	iocp->ioc_error = 0;
1826 	cmd = iocp->ioc_cmd;
1827 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1828 	switch (cmd) {
1829 	case ND_GET:
1830 		need_privilege = 0;
1831 		/* FALLTHRU */
1832 	case ND_SET:
1833 		break;
1834 	default:
1835 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1836 		miocnak(wq, mp, 0, EINVAL);
1837 		return;
1838 	}
1839 
1840 	if (need_privilege) {
1841 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1842 		if (err != 0) {
1843 			xge_debug_ll(XGE_ERR,
1844 			    "drv_priv(): rejected cmd 0x%x, err %d",
1845 			    cmd, err);
1846 			miocnak(wq, mp, 0, err);
1847 			return;
1848 		}
1849 	}
1850 
1851 	switch (cmd) {
1852 	case ND_GET:
1853 		/*
1854 		 * If nd_getset() returns B_FALSE, the command was
1855 		 * not valid (e.g. unknown name), so we just tell the
1856 		 * top-level ioctl code to send a NAK (with code EINVAL).
1857 		 *
1858 		 * Otherwise, nd_getset() will have built the reply to
1859 		 * be sent (but not actually sent it), so we tell the
1860 		 * caller to send the prepared reply.
1861 		 */
1862 		ret = nd_getset(wq, lldev->ndp, mp);
1863 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1864 		break;
1865 
1866 	case ND_SET:
1867 		ret = nd_getset(wq, lldev->ndp, mp);
1868 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1869 		break;
1870 
1871 	default:
1872 		break;
1873 	}
1874 
1875 	if (ret == B_FALSE) {
1876 		xge_debug_ll(XGE_ERR,
1877 		    "nd_getset(): rejected cmd 0x%x, err %d",
1878 		    cmd, err);
1879 		miocnak(wq, mp, 0, EINVAL);
1880 	} else {
1881 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1882 		    M_IOCACK : M_IOCNAK;
1883 		qreply(wq, mp);
1884 	}
1885 }
1886 
1887 /* ARGSUSED */
1888 static boolean_t
1889 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1890 {
1891 	xgelldev_t *lldev = arg;
1892 
1893 	switch (cap) {
1894 	case MAC_CAPAB_HCKSUM: {
1895 		uint32_t *hcksum_txflags = cap_data;
1896 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
1897 		    HCKSUM_IPHDRCKSUM;
1898 		break;
1899 	}
1900 	case MAC_CAPAB_LSO: {
1901 		mac_capab_lso_t *cap_lso = cap_data;
1902 
1903 		if (lldev->config.lso_enable) {
1904 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1905 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
1906 			break;
1907 		} else {
1908 			return (B_FALSE);
1909 		}
1910 	}
1911 	default:
1912 		return (B_FALSE);
1913 	}
1914 	return (B_TRUE);
1915 }
1916 
1917 static int
1918 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1919 {
1920 	xgelldev_t *lldev = (xgelldev_t *)cp;
1921 	xge_hal_status_e status;
1922 	int count = 0, retsize;
1923 	char *buf;
1924 
1925 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
1926 	if (buf == NULL) {
1930 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
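	/*
	 * Concatenate the tmac, rmac, PCI, software-device, and HAL
	 * statistics blocks into one buffer; each read appends at
	 * buf + count.
	 */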
1931 	    buf, &retsize);
1932 	if (status != XGE_HAL_OK) {
1933 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1934 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
1935 		return (EINVAL);
1936 	}
1937 	count += retsize;
1938 
1939 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
1940 	    XGELL_STATS_BUFSIZE - count,
1941 	    buf+count, &retsize);
1942 	if (status != XGE_HAL_OK) {
1943 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1944 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
1945 		return (EINVAL);
1946 	}
1947 	count += retsize;
1948 
1949 	status = xge_hal_aux_stats_pci_read(lldev->devh,
1950 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1951 	if (status != XGE_HAL_OK) {
1952 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1953 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
1954 		return (EINVAL);
1955 	}
1956 	count += retsize;
1957 
1958 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
1959 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1960 	if (status != XGE_HAL_OK) {
1961 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1962 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
1963 		return (EINVAL);
1964 	}
1965 	count += retsize;
1966 
1967 	status = xge_hal_aux_stats_hal_read(lldev->devh,
1968 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1969 	if (status != XGE_HAL_OK) {
1970 		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
1972 		return (EINVAL);
1973 	}
1974 	count += retsize;
1975 
1976 	*(buf + count - 1) = '\0'; /* remove last '\n' */
1977 	(void) mi_mpprintf(mp, "%s", buf);
1978 	kmem_free(buf, XGELL_STATS_BUFSIZE);
1979 
1980 	return (0);
1981 }
1982 
1983 static int
1984 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1985 {
1986 	xgelldev_t *lldev = (xgelldev_t *)cp;
1987 	xge_hal_status_e status;
1988 	int retsize;
1989 	char *buf;
1990 
1991 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
1995 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
1996 	    buf, &retsize);
1997 	if (status != XGE_HAL_OK) {
1998 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
1999 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2000 		return (EINVAL);
2001 	}
2002 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2003 	(void) mi_mpprintf(mp, "%s", buf);
2004 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2005 
2006 	return (0);
2007 }
2008 
2009 static int
2010 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2011 {
2012 	xgelldev_t *lldev = (xgelldev_t *)cp;
2013 	xge_hal_status_e status;
2014 	int retsize;
2015 	char *buf;
2016 
2017 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2021 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2022 	    buf, &retsize);
2023 	if (status != XGE_HAL_OK) {
2024 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2025 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2026 		return (EINVAL);
2027 	}
2028 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2029 	(void) mi_mpprintf(mp, "%s", buf);
2030 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2031 
2032 	return (0);
2033 }
2034 
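/*
 * BAR0 register access through ndd: "bar0" is first set to an offset
 * within BAR0 and then read to dump the register(s) at that offset.
 * The default offset, 0x110, is the adapter_control register.
 */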
2035 static unsigned long bar0_offset = 0x110; /* adapter_control */
2036 
2037 static int
2038 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2039 {
2040 	xgelldev_t *lldev = (xgelldev_t *)cp;
2041 	xge_hal_status_e status;
2042 	int retsize;
2043 	char *buf;
2044 
2045 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2049 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2050 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2051 	if (status != XGE_HAL_OK) {
2052 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2053 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2054 		return (EINVAL);
2055 	}
2056 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2057 	(void) mi_mpprintf(mp, "%s", buf);
2058 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2059 
2060 	return (0);
2061 }
2062 
2063 static int
2064 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2065 {
2066 	unsigned long old_offset = bar0_offset;
2067 	char *end;
2068 
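	/* Accept an optional 0x/0X prefix; the rest is parsed as hex */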
2069 	if (value && *value == '0' &&
2070 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2071 		value += 2;
2072 	}
2073 
2074 	bar0_offset = mi_strtol(value, &end, 16);
2075 	if (end == value) {
2076 		bar0_offset = old_offset;
2077 		return (EINVAL);
2078 	}
2079 
2080 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2081 
2082 	return (0);
2083 }
2084 
2085 static int
2086 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2087 {
	/*
	 * mi_mpprintf() formats the reply directly into the mblk, so no
	 * scratch buffer is needed here.
	 */
	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2096 
2097 	return (0);
2098 }
2099 
2100 static int
2101 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2102     cred_t *credp)
2103 {
2104 	int level;
2105 	char *end;
2106 
2107 	level = mi_strtol(value, &end, 10);
2108 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2109 		return (EINVAL);
2110 	}
2111 
2112 	xge_hal_driver_debug_level_set(level);
2113 
2114 	return (0);
2115 }
2116 
2117 static int
2118 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2119 {
	/* As in xgell_debug_level_get(), no scratch buffer is needed */
	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
	    xge_hal_driver_debug_module_mask());
2129 
2130 	return (0);
2131 }
2132 
2133 static int
2134 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *credp)
2136 {
2137 	u32 mask;
2138 	char *end;
2139 
2140 	if (value && *value == '0' &&
2141 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2142 		value += 2;
2143 	}
2144 
2145 	mask = mi_strtol(value, &end, 16);
2146 	if (end == value) {
2147 		return (EINVAL);
2148 	}
2149 
2150 	xge_hal_driver_debug_module_mask_set(mask);
2151 
2152 	return (0);
2153 }
2154 
2155 static int
2156 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2157 {
2158 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2159 	xge_hal_status_e status;
2160 	int retsize;
2161 	char *buf;
2162 
2163 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
	status = xge_hal_aux_device_config_read(lldev->devh,
	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2170 	if (status != XGE_HAL_OK) {
2171 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2172 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2173 		    status);
2174 		return (EINVAL);
2175 	}
2176 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2177 	(void) mi_mpprintf(mp, "%s", buf);
2178 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2179 
2180 	return (0);
2181 }
2182 
/*
 * xgell_device_register
 * @lldev: pointer to valid LL device
 * @config: pointer to this network device's configuration
 *
 * This function loads the driver's ndd parameters and registers the
 * network device with the MAC layer.
 */
2191 int
2192 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2193 {
2194 	mac_register_t *macp = NULL;
2195 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2196 
2197 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2198 	    (caddr_t)lldev) == B_FALSE)
2199 		goto xgell_ndd_fail;
2200 
2201 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2202 	    (caddr_t)lldev) == B_FALSE)
2203 		goto xgell_ndd_fail;
2204 
2205 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2206 	    (caddr_t)lldev) == B_FALSE)
2207 		goto xgell_ndd_fail;
2208 
2209 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2210 	    (caddr_t)lldev) == B_FALSE)
2211 		goto xgell_ndd_fail;
2212 
2213 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2214 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2215 		goto xgell_ndd_fail;
2216 
2217 	if (nd_load(&lldev->ndp, "debug_module_mask",
2218 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2219 	    (caddr_t)lldev) == B_FALSE)
2220 		goto xgell_ndd_fail;
2221 
2222 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2223 	    (caddr_t)lldev) == B_FALSE)
2224 		goto xgell_ndd_fail;
2225 
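	/* Keep a private copy of the configuration for the device's lifetime */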
2226 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2227 
2228 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2229 		nd_free(&lldev->ndp);
2230 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2231 		return (DDI_FAILURE);
2232 	}
2233 
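	/*
	 * genlock is created with the HAL interrupt cookie so that it
	 * can be acquired at the device's interrupt priority.
	 */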
2234 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2235 
2236 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2237 		goto xgell_register_fail;
2238 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2239 	macp->m_driver = lldev;
2240 	macp->m_dip = lldev->dev_info;
2241 	macp->m_src_addr = hldev->macaddr[0];
2242 	macp->m_callbacks = &xgell_m_callbacks;
2243 	macp->m_min_sdu = 0;
2244 	macp->m_max_sdu = hldev->config.mtu;
2245 	/*
2246 	 * Finally, we're ready to register ourselves with the Nemo
2247 	 * interface; if this succeeds, we're all ready to start()
2248 	 */
2249 
2250 	if (mac_register(macp, &lldev->mh) != 0)
2251 		goto xgell_register_fail;
2252 
	/* macp is no longer needed once registration has succeeded */
	mac_free(macp);
2256 
	/*
	 * Upper bound on the bytes that will be copied (rather than
	 * DMA-bound) for a single Tx packet, derived from the FIFO
	 * configuration.
	 */
	lldev->tx_copied_max = hldev->config.fifo.max_frags *
	    hldev->config.fifo.alignment_size *
	    hldev->config.fifo.max_aligned_frags;
2261 
	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2263 	    XGELL_IFNAME, lldev->instance);
2264 
2265 	return (DDI_SUCCESS);
2266 
2267 xgell_ndd_fail:
2268 	nd_free(&lldev->ndp);
	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameters");
2270 	return (DDI_FAILURE);
2271 
2272 xgell_register_fail:
2273 	if (macp != NULL)
2274 		mac_free(macp);
2275 	nd_free(&lldev->ndp);
2276 	mutex_destroy(&lldev->genlock);
	/* Ignore the return value; RX has not been started yet */
2278 	(void) xgell_rx_destroy_buffer_pool(lldev);
2279 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2280 	return (DDI_FAILURE);
2281 }
2282 
/*
 * xgell_device_unregister
 * @lldev: pointer to valid LL device
 *
 * This function unregisters the network device from the MAC layer and
 * releases the resources acquired at registration time.
 */
2290 int
2291 xgell_device_unregister(xgelldev_t *lldev)
2292 {
2293 	/*
2294 	 * Destroy RX buffer pool.
2295 	 */
2296 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2297 		return (DDI_FAILURE);
2298 	}
2299 
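	/*
	 * mac_unregister() fails if the MAC is still in use, e.g. while
	 * it is held open; the device must then stay registered.
	 */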
2300 	if (mac_unregister(lldev->mh) != 0) {
2301 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2302 		    XGELL_IFNAME, lldev->instance);
2303 		return (DDI_FAILURE);
2304 	}
2305 
2306 	mutex_destroy(&lldev->genlock);
2307 
2308 	nd_free(&lldev->ndp);
2309 
	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2311 	    XGELL_IFNAME, lldev->instance);
2312 
2313 	return (DDI_SUCCESS);
2314 }
2315