xref: /titanic_44/usr/src/uts/common/io/xge/drv/xgell.c (revision f808c858fa61e7769218966759510a8b1190dfcf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
31  *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 
44 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
45     sizeof (struct ether_vlan_header))
46 
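/*
 * HEADROOM bytes are reserved in front of each received DIX frame so that,
 * together with the 14-byte Ethernet header, the IP header starts on a
 * 4-byte boundary (see xgell_rx_1b_msg_alloc()).
 */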
47 #define	HEADROOM		2	/* for DIX-only packets */
48 
49 #ifdef XGELL_L3_ALIGNED
50 void header_free_func(void *arg) { }
51 frtn_t header_frtn = {header_free_func, NULL};
52 #endif
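/*
 * No-op free routine used with esballoc() in xgell_rx_1b_msg_alloc(); the
 * header block it wraps is not freed when the resulting mblk is released.
 */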
53 
54 /* DMA attributes used for Tx side */
55 static struct ddi_dma_attr tx_dma_attr = {
56 	DMA_ATTR_V0,			/* dma_attr_version */
57 	0x0ULL,				/* dma_attr_addr_lo */
58 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
59 	0xFFFFFFFFULL,			/* dma_attr_count_max */
60 	0x1ULL,				/* dma_attr_align */
61 	0xFFF,				/* dma_attr_burstsizes */
62 	1,				/* dma_attr_minxfer */
63 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
64 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
65 	4,				/* dma_attr_sgllen */
66 	1,				/* dma_attr_granular */
67 	0				/* dma_attr_flags */
68 };
69 
70 /* Aligned DMA attributes used for Tx side */
71 struct ddi_dma_attr tx_dma_attr_align = {
72 	DMA_ATTR_V0,			/* dma_attr_version */
73 	0x0ULL,				/* dma_attr_addr_lo */
74 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
75 	0xFFFFFFFFULL,			/* dma_attr_count_max */
76 	4096,				/* dma_attr_align */
77 	0xFFF,				/* dma_attr_burstsizes */
78 	1,				/* dma_attr_minxfer */
79 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
80 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
81 	4,				/* dma_attr_sgllen */
82 	1,				/* dma_attr_granular */
83 	0				/* dma_attr_flags */
84 };
85 
86 /*
87  * DMA attributes used when using ddi_dma_mem_alloc to
88  * allocate HAL descriptors and Rx buffers during replenish
89  */
90 static struct ddi_dma_attr hal_dma_attr = {
91 	DMA_ATTR_V0,			/* dma_attr_version */
92 	0x0ULL,				/* dma_attr_addr_lo */
93 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
94 	0xFFFFFFFFULL,			/* dma_attr_count_max */
95 	0x1ULL,				/* dma_attr_align */
96 	0xFFF,				/* dma_attr_burstsizes */
97 	1,				/* dma_attr_minxfer */
98 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
99 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
100 	1,				/* dma_attr_sgllen */
101 	1,				/* dma_attr_granular */
102 	0				/* dma_attr_flags */
103 };
104 
105 /*
106  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
107  * allocate HAL descriptors and Rx buffers during replenish
108  */
109 struct ddi_dma_attr hal_dma_attr_aligned = {
110 	DMA_ATTR_V0,			/* dma_attr_version */
111 	0x0ULL,				/* dma_attr_addr_lo */
112 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
113 	0xFFFFFFFFULL,			/* dma_attr_count_max */
114 	4096,				/* dma_attr_align */
115 	0xFFF,				/* dma_attr_burstsizes */
116 	1,				/* dma_attr_minxfer */
117 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
118 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
119 	1,				/* dma_attr_sgllen */
120 	1,				/* dma_attr_granular */
121 	0				/* dma_attr_flags */
122 };
123 
124 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
125 struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
126 
127 static int		xgell_m_stat(void *, uint_t, uint64_t *);
128 static int		xgell_m_start(void *);
129 static void		xgell_m_stop(void *);
130 static int		xgell_m_promisc(void *, boolean_t);
131 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
132 static int		xgell_m_unicst(void *, const uint8_t *);
133 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
134 static mblk_t 		*xgell_m_tx(void *, mblk_t *);
135 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
136 
137 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
138 
139 static mac_callbacks_t xgell_m_callbacks = {
140 	XGELL_M_CALLBACK_FLAGS,
141 	xgell_m_stat,
142 	xgell_m_start,
143 	xgell_m_stop,
144 	xgell_m_promisc,
145 	xgell_m_multicst,
146 	xgell_m_unicst,
147 	xgell_m_tx,
148 	NULL,
149 	xgell_m_ioctl,
150 	xgell_m_getcapab
151 };
152 
153 /*
154  * xge_device_poll
155  *
156  * Cyclic should call me every 1s. xge_callback_event_queued should call me
157  * when HAL hope event was rescheduled.
158  */
159 /*ARGSUSED*/
160 void
161 xge_device_poll(void *data)
162 {
163 	xgelldev_t *lldev = xge_hal_device_private(data);
164 
165 	mutex_enter(&lldev->genlock);
166 	if (lldev->is_initialized) {
167 		xge_hal_device_poll(data);
168 		lldev->timeout_id = timeout(xge_device_poll, data,
169 		    XGE_DEV_POLL_TICKS);
170 	}
171 	mutex_exit(&lldev->genlock);
172 }
173 
174 /*
175  * xge_device_poll_now
176  *
177  * Will call xge_device_poll() immediately
178  */
179 void
180 xge_device_poll_now(void *data)
181 {
182 	xgelldev_t *lldev = xge_hal_device_private(data);
183 
184 	mutex_enter(&lldev->genlock);
185 	(void) untimeout(lldev->timeout_id);
186 	lldev->timeout_id = timeout(xge_device_poll, data, 0);
187 	mutex_exit(&lldev->genlock);
188 
189 }
190 
191 /*
192  * xgell_callback_link_up
193  *
194  * This function is called by HAL to notify of a HW link-up state change.
195  */
196 void
197 xgell_callback_link_up(void *userdata)
198 {
199 	xgelldev_t *lldev = (xgelldev_t *)userdata;
200 
201 	mac_link_update(lldev->mh, LINK_STATE_UP);
202 	/* Link state changes should always be reported to the user */
203 	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
204 	    XGELL_IFNAME, lldev->instance);
205 }
206 
207 /*
208  * xgell_callback_link_down
209  *
210  * This function is called by HAL to notify of a HW link-down state change.
211  */
212 void
213 xgell_callback_link_down(void *userdata)
214 {
215 	xgelldev_t *lldev = (xgelldev_t *)userdata;
216 
217 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
218 	/* Link state changes should always be reported to the user */
219 	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
220 	    lldev->instance);
221 }
222 
223 /*
224  * xgell_rx_buffer_replenish_all
225  *
226  * Replenish all freed dtr(s) with buffers from the free pool.  Called by
227  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
228  * Must be called with pool_lock held.
229  */
230 static void
231 xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
232 {
233 	xge_hal_dtr_h dtr;
234 	xgell_rx_buffer_t *rx_buffer;
235 	xgell_rxd_priv_t *rxd_priv;
236 
237 	while ((lldev->bf_pool.free > 0) &&
238 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
239 	    XGE_HAL_OK)) {
240 		rx_buffer = lldev->bf_pool.head;
241 		lldev->bf_pool.head = rx_buffer->next;
242 		lldev->bf_pool.free--;
243 
244 		xge_assert(rx_buffer);
245 		xge_assert(rx_buffer->dma_addr);
246 
247 		rxd_priv = (xgell_rxd_priv_t *)
248 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
249 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
250 		    lldev->bf_pool.size);
251 
252 		rxd_priv->rx_buffer = rx_buffer;
253 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
254 	}
255 }
256 
257 /*
258  * xgell_rx_buffer_release
259  *
260  * The only thing done here is to put the buffer back to the pool.
261  */
262 static void
263 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
264 {
265 	xgelldev_t *lldev = rx_buffer->lldev;
266 
267 	mutex_enter(&lldev->bf_pool.pool_lock);
268 
269 	/* Put the buffer back to pool */
270 	rx_buffer->next = lldev->bf_pool.head;
271 	lldev->bf_pool.head = rx_buffer;
272 
273 	lldev->bf_pool.free++;
274 
275 	mutex_exit(&lldev->bf_pool.pool_lock);
276 }
277 
278 /*
279  * xgell_rx_buffer_recycle
280  *
281  * The free routine registered with desballoc(); called when the upper layer
282  * frees a loaned-up buffer.  We will try to replenish all descriptors.
283  */
284 static void
285 xgell_rx_buffer_recycle(char *arg)
286 {
287 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
288 	xgelldev_t *lldev = rx_buffer->lldev;
289 
290 	xgell_rx_buffer_release(rx_buffer);
291 
292 	mutex_enter(&lldev->bf_pool.pool_lock);
293 	lldev->bf_pool.post--;
294 
295 	/*
296 	 * Before finding a good way to set this hiwat, just always call to
297 	 * replenish_all. *TODO*
298 	 */
299 	if (lldev->is_initialized != 0) {
300 		xgell_rx_buffer_replenish_all(lldev);
301 	}
302 
303 	mutex_exit(&lldev->bf_pool.pool_lock);
304 }
305 
306 /*
307  * xgell_rx_buffer_alloc
308  *
309  * Allocate one rx buffer and return a pointer to it.
310  * Return NULL on failure.
311  */
312 static xgell_rx_buffer_t *
313 xgell_rx_buffer_alloc(xgelldev_t *lldev)
314 {
315 	xge_hal_device_t *hldev;
316 	void *vaddr;
317 	ddi_dma_handle_t dma_handle;
318 	ddi_acc_handle_t dma_acch;
319 	dma_addr_t dma_addr;
320 	uint_t ncookies;
321 	ddi_dma_cookie_t dma_cookie;
322 	size_t real_size;
323 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
324 	xgell_rx_buffer_t *rx_buffer;
325 
326 	hldev = lldev->devh;
327 
328 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
329 	    0, &dma_handle) != DDI_SUCCESS) {
330 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
331 		    XGELL_IFNAME, lldev->instance);
332 		goto handle_failed;
333 	}
334 
335 	/* reserve some space at the end of the buffer for recycling */
336 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
337 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
338 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
339 	    DDI_SUCCESS) {
340 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
341 		    XGELL_IFNAME, lldev->instance);
342 		goto mem_failed;
343 	}
344 
345 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
346 	    real_size) {
347 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
348 		    XGELL_IFNAME, lldev->instance);
349 		goto bind_failed;
350 	}
351 
352 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
353 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
354 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
355 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
356 		    XGELL_IFNAME, lldev->instance);
357 		goto bind_failed;
358 	}
359 
360 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
361 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
362 		    XGELL_IFNAME, lldev->instance);
363 		goto check_failed;
364 	}
365 
366 	dma_addr = dma_cookie.dmac_laddress;
367 
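	/*
	 * The xgell_rx_buffer_t bookkeeping structure lives at the tail of the
	 * same DMA allocation, just past the HEADROOM + data area reserved
	 * above, so it is allocated and freed together with the buffer.
	 */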
368 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
369 	    sizeof (xgell_rx_buffer_t));
370 	rx_buffer->next = NULL;
371 	rx_buffer->vaddr = vaddr;
372 	rx_buffer->dma_addr = dma_addr;
373 	rx_buffer->dma_handle = dma_handle;
374 	rx_buffer->dma_acch = dma_acch;
375 	rx_buffer->lldev = lldev;
376 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
377 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
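	/*
	 * The free routine above is handed to desballoc() when this buffer is
	 * loaned upstream; once the upper layer frees the mblk, the buffer is
	 * returned to the pool via xgell_rx_buffer_recycle().
	 */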
378 
379 	return (rx_buffer);
380 
381 check_failed:
382 	(void) ddi_dma_unbind_handle(dma_handle);
383 bind_failed:
384 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
385 	ddi_dma_mem_free(&dma_acch);
386 mem_failed:
387 	ddi_dma_free_handle(&dma_handle);
388 handle_failed:
389 
390 	return (NULL);
391 }
392 
393 /*
394  * xgell_rx_destroy_buffer_pool
395  *
396  * Destroy the buffer pool.  If any buffers are still held by the upper
397  * layer (tracked by bf_pool.post), return DDI_FAILURE to reject the unload.
398  */
399 static int
400 xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
401 {
402 	xgell_rx_buffer_t *rx_buffer;
403 	ddi_dma_handle_t  dma_handle;
404 	ddi_acc_handle_t  dma_acch;
405 	int i;
406 
407 	/*
408 	 * If any buffers are still posted, the driver must refuse to be
409 	 * detached.  The upper layer needs to release them first.
410 	 */
411 	if (lldev->bf_pool.post != 0) {
412 		xge_debug_ll(XGE_ERR,
413 		    "%s%d has some buffers not yet recycled, try later!",
414 		    XGELL_IFNAME, lldev->instance);
415 		return (DDI_FAILURE);
416 	}
417 
418 	/*
419 	 * Release buffers one by one.
420 	 */
421 	for (i = lldev->bf_pool.total; i > 0; i--) {
422 		rx_buffer = lldev->bf_pool.head;
423 		xge_assert(rx_buffer != NULL);
424 
425 		lldev->bf_pool.head = rx_buffer->next;
426 
427 		dma_handle = rx_buffer->dma_handle;
428 		dma_acch = rx_buffer->dma_acch;
429 
430 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
431 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
432 			lldev->bf_pool.head = rx_buffer;
433 			return (DDI_FAILURE);
434 		}
435 		ddi_dma_mem_free(&dma_acch);
436 		ddi_dma_free_handle(&dma_handle);
437 
438 		lldev->bf_pool.total--;
439 		lldev->bf_pool.free--;
440 	}
441 
442 	mutex_destroy(&lldev->bf_pool.pool_lock);
443 	return (DDI_SUCCESS);
444 }
445 
446 /*
447  * xgell_rx_create_buffer_pool
448  *
449  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
450  */
451 static int
452 xgell_rx_create_buffer_pool(xgelldev_t *lldev)
453 {
454 	xge_hal_device_t *hldev;
455 	xgell_rx_buffer_t *rx_buffer;
456 	int i;
457 
458 	hldev = (xge_hal_device_t *)lldev->devh;
459 
460 	lldev->bf_pool.total = 0;
461 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
462 	lldev->bf_pool.head = NULL;
463 	lldev->bf_pool.free = 0;
464 	lldev->bf_pool.post = 0;
465 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
466 	lldev->bf_pool.recycle_hiwat = lldev->config.rx_buffer_recycle_hiwat;
467 
468 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
469 	    hldev->irqh);
470 
471 	/*
472 	 * Allocate buffers one by one.  If any allocation fails, destroy the
473 	 * whole pool by calling xgell_rx_destroy_buffer_pool().
474 	 */
475 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
476 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
477 			(void) xgell_rx_destroy_buffer_pool(lldev);
478 			return (DDI_FAILURE);
479 		}
480 
481 		rx_buffer->next = lldev->bf_pool.head;
482 		lldev->bf_pool.head = rx_buffer;
483 
484 		lldev->bf_pool.total++;
485 		lldev->bf_pool.free++;
486 	}
487 
488 	return (DDI_SUCCESS);
489 }
490 
491 /*
492  * xgell_rx_dtr_replenish
493  *
494  * Replenish a descriptor with an rx_buffer from the RX buffer pool.
495  * The dtr should be posted right away.
496  */
497 xge_hal_status_e
498 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
499     void *userdata, xge_hal_channel_reopen_e reopen)
500 {
501 	xgell_ring_t *ring = userdata;
502 	xgelldev_t *lldev = ring->lldev;
503 	xgell_rx_buffer_t *rx_buffer;
504 	xgell_rxd_priv_t *rxd_priv;
505 
506 	if (lldev->bf_pool.head == NULL) {
507 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
508 		return (XGE_HAL_FAIL);
509 	}
510 	rx_buffer = lldev->bf_pool.head;
511 	lldev->bf_pool.head = rx_buffer->next;
512 	lldev->bf_pool.free--;
513 
514 	xge_assert(rx_buffer);
515 	xge_assert(rx_buffer->dma_addr);
516 
517 	rxd_priv = (xgell_rxd_priv_t *)
518 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
519 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
520 
521 	rxd_priv->rx_buffer = rx_buffer;
522 
523 	return (XGE_HAL_OK);
524 }
525 
526 /*
527  * xgell_get_ip_offset
528  *
529  * Calculate the offset to IP header.
530  */
531 static inline int
532 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
533 {
534 	int ip_off;
535 
536 	/* get IP-header offset */
537 	switch (ext_info->frame) {
538 	case XGE_HAL_FRAME_TYPE_DIX:
539 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
540 		break;
541 	case XGE_HAL_FRAME_TYPE_IPX:
542 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
543 		    XGE_HAL_HEADER_802_2_SIZE +
544 		    XGE_HAL_HEADER_SNAP_SIZE);
545 		break;
546 	case XGE_HAL_FRAME_TYPE_LLC:
547 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
548 		    XGE_HAL_HEADER_802_2_SIZE);
549 		break;
550 	case XGE_HAL_FRAME_TYPE_SNAP:
551 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
552 		    XGE_HAL_HEADER_SNAP_SIZE);
553 		break;
554 	default:
555 		ip_off = 0;
556 		break;
557 	}
558 
559 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
560 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
561 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
562 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
563 	}
564 
565 	return (ip_off);
566 }
567 
568 /*
569  * xgell_rx_hcksum_assoc
570  *
571  * Determine the packet type and then call hcksum_assoc() to associate the
572  * h/w checksum information.
573  */
574 static inline void
575 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
576     xge_hal_dtr_info_t *ext_info)
577 {
578 	int cksum_flags = 0;
579 	int ip_off;
580 
581 	ip_off = xgell_get_ip_offset(ext_info);
582 
583 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
584 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
585 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
586 				cksum_flags |= HCK_IPV4_HDRCKSUM;
587 			}
588 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
589 				cksum_flags |= HCK_FULLCKSUM_OK;
590 			}
591 			if (cksum_flags) {
592 				cksum_flags |= HCK_FULLCKSUM;
593 				(void) hcksum_assoc(mp, NULL, NULL, 0,
594 				    0, 0, 0, cksum_flags, 0);
595 			}
596 		}
597 	} else if (ext_info->proto &
598 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
599 		/*
600 		 * Just pass the partial cksum up to IP.
601 		 */
602 		int start, end = pkt_length - ip_off;
603 
604 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
605 			struct ip *ip =
606 			    (struct ip *)(vaddr + ip_off);
607 			start = ip->ip_hl * 4 + ip_off;
608 		} else {
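			/* IPv6: the base header is a fixed 40 bytes long. */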
609 			start = ip_off + 40;
610 		}
611 		cksum_flags |= HCK_PARTIALCKSUM;
612 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
613 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
614 		    0);
615 	}
616 }
617 
618 /*
619  * xgell_rx_1b_msg_alloc
620  *
621  * Allocate a message header for the data buffer, and decide whether to copy
622  * the packet into a new data buffer so the big rx_buffer can be released to
623  * save memory.
624  *
625  * If pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT, call allocb() to allocate a
626  * new message and copy the payload in.
626  */
627 static mblk_t *
628 xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
629     xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
630 {
631 	mblk_t *mp;
632 	mblk_t *nmp = NULL;
633 	char *vaddr;
634 	int hdr_length = 0;
635 
636 #ifdef XGELL_L3_ALIGNED
637 	int doalign = 1;
638 	struct ip *ip;
639 	struct tcphdr *tcp;
640 	int tcp_off;
641 	int mp_align_len;
642 	int ip_off;
643 
644 #endif
645 
646 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
647 #ifdef XGELL_L3_ALIGNED
648 	ip_off = xgell_get_ip_offset(ext_info);
649 
650 	/* Check ip_off with HEADROOM */
651 	if ((ip_off & 3) == HEADROOM) {
652 		doalign = 0;
653 	}
654 
655 	/*
656 	 * Doalign? Check for types of packets.
657 	 */
658 	/* Is IPv4 or IPv6? */
659 	if (doalign && !(ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
660 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6)) {
661 		doalign = 0;
662 	}
663 
664 	/* Is TCP? */
665 	if (doalign &&
666 	    ((ip = (struct ip *)(vaddr + ip_off))->ip_p == IPPROTO_TCP)) {
667 		tcp_off = ip->ip_hl * 4 + ip_off;
668 		tcp = (struct tcphdr *)(vaddr + tcp_off);
669 		hdr_length = tcp_off + tcp->th_off * 4;
670 		if (pkt_length < (XGE_HAL_TCPIP_HEADER_MAX_SIZE +
671 		    XGE_HAL_MAC_HEADER_MAX_SIZE)) {
672 			hdr_length = pkt_length;
673 		}
674 	} else {
675 		doalign = 0;
676 	}
677 #endif
678 
679 	/*
680 	 * Copy the packet into a newly allocated message buffer if requested,
681 	 * or if pkt_length is less than XGELL_DMA_BUFFER_SIZE_LOWAT.
682 	 */
683 	if (*copyit || pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT) {
684 		/* Keep room for alignment */
685 		if ((mp = allocb(pkt_length + HEADROOM + 4, 0)) == NULL) {
686 			return (NULL);
687 		}
688 #ifdef XGELL_L3_ALIGNED
689 		if (doalign) {
690 			mp_align_len =
691 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
692 			mp->b_rptr += mp_align_len;
693 		}
694 #endif
695 		bcopy(vaddr, mp->b_rptr, pkt_length);
696 		mp->b_wptr = mp->b_rptr + pkt_length;
697 		*copyit = B_TRUE;
698 		return (mp);
699 	}
700 
701 	/*
702 	 * Just allocate mblk for current data buffer
703 	 */
704 	if ((nmp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
705 	    &rx_buffer->frtn)) == NULL) {
706 		/* Drop it */
707 		return (NULL);
708 	}
709 
710 	/*
711 	 * Adjust the b_rptr/b_wptr in the mblk_t structure to point to
712 	 * payload.
713 	 */
714 	nmp->b_rptr += hdr_length;
715 	nmp->b_wptr += pkt_length;
716 
717 #ifdef XGELL_L3_ALIGNED
718 	if (doalign) {
719 		if ((mp = esballoc(rx_buffer->header, hdr_length + 4, 0,
720 		    &header_frtn)) == NULL) {
721 			/* can not align! */
722 			mp = nmp;
723 			mp->b_rptr = (u8 *)vaddr;
724 			mp->b_wptr = mp->b_rptr + pkt_length;
725 			mp->b_next = NULL;
726 			mp->b_cont = NULL;
727 		} else {
728 			/* align packet's ip-header offset */
729 			mp_align_len =
730 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
731 			mp->b_rptr += mp_align_len;
732 			mp->b_wptr += mp_align_len + hdr_length;
733 			mp->b_cont = nmp;
734 			mp->b_next = NULL;
735 			nmp->b_cont = NULL;
736 			nmp->b_next = NULL;
737 
738 			bcopy(vaddr, mp->b_rptr, hdr_length);
739 		}
740 	} else {
741 		/* no need to align */
742 		mp = nmp;
743 		mp->b_next = NULL;
744 		mp->b_cont = NULL;
745 	}
746 #else
747 	mp = nmp;
748 	mp->b_next = NULL;
749 	mp->b_cont = NULL;
750 #endif
751 
752 	return (mp);
753 }
754 
755 /*
756  * xgell_rx_1b_compl
757  *
758  * Called when an interrupt indicates a received frame, or when the receive
759  * ring contains fresh, as yet unprocessed frames.
760  */
761 static xge_hal_status_e
762 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
763     void *userdata)
764 {
765 	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
766 	xgell_rx_buffer_t *rx_buffer;
767 	mblk_t *mp_head = NULL;
768 	mblk_t *mp_end  = NULL;
769 
770 	do {
771 		int ret;
772 		int pkt_length;
773 		dma_addr_t dma_data;
774 		mblk_t *mp;
775 
776 		boolean_t copyit = B_FALSE;
777 
778 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
779 		    xge_hal_ring_dtr_private(channelh, dtr));
780 		xge_hal_dtr_info_t ext_info;
781 
782 		rx_buffer = rxd_priv->rx_buffer;
783 
784 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
785 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
786 
787 		xge_assert(dma_data == rx_buffer->dma_addr);
788 
789 		if (t_code != 0) {
790 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
791 			    " completed due to error t_code %01x", XGELL_IFNAME,
792 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
793 
794 			(void) xge_hal_device_handle_tcode(channelh, dtr,
795 			    t_code);
796 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
797 			xgell_rx_buffer_release(rx_buffer);
798 			continue;
799 		}
800 
801 		/*
802 		 * Sync the DMA memory
803 		 */
804 		ret = ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
805 		    DDI_DMA_SYNC_FORKERNEL);
806 		if (ret != DDI_SUCCESS) {
807 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
808 			    XGELL_IFNAME, lldev->instance);
809 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
810 			xgell_rx_buffer_release(rx_buffer);
811 			continue;
812 		}
813 
814 		/*
815 		 * Allocate message for the packet.
816 		 */
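		/*
		 * If too many buffers are already loaned out (above the
		 * post_hiwat mark), copy the packet so this DMA buffer can be
		 * recycled immediately instead of being loaned up as well.
		 */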
817 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
818 			copyit = B_TRUE;
819 		} else {
820 			copyit = B_FALSE;
821 		}
822 
823 		mp = xgell_rx_1b_msg_alloc(rx_buffer, pkt_length, &ext_info,
824 		    &copyit);
825 
826 		xge_hal_ring_dtr_free(channelh, dtr);
827 
828 		/*
829 		 * Release the buffer and recycle it later
830 		 */
831 		if ((mp == NULL) || copyit) {
832 			xgell_rx_buffer_release(rx_buffer);
833 		} else {
834 			/*
835 			 * Count it, since the buffer is being loaned upstream.
836 			 */
837 			mutex_enter(&lldev->bf_pool.pool_lock);
838 			lldev->bf_pool.post++;
839 			mutex_exit(&lldev->bf_pool.pool_lock);
840 		}
841 		if (mp == NULL) {
842 			xge_debug_ll(XGE_ERR,
843 			    "%s%d: rx: can not allocate mp mblk", XGELL_IFNAME,
844 			    lldev->instance);
845 			continue;
846 		}
847 
848 		/*
849 		 * Associate cksum_flags per packet type and h/w cksum flags.
850 		 */
851 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
852 		    HEADROOM, pkt_length, &ext_info);
853 
854 		if (mp_head == NULL) {
855 			mp_head = mp;
856 			mp_end = mp;
857 		} else {
858 			mp_end->b_next = mp;
859 			mp_end = mp;
860 		}
861 
862 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
863 	    XGE_HAL_OK);
864 
865 	if (mp_head) {
866 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
867 	}
868 
869 	/*
870 	 * Always call replenish_all to recycle rx_buffers.
871 	 */
872 	mutex_enter(&lldev->bf_pool.pool_lock);
873 	xgell_rx_buffer_replenish_all(lldev);
874 	mutex_exit(&lldev->bf_pool.pool_lock);
875 
876 	return (XGE_HAL_OK);
877 }
878 
879 /*
880  * xgell_xmit_compl
881  *
882  * If an interrupt was raised to indicate DMA completion of a Tx packet,
883  * this function is called.  It identifies the last TxD whose buffer was
884  * freed and frees all mblks whose data have already been DMA'ed into the
885  * NIC's internal memory.
886  */
887 static xge_hal_status_e
888 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
889     void *userdata)
890 {
891 	xgelldev_t *lldev = userdata;
892 
893 	do {
894 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
895 		    xge_hal_fifo_dtr_private(dtr));
896 		mblk_t *mp = txd_priv->mblk;
897 #if !defined(XGELL_TX_NOMAP_COPY)
898 		int i;
899 #endif
900 
901 		if (t_code) {
902 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
903 			    " completed due to error t_code %01x", XGELL_IFNAME,
904 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
905 
906 			(void) xge_hal_device_handle_tcode(channelh, dtr,
907 			    t_code);
908 		}
909 
910 #if !defined(XGELL_TX_NOMAP_COPY)
911 		for (i = 0; i < txd_priv->handle_cnt; i++) {
912 			xge_assert(txd_priv->dma_handles[i]);
913 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
914 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
915 			txd_priv->dma_handles[i] = 0;
916 		}
917 #endif
918 
919 		xge_hal_fifo_dtr_free(channelh, dtr);
920 
921 		freemsg(mp);
922 		lldev->resched_avail++;
923 
924 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
925 	    XGE_HAL_OK);
926 
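	/*
	 * If xgell_send() had to back off for lack of descriptors
	 * (resched_retry), produce a RESCHED event so the MAC layer can be
	 * told that transmission may resume.
	 */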
927 	if (lldev->resched_retry &&
928 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
929 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
930 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
931 		    XGELL_IFNAME, lldev->instance,
932 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
933 		lldev->resched_send = lldev->resched_avail;
934 		lldev->resched_retry = 0;
935 	}
936 
937 	return (XGE_HAL_OK);
938 }
939 
940 /*
941  * xgell_send
942  * @hldev: pointer to xge_hal_device_t structure
943  * @mp: pointer to network buffer, i.e. mblk_t structure
944  *
945  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
946  * A pointer to an M_DATA message that contains the packet is passed to
947  * this routine.
948  */
949 static boolean_t
950 xgell_send(xge_hal_device_t *hldev, mblk_t *mp)
951 {
952 	mblk_t *bp;
953 	int retry, repeat;
954 	xge_hal_status_e status;
955 	xge_hal_dtr_h dtr;
956 	xgelldev_t *lldev = xge_hal_device_private(hldev);
957 	xgell_txd_priv_t *txd_priv;
958 	uint32_t pflags;
959 #ifndef XGELL_TX_NOMAP_COPY
960 	int handle_cnt, frag_cnt, ret, i;
961 #endif
962 
963 _begin:
964 	retry = repeat = 0;
965 #ifndef XGELL_TX_NOMAP_COPY
966 	handle_cnt = frag_cnt = 0;
967 #endif
968 
969 	if (!lldev->is_initialized || lldev->in_reset)
970 		return (B_FALSE);
971 
972 	/*
973 	 * If the free Tx dtr count reaches the lower threshold, tell the MAC
974 	 * layer to stop sending more packets until the free dtr count again
975 	 * exceeds the higher threshold.  The driver signals that transmission
976 	 * may resume by producing a RESCHED event once enough transmit
977 	 * descriptors have completed.
978 	 */
979 	if (__hal_channel_dtr_count(lldev->fifo_channel)
980 	    <= XGELL_TX_LEVEL_LOW) {
981 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
982 		    "free descriptors count at low threshold %d",
983 		    XGELL_IFNAME, lldev->instance,
984 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
985 		    XGELL_TX_LEVEL_LOW);
986 		retry = 1;
987 		goto _exit;
988 	}
989 
990 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
991 	if (status != XGE_HAL_OK) {
992 		switch (status) {
993 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
994 			xge_debug_ll(XGE_ERR,
995 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
996 			    lldev->instance,
997 			    ((xge_hal_channel_t *)
998 			    lldev->fifo_channel)->post_qid);
999 			retry = 1;
1000 			goto _exit;
1001 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
1002 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
1003 			    " out of descriptors.", XGELL_IFNAME,
1004 			    lldev->instance,
1005 			    ((xge_hal_channel_t *)
1006 			    lldev->fifo_channel)->post_qid);
1007 			retry = 1;
1008 			goto _exit;
1009 		default:
1010 			return (B_FALSE);
1011 		}
1012 	}
1013 
1014 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1015 	txd_priv->mblk = mp;
1016 
1017 	/*
1018 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1019 	 * do insertion.
1020 	 *
1021 	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
1022 	 * example below shows another possible implementation for xge.  The
1023 	 * driver can simply bcopy() the ether_vlan_header to overwrite the
1024 	 * VLAN tag and let the h/w insert the tag automatically, since GLD
1025 	 * never sends down mp(s) with a split ether_vlan_header.
1026 	 *
1027 	 * struct ether_vlan_header *evhp;
1028 	 * uint16_t tci;
1029 	 *
1030 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1031 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1032 	 * 	tci = ntohs(evhp->ether_tci);
1033 	 * 	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1034 	 *	    2 * ETHERADDRL);
1035 	 * 	mp->b_rptr += VLAN_TAGSZ;
1036 	 *
1037 	 * 	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1038 	 * }
1039 	 */
1040 
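	/*
	 * Two transmit paths follow.  With XGELL_TX_NOMAP_COPY the fragments
	 * are appended into a HAL-managed buffer (no per-fragment DMA mapping);
	 * otherwise each mblk fragment is DMA-bound and its cookies are set
	 * into the descriptor directly.
	 */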
1041 #ifdef XGELL_TX_NOMAP_COPY
1042 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1043 		int mblen;
1044 		xge_hal_status_e rc;
1045 
1046 		/* skip zero-length message blocks */
1047 		mblen = MBLKL(bp);
1048 		if (mblen == 0) {
1049 			continue;
1050 		}
1051 		rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel, dtr,
1052 		    bp->b_rptr, mblen);
1053 		xge_assert(rc == XGE_HAL_OK);
1054 	}
1055 	xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr, 0);
1056 #else
1057 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1058 		int mblen;
1059 		uint_t ncookies;
1060 		ddi_dma_cookie_t dma_cookie;
1061 		ddi_dma_handle_t dma_handle;
1062 
1063 		/* skip zero-length message blocks */
1064 		mblen = MBLKL(bp);
1065 		if (mblen == 0) {
1066 			continue;
1067 		}
1068 
1069 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1070 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1071 		if (ret != DDI_SUCCESS) {
1072 			xge_debug_ll(XGE_ERR,
1073 			    "%s%d: can not allocate dma handle",
1074 			    XGELL_IFNAME, lldev->instance);
1075 			goto _exit_cleanup;
1076 		}
1077 
1078 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1079 		    (caddr_t)bp->b_rptr, mblen,
1080 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1081 		    &dma_cookie, &ncookies);
1082 
1083 		switch (ret) {
1084 		case DDI_DMA_MAPPED:
1085 			/* everything's fine */
1086 			break;
1087 
1088 		case DDI_DMA_NORESOURCES:
1089 			xge_debug_ll(XGE_ERR,
1090 			    "%s%d: can not bind dma address",
1091 			    XGELL_IFNAME, lldev->instance);
1092 			ddi_dma_free_handle(&dma_handle);
1093 			goto _exit_cleanup;
1094 
1095 		case DDI_DMA_NOMAPPING:
1096 		case DDI_DMA_INUSE:
1097 		case DDI_DMA_TOOBIG:
1098 		default:
1099 			/* drop packet, don't retry */
1100 			xge_debug_ll(XGE_ERR,
1101 			    "%s%d: can not map message buffer",
1102 			    XGELL_IFNAME, lldev->instance);
1103 			ddi_dma_free_handle(&dma_handle);
1104 			goto _exit_cleanup;
1105 		}
1106 
1107 		if (ncookies + frag_cnt > XGE_HAL_DEFAULT_FIFO_FRAGS) {
1108 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1109 			    "requested c:%d+f:%d", XGELL_IFNAME,
1110 			    lldev->instance, ncookies, frag_cnt);
1111 			(void) ddi_dma_unbind_handle(dma_handle);
1112 			ddi_dma_free_handle(&dma_handle);
1113 			goto _exit_cleanup;
1114 		}
1115 
1116 		/* setup the descriptors for this data buffer */
1117 		while (ncookies) {
1118 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1119 			    frag_cnt++, dma_cookie.dmac_laddress,
1120 			    dma_cookie.dmac_size);
1121 			if (--ncookies) {
1122 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1123 			}
1124 
1125 		}
1126 
1127 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1128 
1129 		if (bp->b_cont &&
1130 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1131 		    XGE_HAL_DEFAULT_FIFO_FRAGS)) {
1132 			mblk_t *nmp;
1133 
1134 			xge_debug_ll(XGE_TRACE,
1135 			    "too many FRAGs [%d], pulling them up", frag_cnt);
1136 
1137 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1138 				/* Drop packet, don't retry */
1139 				xge_debug_ll(XGE_ERR,
1140 				    "%s%d: can not pullup message buffer",
1141 				    XGELL_IFNAME, lldev->instance);
1142 				goto _exit_cleanup;
1143 			}
1144 			freemsg(bp->b_cont);
1145 			bp->b_cont = nmp;
1146 		}
1147 	}
1148 
1149 	txd_priv->handle_cnt = handle_cnt;
1150 #endif /* XGELL_TX_NOMAP_COPY */
1151 
1152 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
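	/*
	 * Propagate any checksum offload request from the stack into the
	 * descriptor's checksum-enable bits.
	 */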
1153 	if (pflags & HCK_IPV4_HDRCKSUM) {
1154 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1155 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1156 	}
1157 	if (pflags & HCK_FULLCKSUM) {
1158 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1159 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1160 	}
1161 
1162 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1163 
1164 	return (B_TRUE);
1165 
1166 _exit_cleanup:
1167 
1168 #if !defined(XGELL_TX_NOMAP_COPY)
1169 	for (i = 0; i < handle_cnt; i++) {
1170 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1171 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1172 		txd_priv->dma_handles[i] = 0;
1173 	}
1174 #endif
1175 
1176 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1177 
1178 	if (repeat) {
1179 		goto _begin;
1180 	}
1181 
1182 _exit:
1183 	if (retry) {
1184 		if (lldev->resched_avail != lldev->resched_send &&
1185 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1186 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1187 			lldev->resched_send = lldev->resched_avail;
1188 			return (B_FALSE);
1189 		} else {
1190 			lldev->resched_retry = 1;
1191 		}
1192 	}
1193 
1194 	freemsg(mp);
1195 	return (B_TRUE);
1196 }
1197 
1198 /*
1199  * xgell_m_tx
1200  * @arg: pointer to the xge_hal_device_t structure
1201  * @mp: pointer to the chain of message buffers
1202  *
1203  * Called by the MAC layer to send a chain of packets.
1205  */
1206 static mblk_t *
1207 xgell_m_tx(void *arg, mblk_t *mp)
1208 {
1209 	xge_hal_device_t *hldev = arg;
1210 	mblk_t *next;
1211 
1212 	while (mp != NULL) {
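	/*
	 * Send the chain one packet at a time.  If xgell_send() cannot accept
	 * a packet, re-link it and return the unsent remainder so the MAC
	 * layer can resubmit it later.
	 */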
1213 		next = mp->b_next;
1214 		mp->b_next = NULL;
1215 
1216 		if (!xgell_send(hldev, mp)) {
1217 			mp->b_next = next;
1218 			break;
1219 		}
1220 		mp = next;
1221 	}
1222 
1223 	return (mp);
1224 }
1225 
1226 /*
1227  * xgell_rx_dtr_term
1228  *
1229  * Called by the HAL to terminate all DTRs for
1230  * Ring-type channels.
1231  */
1232 static void
1233 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1234     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1235 {
1236 	xgell_rxd_priv_t *rxd_priv =
1237 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1238 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1239 
1240 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1241 		xge_hal_ring_dtr_free(channelh, dtrh);
1242 		xgell_rx_buffer_release(rx_buffer);
1243 	}
1244 }
1245 
1246 /*
1247  * xgell_tx_term
1248  *
1249  * Called by the HAL to terminate all DTRs for
1250  * Fifo-type channels.
1251  */
1252 static void
1253 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1254     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1255 {
1256 	xgell_txd_priv_t *txd_priv =
1257 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1258 	mblk_t *mp = txd_priv->mblk;
1259 #if !defined(XGELL_TX_NOMAP_COPY)
1260 	int i;
1261 #endif
1262 	/*
1263 	 * for Tx we must clean up the DTR *only* if it has been
1264 	 * posted!
1265 	 */
1266 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1267 		return;
1268 	}
1269 
1270 #if !defined(XGELL_TX_NOMAP_COPY)
1271 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1272 		xge_assert(txd_priv->dma_handles[i]);
1273 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1274 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1275 		txd_priv->dma_handles[i] = 0;
1276 	}
1277 #endif
1278 
1279 	xge_hal_fifo_dtr_free(channelh, dtrh);
1280 
1281 	freemsg(mp);
1282 }
1283 
1284 /*
1285  * xgell_tx_open
1286  * @lldev: the link layer object
1287  *
1288  * Initialize and open all Tx channels.
1289  */
1290 static boolean_t
1291 xgell_tx_open(xgelldev_t *lldev)
1292 {
1293 	xge_hal_status_e status;
1294 	u64 adapter_status;
1295 	xge_hal_channel_attr_t attr;
1296 
1297 	attr.post_qid		= 0;
1298 	attr.compl_qid		= 0;
1299 	attr.callback		= xgell_xmit_compl;
1300 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1301 	attr.flags		= 0;
1302 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1303 	attr.userdata		= lldev;
1304 	attr.dtr_init		= NULL;
1305 	attr.dtr_term		= xgell_tx_term;
1306 
1307 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1308 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1309 		    "adapter status reads 0x%"PRIx64, XGELL_IFNAME,
1310 		    lldev->instance, (uint64_t)adapter_status);
1311 		return (B_FALSE);
1312 	}
1313 
1314 	status = xge_hal_channel_open(lldev->devh, &attr,
1315 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1316 	if (status != XGE_HAL_OK) {
1317 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1318 		    "got status code %d", XGELL_IFNAME,
1319 		    lldev->instance, status);
1320 		return (B_FALSE);
1321 	}
1322 
1323 	return (B_TRUE);
1324 }
1325 
1326 /*
1327  * xgell_rx_open
1328  * @lldev: the link layer object
1329  *
1330  * Initialize and open all Rx channels.
1331  */
1332 static boolean_t
1333 xgell_rx_open(xgelldev_t *lldev)
1334 {
1335 	xge_hal_status_e status;
1336 	u64 adapter_status;
1337 	xge_hal_channel_attr_t attr;
1338 
1339 	attr.post_qid		= XGELL_RING_MAIN_QID;
1340 	attr.compl_qid		= 0;
1341 	attr.callback		= xgell_rx_1b_compl;
1342 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1343 	attr.flags		= 0;
1344 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1345 	attr.dtr_init		= xgell_rx_dtr_replenish;
1346 	attr.dtr_term		= xgell_rx_dtr_term;
1347 
1348 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1349 		xge_debug_ll(XGE_ERR,
1350 		    "%s%d: device is not ready, adapter status reads 0x%"PRIx64",
1351 		    XGELL_IFNAME, lldev->instance,
1352 		    (uint64_t)adapter_status);
1353 		return (B_FALSE);
1354 	}
1355 
1356 	lldev->ring_main.lldev = lldev;
1357 	attr.userdata = &lldev->ring_main;
1358 
1359 	status = xge_hal_channel_open(lldev->devh, &attr,
1360 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1361 	if (status != XGE_HAL_OK) {
1362 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, got status "
1363 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1364 		return (B_FALSE);
1365 	}
1366 
1367 	return (B_TRUE);
1368 }
1369 
1370 static int
1371 xgell_initiate_start(xgelldev_t *lldev)
1372 {
1373 	xge_hal_status_e status;
1374 	xge_hal_device_t *hldev = lldev->devh;
1375 	int maxpkt = hldev->config.mtu;
1376 
1377 	/* check initial mtu before enabling the device */
1378 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1379 	if (status != XGE_HAL_OK) {
1380 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1381 		    XGELL_IFNAME, lldev->instance, maxpkt);
1382 		return (EINVAL);
1383 	}
1384 
1385 	/* set initial mtu before enabling the device */
1386 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1387 	if (status != XGE_HAL_OK) {
1388 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1389 		    XGELL_IFNAME, lldev->instance, maxpkt);
1390 		return (EIO);
1391 	}
1392 
1393 	/* now, enable the device */
1394 	status = xge_hal_device_enable(lldev->devh);
1395 	if (status != XGE_HAL_OK) {
1396 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1397 		    XGELL_IFNAME, lldev->instance);
1398 		return (EIO);
1399 	}
1400 
1401 	if (!xgell_rx_open(lldev)) {
1402 		status = xge_hal_device_disable(lldev->devh);
1403 		if (status != XGE_HAL_OK) {
1404 			u64 adapter_status;
1405 			(void) xge_hal_device_status(lldev->devh,
1406 			    &adapter_status);
1407 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1408 			    "the device. adapter status 0x%"PRIx64
1409 			    " returned status %d",
1410 			    XGELL_IFNAME, lldev->instance,
1411 			    (uint64_t)adapter_status, status);
1412 		}
1413 		xge_os_mdelay(1500);
1414 		return (ENOMEM);
1415 	}
1416 
1417 #ifdef XGELL_TX_NOMAP_COPY
1418 	hldev->config.fifo.alignment_size = XGELL_MAX_FRAME_SIZE(hldev);
1419 #endif
1420 
1421 	if (!xgell_tx_open(lldev)) {
1422 		status = xge_hal_device_disable(lldev->devh);
1423 		if (status != XGE_HAL_OK) {
1424 			u64 adapter_status;
1425 			(void) xge_hal_device_status(lldev->devh,
1426 			    &adapter_status);
1427 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1428 			    "the device. adapter status 0x%"PRIx64
1429 			    " returned status %d",
1430 			    XGELL_IFNAME, lldev->instance,
1431 			    (uint64_t)adapter_status, status);
1432 		}
1433 		xge_os_mdelay(1500);
1434 		xge_hal_channel_close(lldev->ring_main.channelh,
1435 		    XGE_HAL_CHANNEL_OC_NORMAL);
1436 		return (ENOMEM);
1437 	}
1438 
1439 	/* time to enable interrupts */
1440 	xge_hal_device_intr_enable(lldev->devh);
1441 
1442 	lldev->is_initialized = 1;
1443 
1444 	return (0);
1445 }
1446 
1447 static void
1448 xgell_initiate_stop(xgelldev_t *lldev)
1449 {
1450 	xge_hal_status_e status;
1451 
1452 	lldev->is_initialized = 0;
1453 
1454 	status = xge_hal_device_disable(lldev->devh);
1455 	if (status != XGE_HAL_OK) {
1456 		u64 adapter_status;
1457 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1458 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1459 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1460 		    XGELL_IFNAME, lldev->instance,
1461 		    (uint64_t)adapter_status, status);
1462 	}
1463 	xge_hal_device_intr_disable(lldev->devh);
1464 
1465 	xge_debug_ll(XGE_TRACE, "%s",
1466 	    "waiting for device irq to become quiescent...");
1467 	xge_os_mdelay(1500);
1468 
1469 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1470 
1471 	xge_hal_channel_close(lldev->ring_main.channelh,
1472 	    XGE_HAL_CHANNEL_OC_NORMAL);
1473 
1474 	xge_hal_channel_close(lldev->fifo_channel,
1475 	    XGE_HAL_CHANNEL_OC_NORMAL);
1476 }
1477 
1478 /*
1479  * xgell_m_start
1480  * @arg: pointer to device private structure (hldev)
1481  *
1482  * This function is called by MAC Layer to enable the XFRAME
1483  * firmware to generate interrupts and also prepare the
1484  * driver to call mac_rx for delivering receive packets
1485  * to MAC Layer.
1486  */
1487 static int
1488 xgell_m_start(void *arg)
1489 {
1490 	xge_hal_device_t *hldev = arg;
1491 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1492 	int ret;
1493 
1494 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1495 	    lldev->instance);
1496 
1497 	mutex_enter(&lldev->genlock);
1498 
1499 	if (lldev->is_initialized) {
1500 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1501 		    XGELL_IFNAME, lldev->instance);
1502 		mutex_exit(&lldev->genlock);
1503 		return (EINVAL);
1504 	}
1505 
1506 	hldev->terminating = 0;
1507 	if (ret = xgell_initiate_start(lldev)) {
1508 		mutex_exit(&lldev->genlock);
1509 		return (ret);
1510 	}
1511 
1512 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1513 
1514 	if (!lldev->timeout_id) {
1515 		xgell_initiate_stop(lldev);
1516 		mutex_exit(&lldev->genlock);
1517 		return (EINVAL);
1518 	}
1519 
1520 	mutex_exit(&lldev->genlock);
1521 
1522 	return (0);
1523 }
1524 
1525 /*
1526  * xgell_m_stop
1527  * @arg: pointer to device private data (hldev)
1528  *
1529  * This function is called by the MAC Layer to disable
1530  * the XFRAME firmware from generating any interrupts and
1531  * also stop the driver from calling mac_rx() for
1532  * delivering data packets to the MAC Layer.
1533  */
1534 static void
1535 xgell_m_stop(void *arg)
1536 {
1537 	xge_hal_device_t *hldev;
1538 	xgelldev_t *lldev;
1539 
1540 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1541 
1542 	hldev = arg;
1543 	xge_assert(hldev);
1544 
1545 	lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1546 	xge_assert(lldev);
1547 
1548 	mutex_enter(&lldev->genlock);
1549 	if (!lldev->is_initialized) {
1550 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1551 		mutex_exit(&lldev->genlock);
1552 		return;
1553 	}
1554 
1555 	xge_hal_device_terminating(hldev);
1556 	xgell_initiate_stop(lldev);
1557 
1558 	/* reset device */
1559 	(void) xge_hal_device_reset(lldev->devh);
1560 
1561 	mutex_exit(&lldev->genlock);
1562 
1563 	(void) untimeout(lldev->timeout_id);
1564 
1565 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1566 }
1567 
1568 /*
1569  * xgell_onerr_reset
1570  * @lldev: pointer to xgelldev_t structure
1571  *
1572  * This function is called by the HAL event framework to reset the HW.
1573  * It must be called with genlock held.
1574  */
1575 int
1576 xgell_onerr_reset(xgelldev_t *lldev)
1577 {
1578 	int rc = 0;
1579 
1580 	if (!lldev->is_initialized) {
1581 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1582 		    XGELL_IFNAME, lldev->instance);
1583 		return (rc);
1584 	}
1585 
1586 	lldev->in_reset = 1;
1587 	xgell_initiate_stop(lldev);
1588 
1589 	/* reset device */
1590 	(void) xge_hal_device_reset(lldev->devh);
1591 
1592 	rc = xgell_initiate_start(lldev);
1593 	lldev->in_reset = 0;
1594 
1595 	return (rc);
1596 }
1597 
1598 
1599 /*
1600  * xgell_m_unicst
1601  * @arg: pointer to device private structure (hldev)
1602  * @macaddr: MAC address to be set
1603  *
1604  * This function is called by MAC Layer to set the physical address
1605  * of the XFRAME firmware.
1606  */
1607 static int
1608 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1609 {
1610 	xge_hal_status_e status;
1611 	xge_hal_device_t *hldev = arg;
1612 	xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1613 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1614 
1615 	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1616 
1617 	mutex_enter(&lldev->genlock);
1618 
1619 	xge_debug_ll(XGE_TRACE,
1620 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1621 	    macaddr[0], macaddr[1], macaddr[2],
1622 	    macaddr[3], macaddr[4], macaddr[5]);
1623 
1624 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1625 	if (status != XGE_HAL_OK) {
1626 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1627 		    XGELL_IFNAME, lldev->instance);
1628 		mutex_exit(&lldev->genlock);
1629 		return (EIO);
1630 	}
1631 
1632 	mutex_exit(&lldev->genlock);
1633 
1634 	return (0);
1635 }
1636 
1637 
1638 /*
1639  * xgell_m_multicst
1640  * @arg: pointer to device private structure (hldev)
1641  * @add: B_TRUE to enable reception, B_FALSE to disable
1642  * @mc_addr: multicast address
1643  *
1644  * This function is called by MAC Layer to enable or
1645  * disable device-level reception of specific multicast addresses.
1646  */
1647 static int
1648 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1649 {
1650 	xge_hal_status_e status;
1651 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1652 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1653 
1654 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1655 
1656 	mutex_enter(&lldev->genlock);
1657 
1658 	if (!lldev->is_initialized) {
1659 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1660 		    XGELL_IFNAME, lldev->instance);
1661 		mutex_exit(&lldev->genlock);
1662 		return (EIO);
1663 	}
1664 
1665 	/* FIXME: missing HAL functionality: enable_one() */
1666 
1667 	status = (add) ?
1668 	    xge_hal_device_mcast_enable(hldev) :
1669 	    xge_hal_device_mcast_disable(hldev);
1670 
1671 	if (status != XGE_HAL_OK) {
1672 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1673 		    add ? "enable" : "disable", status);
1674 		mutex_exit(&lldev->genlock);
1675 		return (EIO);
1676 	}
1677 
1678 	mutex_exit(&lldev->genlock);
1679 
1680 	return (0);
1681 }
1682 
1683 
1684 /*
1685  * xgell_m_promisc
1686  * @arg: pointer to device private structure (hldev)
1687  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable
1688  *
1689  * This function is called by MAC Layer to enable or
1690  * disable the reception of all the packets on the medium
1691  */
1692 static int
1693 xgell_m_promisc(void *arg, boolean_t on)
1694 {
1695 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1696 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1697 
1698 	mutex_enter(&lldev->genlock);
1699 
1700 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1701 
1702 	if (!lldev->is_initialized) {
1703 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1704 		    XGELL_IFNAME, lldev->instance);
1705 		mutex_exit(&lldev->genlock);
1706 		return (EIO);
1707 	}
1708 
1709 	if (on) {
1710 		xge_hal_device_promisc_enable(hldev);
1711 	} else {
1712 		xge_hal_device_promisc_disable(hldev);
1713 	}
1714 
1715 	mutex_exit(&lldev->genlock);
1716 
1717 	return (0);
1718 }
1719 
1720 /*
1721  * xgell_m_stat
1722  * @arg: pointer to device private structure (hldev)
1723  *
1724  * This function is called by MAC Layer to get network statistics
1725  * from the driver.
1726  */
1727 static int
1728 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1729 {
1730 	xge_hal_stats_hw_info_t *hw_info;
1731 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1732 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1733 
1734 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1735 
1736 	if (!mutex_tryenter(&lldev->genlock))
1737 		return (EAGAIN);
1738 
1739 	if (!lldev->is_initialized) {
1740 		mutex_exit(&lldev->genlock);
1741 		return (EAGAIN);
1742 	}
1743 
1744 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1745 		mutex_exit(&lldev->genlock);
1746 		return (EAGAIN);
1747 	}
1748 
1749 	switch (stat) {
1750 	case MAC_STAT_IFSPEED:
1751 		*val = 10000000000ull; /* 10G */
1752 		break;
1753 
1754 	case MAC_STAT_MULTIRCV:
1755 		*val = hw_info->rmac_vld_mcst_frms;
1756 		break;
1757 
1758 	case MAC_STAT_BRDCSTRCV:
1759 		*val = hw_info->rmac_vld_bcst_frms;
1760 		break;
1761 
1762 	case MAC_STAT_MULTIXMT:
1763 		*val = hw_info->tmac_mcst_frms;
1764 		break;
1765 
1766 	case MAC_STAT_BRDCSTXMT:
1767 		*val = hw_info->tmac_bcst_frms;
1768 		break;
1769 
1770 	case MAC_STAT_RBYTES:
1771 		*val = hw_info->rmac_ttl_octets;
1772 		break;
1773 
1774 	case MAC_STAT_NORCVBUF:
1775 		*val = hw_info->rmac_drop_frms;
1776 		break;
1777 
1778 	case MAC_STAT_IERRORS:
1779 		*val = hw_info->rmac_discarded_frms;
1780 		break;
1781 
1782 	case MAC_STAT_OBYTES:
1783 		*val = hw_info->tmac_ttl_octets;
1784 		break;
1785 
1786 	case MAC_STAT_NOXMTBUF:
1787 		*val = hw_info->tmac_drop_frms;
1788 		break;
1789 
1790 	case MAC_STAT_OERRORS:
1791 		*val = hw_info->tmac_any_err_frms;
1792 		break;
1793 
1794 	case MAC_STAT_IPACKETS:
1795 		*val = hw_info->rmac_vld_frms;
1796 		break;
1797 
1798 	case MAC_STAT_OPACKETS:
1799 		*val = hw_info->tmac_frms;
1800 		break;
1801 
1802 	case ETHER_STAT_FCS_ERRORS:
1803 		*val = hw_info->rmac_fcs_err_frms;
1804 		break;
1805 
1806 	case ETHER_STAT_TOOLONG_ERRORS:
1807 		*val = hw_info->rmac_long_frms;
1808 		break;
1809 
1810 	case ETHER_STAT_LINK_DUPLEX:
1811 		*val = LINK_DUPLEX_FULL;
1812 		break;
1813 
1814 	default:
1815 		mutex_exit(&lldev->genlock);
1816 		return (ENOTSUP);
1817 	}
1818 
1819 	mutex_exit(&lldev->genlock);
1820 
1821 	return (0);
1822 }
1823 
1824 /*
1825  * xgell_device_alloc - Allocate new LL device
1826  */
1827 int
1828 xgell_device_alloc(xge_hal_device_h devh,
1829     dev_info_t *dev_info, xgelldev_t **lldev_out)
1830 {
1831 	xgelldev_t *lldev;
1832 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1833 	int instance = ddi_get_instance(dev_info);
1834 
1835 	*lldev_out = NULL;
1836 
1837 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
1838 	    XGELL_IFNAME, instance);
1839 
1840 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1841 
1842 	/* allocate mac */
1843 	lldev->devh = hldev;
1844 	lldev->instance = instance;
1845 	lldev->dev_info = dev_info;
1846 
1847 	*lldev_out = lldev;
1848 
1849 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1850 
1851 	return (DDI_SUCCESS);
1852 }
1853 
1854 /*
1855  * xgell_device_free
1856  */
1857 void
1858 xgell_device_free(xgelldev_t *lldev)
1859 {
1860 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1861 	    XGELL_IFNAME, lldev->instance);
1862 
1863 	kmem_free(lldev, sizeof (xgelldev_t));
1864 }
1865 
1866 /*
1867  * xgell_ioctl
1868  */
1869 static void
1870 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1871 {
1872 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1873 	xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1874 	struct iocblk *iocp;
1875 	int err = 0;
1876 	int cmd;
1877 	int need_privilege = 1;
1878 	int ret = 0;
1879 
1880 
1881 	iocp = (struct iocblk *)mp->b_rptr;
1882 	iocp->ioc_error = 0;
1883 	cmd = iocp->ioc_cmd;
1884 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1885 	switch (cmd) {
1886 	case ND_GET:
1887 		need_privilege = 0;
1888 		/* FALLTHRU */
1889 	case ND_SET:
1890 		break;
1891 	default:
1892 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1893 		miocnak(wq, mp, 0, EINVAL);
1894 		return;
1895 	}
1896 
1897 	if (need_privilege) {
1898 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1899 		if (err != 0) {
1900 			xge_debug_ll(XGE_ERR,
1901 			    "drv_priv(): rejected cmd 0x%x, err %d",
1902 			    cmd, err);
1903 			miocnak(wq, mp, 0, err);
1904 			return;
1905 		}
1906 	}
1907 
1908 	switch (cmd) {
1909 	case ND_GET:
1910 		/*
1911 		 * If nd_getset() returns B_FALSE, the command was
1912 		 * not valid (e.g. unknown name), so we just tell the
1913 		 * top-level ioctl code to send a NAK (with code EINVAL).
1914 		 *
1915 		 * Otherwise, nd_getset() will have built the reply to
1916 		 * be sent (but not actually sent it), so we tell the
1917 		 * caller to send the prepared reply.
1918 		 */
1919 		ret = nd_getset(wq, lldev->ndp, mp);
1920 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1921 		break;
1922 
1923 	case ND_SET:
1924 		ret = nd_getset(wq, lldev->ndp, mp);
1925 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1926 		break;
1927 
1928 	default:
1929 		break;
1930 	}
1931 
1932 	if (ret == B_FALSE) {
1933 		xge_debug_ll(XGE_ERR,
1934 		    "nd_getset(): rejected cmd 0x%x, err %d",
1935 		    cmd, err);
1936 		miocnak(wq, mp, 0, EINVAL);
1937 	} else {
1938 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1939 		    M_IOCACK : M_IOCNAK;
1940 		qreply(wq, mp);
1941 	}
1942 }
1943 
1944 /* ARGSUSED */
1945 static boolean_t
1946 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1947 {
1948 	switch (cap) {
1949 	case MAC_CAPAB_HCKSUM: {
1950 		uint32_t *hcksum_txflags = cap_data;
1951 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
1952 		    HCKSUM_IPHDRCKSUM;
1953 		break;
1954 	}
1955 	case MAC_CAPAB_POLL:
1956 		/*
1957 		 * Fallthrough to default, as we don't support GLDv3
1958 		 * polling.  When blanking is implemented, we will need to
1959 		 * change this to return B_TRUE in addition to registering
1960 		 * an mc_resources callback.
1961 		 */
1962 	default:
1963 		return (B_FALSE);
1964 	}
1965 	return (B_TRUE);
1966 }
1967 
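/*
 * xgell_stats_get - ndd "stats" read handler
 *
 * Concatenates the HAL TMAC, RMAC, PCI, software device and HAL
 * statistics dumps into one buffer and returns it via the ndd reply.
 */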
1968 static int
1969 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1970 {
1971 	xgelldev_t *lldev = (xgelldev_t *)cp;
1972 	xge_hal_status_e status;
1973 	int count = 0, retsize;
1974 	char *buf;
1975 
1976 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
1977 	if (buf == NULL) {
1978 		return (ENOSPC);
1979 	}
1980 
1981 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
1982 	    buf, &retsize);
1983 	if (status != XGE_HAL_OK) {
1984 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1985 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
1986 		return (EINVAL);
1987 	}
1988 	count += retsize;
1989 
1990 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
1991 	    XGELL_STATS_BUFSIZE - count,
1992 	    buf+count, &retsize);
1993 	if (status != XGE_HAL_OK) {
1994 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1995 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
1996 		return (EINVAL);
1997 	}
1998 	count += retsize;
1999 
2000 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2001 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2002 	if (status != XGE_HAL_OK) {
2003 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2004 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2005 		return (EINVAL);
2006 	}
2007 	count += retsize;
2008 
2009 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2010 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2011 	if (status != XGE_HAL_OK) {
2012 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2013 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2014 		return (EINVAL);
2015 	}
2016 	count += retsize;
2017 
2018 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2019 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2020 	if (status != XGE_HAL_OK) {
2021 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2022 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2023 		return (EINVAL);
2024 	}
2025 	count += retsize;
2026 
2027 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2028 	(void) mi_mpprintf(mp, "%s", buf);
2029 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2030 
2031 	return (0);
2032 }
2033 
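/*
 * xgell_pciconf_get - ndd "pciconf" read handler
 *
 * Dumps the adapter's PCI configuration space via the HAL aux API.
 */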
2034 static int
2035 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2036 {
2037 	xgelldev_t *lldev = (xgelldev_t *)cp;
2038 	xge_hal_status_e status;
2039 	int retsize;
2040 	char *buf;
2041 
2042 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2043 	if (buf == NULL) {
2044 		return (ENOSPC);
2045 	}
2046 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2047 	    buf, &retsize);
2048 	if (status != XGE_HAL_OK) {
2049 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2050 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2051 		return (EINVAL);
2052 	}
2053 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2054 	(void) mi_mpprintf(mp, "%s", buf);
2055 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2056 
2057 	return (0);
2058 }
2059 
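/*
 * xgell_about_get - ndd "about" read handler
 *
 * Returns the text produced by xge_hal_aux_about_read() for this device.
 */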
2060 static int
2061 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2062 {
2063 	xgelldev_t *lldev = (xgelldev_t *)cp;
2064 	xge_hal_status_e status;
2065 	int retsize;
2066 	char *buf;
2067 
2068 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2069 	if (buf == NULL) {
2070 		return (ENOSPC);
2071 	}
2072 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2073 	    buf, &retsize);
2074 	if (status != XGE_HAL_OK) {
2075 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2076 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2077 		return (EINVAL);
2078 	}
2079 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2080 	(void) mi_mpprintf(mp, "%s", buf);
2081 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2082 
2083 	return (0);
2084 }
2085 
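/*
 * BAR0 access via ndd: reading "bar0" returns the register at
 * bar0_offset (default 0x110, adapter_control); writing "bar0"
 * updates the offset used by subsequent reads.
 */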
2086 static unsigned long bar0_offset = 0x110; /* adapter_control */
2087 
2088 static int
2089 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2090 {
2091 	xgelldev_t *lldev = (xgelldev_t *)cp;
2092 	xge_hal_status_e status;
2093 	int retsize;
2094 	char *buf;
2095 
2096 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2097 	if (buf == NULL) {
2098 		return (ENOSPC);
2099 	}
2100 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2101 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2102 	if (status != XGE_HAL_OK) {
2103 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2104 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2105 		return (EINVAL);
2106 	}
2107 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2108 	(void) mi_mpprintf(mp, "%s", buf);
2109 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2110 
2111 	return (0);
2112 }
2113 
2114 static int
2115 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2116 {
2117 	unsigned long old_offset = bar0_offset;
2118 	char *end;
2119 
2120 	if (value && *value == '0' &&
2121 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2122 		value += 2;
2123 	}
2124 
2125 	bar0_offset = mi_strtol(value, &end, 16);
2126 	if (end == value) {
2127 		bar0_offset = old_offset;
2128 		return (EINVAL);
2129 	}
2130 
2131 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2132 
2133 	return (0);
2134 }
2135 
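/*
 * ndd "debug_level" handlers: the getter reports the current HAL
 * debug level, the setter parses a decimal value and rejects anything
 * outside the XGE_NONE..XGE_ERR range.
 */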
2136 static int
2137 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2138 {
2139 	char *buf;
2140 
2141 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2142 	if (buf == NULL) {
2143 		return (ENOSPC);
2144 	}
2145 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2146 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2147 
2148 	return (0);
2149 }
2150 
2151 static int
2152 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2153     cred_t *credp)
2154 {
2155 	int level;
2156 	char *end;
2157 
2158 	level = mi_strtol(value, &end, 10);
2159 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2160 		return (EINVAL);
2161 	}
2162 
2163 	xge_hal_driver_debug_level_set(level);
2164 
2165 	return (0);
2166 }
2167 
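/*
 * ndd "debug_module_mask" handlers: the getter reports the current
 * HAL module mask in hex, the setter accepts a hex value (with or
 * without a leading "0x") and applies it to the HAL.
 */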
2168 static int
2169 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2170 {
2171 	char *buf;
2172 
2173 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2174 	if (buf == NULL) {
2175 		return (ENOSPC);
2176 	}
2177 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2178 	    xge_hal_driver_debug_module_mask());
2179 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2180 
2181 	return (0);
2182 }
2183 
2184 static int
2185 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2186 			    cred_t *credp)
2187 {
2188 	u32 mask;
2189 	char *end;
2190 
2191 	if (value && *value == '0' &&
2192 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2193 		value += 2;
2194 	}
2195 
2196 	mask = mi_strtol(value, &end, 16);
2197 	if (end == value) {
2198 		return (EINVAL);
2199 	}
2200 
2201 	xge_hal_driver_debug_module_mask_set(mask);
2202 
2203 	return (0);
2204 }
2205 
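/*
 * xgell_devconfig_get - ndd "devconfig" read handler
 *
 * Dumps the current HAL device configuration via the HAL aux API.
 */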
2206 static int
2207 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2208 {
2209 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2210 	xge_hal_status_e status;
2211 	int retsize;
2212 	char *buf;
2213 
2214 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2215 	if (buf == NULL) {
2216 		return (ENOSPC);
2217 	}
2218 	status = xge_hal_aux_device_config_read(lldev->devh,
2219 	    XGELL_DEVCONF_BUFSIZE,
2220 	    buf, &retsize);
2221 	if (status != XGE_HAL_OK) {
2222 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2223 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2224 		    status);
2225 		return (EINVAL);
2226 	}
2227 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2228 	(void) mi_mpprintf(mp, "%s", buf);
2229 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2230 
2231 	return (0);
2232 }
2233 
2234 /*
2235  * xgell_device_register
2236  * @lldev: LL device allocated by xgell_device_alloc()
2237  * @config: pointer to this network device's configuration
2238  *
2239  * This function loads the ndd parameters, creates the RX buffer pool
2240  * and registers the network device with the MAC layer.
2241  */
2242 int
2243 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2244 {
2245 	mac_register_t *macp;
2246 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2247 	int err;
2248 
2249 
2250 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2251 	    (caddr_t)lldev) == B_FALSE)
2252 		goto xgell_ndd_fail;
2253 
2254 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2255 	    (caddr_t)lldev) == B_FALSE)
2256 		goto xgell_ndd_fail;
2257 
2258 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2259 	    (caddr_t)lldev) == B_FALSE)
2260 		goto xgell_ndd_fail;
2261 
2262 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2263 	    (caddr_t)lldev) == B_FALSE)
2264 		goto xgell_ndd_fail;
2265 
2266 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2267 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2268 		goto xgell_ndd_fail;
2269 
2270 	if (nd_load(&lldev->ndp, "debug_module_mask",
2271 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2272 	    (caddr_t)lldev) == B_FALSE)
2273 		goto xgell_ndd_fail;
2274 
2275 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2276 	    (caddr_t)lldev) == B_FALSE)
2277 		goto xgell_ndd_fail;
2278 
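	/*
	 * The parameters loaded above become driver-private ndd(1M)
	 * variables.  As an illustration (the exact device node name
	 * depends on the system configuration), they can typically be
	 * read with something like:
	 *
	 *	ndd /dev/xge pciconf
	 */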
2279 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2280 
2281 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2282 		nd_free(&lldev->ndp);
2283 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2284 		return (DDI_FAILURE);
2285 	}
2286 
2287 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2288 
2289 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2290 		goto xgell_register_fail;
2291 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2292 	macp->m_driver = hldev;
2293 	macp->m_dip = lldev->dev_info;
2294 	macp->m_src_addr = hldev->macaddr[0];
2295 	macp->m_callbacks = &xgell_m_callbacks;
2296 	macp->m_min_sdu = 0;
2297 	macp->m_max_sdu = hldev->config.mtu;
2298 	/*
2299 	 * Finally, we're ready to register ourselves with the Nemo
2300 	 * interface; if this succeeds, we're all ready to start()
2301 	 */
2302 	err = mac_register(macp, &lldev->mh);
2303 	mac_free(macp);
2304 	if (err != 0)
2305 		goto xgell_register_fail;
2306 
2307 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
2308 	    XGELL_IFNAME, lldev->instance);
2309 
2310 	return (DDI_SUCCESS);
2311 
2312 xgell_ndd_fail:
2313 	nd_free(&lldev->ndp);
2314 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2315 	return (DDI_FAILURE);
2316 
2317 xgell_register_fail:
2318 	nd_free(&lldev->ndp);
2319 	mutex_destroy(&lldev->genlock);
2320 	/* Ignore return value, since RX has not been started yet */
2321 	(void) xgell_rx_destroy_buffer_pool(lldev);
2322 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2323 	return (DDI_FAILURE);
2324 }
2325 
2326 /*
2327  * xgell_device_unregister
2328  * @lldev: pointer to a valid LL device.
2329  *
2330  * This function unregisters the device from the MAC layer and
2331  * releases its RX buffer pool, mutex and ndd parameters.
2332  */
2333 int
2334 xgell_device_unregister(xgelldev_t *lldev)
2335 {
2336 	/*
2337 	 * Destroy RX buffer pool.
2338 	 */
2339 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2340 		return (DDI_FAILURE);
2341 	}
2342 
2343 	if (mac_unregister(lldev->mh) != 0) {
2344 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2345 		    XGELL_IFNAME, lldev->instance);
2346 		return (DDI_FAILURE);
2347 	}
2348 
2349 	mutex_destroy(&lldev->genlock);
2350 
2351 	nd_free(&lldev->ndp);
2352 
2353 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",
2354 	    XGELL_IFNAME, lldev->instance);
2355 
2356 	return (DDI_SUCCESS);
2357 }
2358