xref: /titanic_44/usr/src/uts/common/io/xge/drv/xgell.c (revision 39c23413b8df94a95f67b34cfd4a4dfc3fd0b48d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
31  *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 #include <netinet/udp.h>
44 
45 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
46     sizeof (struct ether_vlan_header))
47 
48 #define	HEADROOM		2	/* for DIX-only packets */
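/*
 * The 2-byte headroom pads the 14-byte DIX (Ethernet II) MAC header so that
 * the IP header which follows it lands on a 4-byte boundary.
 */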
49 
50 #ifdef XGELL_L3_ALIGNED
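/*
 * No-op free routine for the esballoc()'ed header mblk used on the
 * L3-alignment path; the header storage is not a separate allocation,
 * so there is nothing to free here.
 */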
51 void header_free_func(void *arg) { }
52 frtn_t header_frtn = {header_free_func, NULL};
53 #endif
54 
55 /* DMA attributes used for Tx side */
56 static struct ddi_dma_attr tx_dma_attr = {
57 	DMA_ATTR_V0,			/* dma_attr_version */
58 	0x0ULL,				/* dma_attr_addr_lo */
59 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
60 	0xFFFFFFFFULL,			/* dma_attr_count_max */
61 	0x1ULL,				/* dma_attr_align */
62 	0xFFF,				/* dma_attr_burstsizes */
63 	1,				/* dma_attr_minxfer */
64 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
65 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
66 	18,				/* dma_attr_sgllen */
67 	1,				/* dma_attr_granular */
68 	0				/* dma_attr_flags */
69 };
70 
71 /* Aligned DMA attributes used for Tx side */
72 struct ddi_dma_attr tx_dma_attr_align = {
73 	DMA_ATTR_V0,			/* dma_attr_version */
74 	0x0ULL,				/* dma_attr_addr_lo */
75 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
76 	0xFFFFFFFFULL,			/* dma_attr_count_max */
77 	4096,				/* dma_attr_align */
78 	0xFFF,				/* dma_attr_burstsizes */
79 	1,				/* dma_attr_minxfer */
80 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
81 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
82 	4,				/* dma_attr_sgllen */
83 	1,				/* dma_attr_granular */
84 	0				/* dma_attr_flags */
85 };
86 
87 /*
88  * DMA attributes used when using ddi_dma_mem_alloc to
89  * allocate HAL descriptors and Rx buffers during replenish
90  */
91 static struct ddi_dma_attr hal_dma_attr = {
92 	DMA_ATTR_V0,			/* dma_attr_version */
93 	0x0ULL,				/* dma_attr_addr_lo */
94 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
95 	0xFFFFFFFFULL,			/* dma_attr_count_max */
96 	0x1ULL,				/* dma_attr_align */
97 	0xFFF,				/* dma_attr_burstsizes */
98 	1,				/* dma_attr_minxfer */
99 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
100 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
101 	1,				/* dma_attr_sgllen */
102 	1,				/* dma_attr_granular */
103 	0				/* dma_attr_flags */
104 };
105 
106 /*
107  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
108  * allocate HAL descriptors and Rx buffers during replenish
109  */
110 struct ddi_dma_attr hal_dma_attr_aligned = {
111 	DMA_ATTR_V0,			/* dma_attr_version */
112 	0x0ULL,				/* dma_attr_addr_lo */
113 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
114 	0xFFFFFFFFULL,			/* dma_attr_count_max */
115 	4096,				/* dma_attr_align */
116 	0xFFF,				/* dma_attr_burstsizes */
117 	1,				/* dma_attr_minxfer */
118 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
119 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
120 	1,				/* dma_attr_sgllen */
121 	1,				/* dma_attr_granular */
122 	0				/* dma_attr_flags */
123 };
124 
125 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
126 struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
127 
128 static int		xgell_m_stat(void *, uint_t, uint64_t *);
129 static int		xgell_m_start(void *);
130 static void		xgell_m_stop(void *);
131 static int		xgell_m_promisc(void *, boolean_t);
132 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
133 static int		xgell_m_unicst(void *, const uint8_t *);
134 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
135 static mblk_t 		*xgell_m_tx(void *, mblk_t *);
136 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
137 
138 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
139 
140 static mac_callbacks_t xgell_m_callbacks = {
141 	XGELL_M_CALLBACK_FLAGS,
142 	xgell_m_stat,
143 	xgell_m_start,
144 	xgell_m_stop,
145 	xgell_m_promisc,
146 	xgell_m_multicst,
147 	xgell_m_unicst,
148 	xgell_m_tx,
149 	NULL,
150 	xgell_m_ioctl,
151 	xgell_m_getcapab
152 };
153 
154 /*
155  * xge_device_poll
156  *
157  * Called by a cyclic every 1 second.  xge_callback_event_queued() also calls
158  * this when a HAL event has been rescheduled.
159  */
160 /*ARGSUSED*/
161 void
162 xge_device_poll(void *data)
163 {
164 	xgelldev_t *lldev = xge_hal_device_private(data);
165 
166 	mutex_enter(&lldev->genlock);
167 	if (lldev->is_initialized) {
168 		xge_hal_device_poll(data);
169 		lldev->timeout_id = timeout(xge_device_poll, data,
170 		    XGE_DEV_POLL_TICKS);
171 	} else if (lldev->in_reset == 1) {
172 		lldev->timeout_id = timeout(xge_device_poll, data,
173 		    XGE_DEV_POLL_TICKS);
174 	} else {
175 		lldev->timeout_id = 0;
176 	}
177 	mutex_exit(&lldev->genlock);
178 }
179 
180 /*
181  * xge_device_poll_now
182  *
183  * Will call xge_device_poll() immediately
184  */
185 void
186 xge_device_poll_now(void *data)
187 {
188 	xgelldev_t *lldev = xge_hal_device_private(data);
189 
190 	mutex_enter(&lldev->genlock);
191 	if (lldev->is_initialized) {
192 		xge_hal_device_poll(data);
193 	}
194 	mutex_exit(&lldev->genlock);
195 }
196 
197 /*
198  * xgell_callback_link_up
199  *
200  * Called by the HAL to notify the driver of a hardware link-up state change.
201  */
202 void
203 xgell_callback_link_up(void *userdata)
204 {
205 	xgelldev_t *lldev = (xgelldev_t *)userdata;
206 
207 	mac_link_update(lldev->mh, LINK_STATE_UP);
208 	/* Link state should be reported to the user whenever it changes */
209 	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
210 	    XGELL_IFNAME, lldev->instance);
211 }
212 
213 /*
214  * xgell_callback_link_down
215  *
216  * Called by the HAL to notify the driver of a hardware link-down state change.
217  */
218 void
219 xgell_callback_link_down(void *userdata)
220 {
221 	xgelldev_t *lldev = (xgelldev_t *)userdata;
222 
223 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
224 	/* Link state should be reported to the user whenever it changes */
225 	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
226 	    lldev->instance);
227 }
228 
229 /*
230  * xgell_rx_buffer_replenish_all
231  *
232  * Replenish all freed dtr(s) with buffers from the free pool. Called by
233  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
234  * Must be called with pool_lock held.
235  */
236 static void
237 xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
238 {
239 	xge_hal_dtr_h dtr;
240 	xgell_rx_buffer_t *rx_buffer;
241 	xgell_rxd_priv_t *rxd_priv;
242 
243 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
244 
245 	while ((lldev->bf_pool.free > 0) &&
246 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
247 	    XGE_HAL_OK)) {
248 		rx_buffer = lldev->bf_pool.head;
249 		lldev->bf_pool.head = rx_buffer->next;
250 		lldev->bf_pool.free--;
251 
252 		xge_assert(rx_buffer);
253 		xge_assert(rx_buffer->dma_addr);
254 
255 		rxd_priv = (xgell_rxd_priv_t *)
256 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
257 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
258 		    lldev->bf_pool.size);
259 
260 		rxd_priv->rx_buffer = rx_buffer;
261 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
262 	}
263 }
264 
265 /*
266  * xgell_rx_buffer_release
267  *
268  * The only thing done here is to put the buffer back to the pool.
269  * Callers must hold the bf_pool.pool_lock mutex.
270  */
271 static void
272 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
273 {
274 	xgelldev_t *lldev = rx_buffer->lldev;
275 
276 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
277 
278 	/* Put the buffer back to pool */
279 	rx_buffer->next = lldev->bf_pool.head;
280 	lldev->bf_pool.head = rx_buffer;
281 
282 	lldev->bf_pool.free++;
283 }
284 
285 /*
286  * xgell_rx_buffer_recycle
287  *
288  * Free routine registered via desballoc(), called when the loaned mblk is freed.
289  * We will try to replenish all descriptors.
290  */
291 static void
292 xgell_rx_buffer_recycle(char *arg)
293 {
294 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
295 	xgelldev_t *lldev = rx_buffer->lldev;
296 
297 	mutex_enter(&lldev->bf_pool.pool_lock);
298 
299 	xgell_rx_buffer_release(rx_buffer);
300 	lldev->bf_pool.post--;
301 
302 	/*
303 	 * Before finding a good way to set this hiwat, just always call to
304 	 * replenish_all. *TODO*
305 	 */
306 	if (lldev->is_initialized != 0) {
307 		xgell_rx_buffer_replenish_all(lldev);
308 	}
309 
310 	mutex_exit(&lldev->bf_pool.pool_lock);
311 }
312 
313 /*
314  * xgell_rx_buffer_alloc
315  *
316  * Allocate one rx buffer and return a pointer to it.
317  * Returns NULL on failure.
318  */
319 static xgell_rx_buffer_t *
320 xgell_rx_buffer_alloc(xgelldev_t *lldev)
321 {
322 	xge_hal_device_t *hldev;
323 	void *vaddr;
324 	ddi_dma_handle_t dma_handle;
325 	ddi_acc_handle_t dma_acch;
326 	dma_addr_t dma_addr;
327 	uint_t ncookies;
328 	ddi_dma_cookie_t dma_cookie;
329 	size_t real_size;
330 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
331 	xgell_rx_buffer_t *rx_buffer;
332 
333 	hldev = (xge_hal_device_t *)lldev->devh;
334 
335 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
336 	    0, &dma_handle) != DDI_SUCCESS) {
337 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
338 		    XGELL_IFNAME, lldev->instance);
339 		goto handle_failed;
340 	}
341 
342 	/* reserve some space at the end of the buffer for recycling */
343 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
344 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
345 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
346 	    DDI_SUCCESS) {
347 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
348 		    XGELL_IFNAME, lldev->instance);
349 		goto mem_failed;
350 	}
351 
352 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
353 	    real_size) {
354 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
355 		    XGELL_IFNAME, lldev->instance);
356 		goto bind_failed;
357 	}
358 
359 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
360 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
361 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
362 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
363 		    XGELL_IFNAME, lldev->instance);
364 		goto bind_failed;
365 	}
366 
367 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
368 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
369 		    XGELL_IFNAME, lldev->instance);
370 		goto check_failed;
371 	}
372 
373 	dma_addr = dma_cookie.dmac_laddress;
374 
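	/*
	 * The xgell_rx_buffer_t bookkeeping structure lives at the tail of
	 * the same allocation: [HEADROOM | receive data | metadata].  Only
	 * the receive data portion was bound for DMA above.
	 */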
375 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
376 	    sizeof (xgell_rx_buffer_t));
377 	rx_buffer->next = NULL;
378 	rx_buffer->vaddr = vaddr;
379 	rx_buffer->dma_addr = dma_addr;
380 	rx_buffer->dma_handle = dma_handle;
381 	rx_buffer->dma_acch = dma_acch;
382 	rx_buffer->lldev = lldev;
383 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
384 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
385 
386 	return (rx_buffer);
387 
388 check_failed:
389 	(void) ddi_dma_unbind_handle(dma_handle);
390 bind_failed:
391 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
392 	ddi_dma_mem_free(&dma_acch);
393 mem_failed:
394 	ddi_dma_free_handle(&dma_handle);
395 handle_failed:
396 
397 	return (NULL);
398 }
399 
400 /*
401  * xgell_rx_destroy_buffer_pool
402  *
403  * Destroy the buffer pool. If any buffers are still held by the upper layer
404  * (tracked by bf_pool.post), return DDI_FAILURE to reject the unload.
405  */
406 static int
407 xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
408 {
409 	xgell_rx_buffer_t *rx_buffer;
410 	ddi_dma_handle_t  dma_handle;
411 	ddi_acc_handle_t  dma_acch;
412 	int i;
413 
414 	/*
415 	 * If any buffers are still posted, the driver must refuse to be
416 	 * detached; the upper layer needs to release them first.
417 	 */
418 	if (lldev->bf_pool.post != 0) {
419 		xge_debug_ll(XGE_ERR,
420 		    "%s%d still has buffers that have not been recycled, try later!",
421 		    XGELL_IFNAME, lldev->instance);
422 		return (DDI_FAILURE);
423 	}
424 
425 	/*
426 	 * Release buffers one by one.
427 	 */
428 	for (i = lldev->bf_pool.total; i > 0; i--) {
429 		rx_buffer = lldev->bf_pool.head;
430 		xge_assert(rx_buffer != NULL);
431 
432 		lldev->bf_pool.head = rx_buffer->next;
433 
434 		dma_handle = rx_buffer->dma_handle;
435 		dma_acch = rx_buffer->dma_acch;
436 
437 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
438 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
439 			lldev->bf_pool.head = rx_buffer;
440 			return (DDI_FAILURE);
441 		}
442 		ddi_dma_mem_free(&dma_acch);
443 		ddi_dma_free_handle(&dma_handle);
444 
445 		lldev->bf_pool.total--;
446 		lldev->bf_pool.free--;
447 	}
448 
449 	mutex_destroy(&lldev->bf_pool.pool_lock);
450 	return (DDI_SUCCESS);
451 }
452 
453 /*
454  * xgell_rx_create_buffer_pool
455  *
456  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
457  */
458 static int
459 xgell_rx_create_buffer_pool(xgelldev_t *lldev)
460 {
461 	xge_hal_device_t *hldev;
462 	xgell_rx_buffer_t *rx_buffer;
463 	int i;
464 
465 	hldev = (xge_hal_device_t *)lldev->devh;
466 
467 	lldev->bf_pool.total = 0;
468 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
469 	lldev->bf_pool.head = NULL;
470 	lldev->bf_pool.free = 0;
471 	lldev->bf_pool.post = 0;
472 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
473 
474 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
475 	    hldev->irqh);
476 
477 	/*
478 	 * Allocate buffers one by one. If failed, destroy whole pool by
479 	 * call to xgell_rx_destroy_buffer_pool().
480 	 */
481 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
482 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
483 			(void) xgell_rx_destroy_buffer_pool(lldev);
484 			return (DDI_FAILURE);
485 		}
486 
487 		rx_buffer->next = lldev->bf_pool.head;
488 		lldev->bf_pool.head = rx_buffer;
489 
490 		lldev->bf_pool.total++;
491 		lldev->bf_pool.free++;
492 	}
493 
494 	return (DDI_SUCCESS);
495 }
496 
497 /*
498  * xgell_rx_dtr_replenish
499  *
500  * Replenish descriptor with rx_buffer in RX buffer pool.
501  * The dtr should be posted right away.
502  */
503 xge_hal_status_e
504 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
505     void *userdata, xge_hal_channel_reopen_e reopen)
506 {
507 	xgell_ring_t *ring = userdata;
508 	xgelldev_t *lldev = ring->lldev;
509 	xgell_rx_buffer_t *rx_buffer;
510 	xgell_rxd_priv_t *rxd_priv;
511 
512 	if (lldev->bf_pool.head == NULL) {
513 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
514 		return (XGE_HAL_FAIL);
515 	}
516 	rx_buffer = lldev->bf_pool.head;
517 	lldev->bf_pool.head = rx_buffer->next;
518 	lldev->bf_pool.free--;
519 
520 	xge_assert(rx_buffer);
521 	xge_assert(rx_buffer->dma_addr);
522 
523 	rxd_priv = (xgell_rxd_priv_t *)
524 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
525 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
526 
527 	rxd_priv->rx_buffer = rx_buffer;
528 
529 	return (XGE_HAL_OK);
530 }
531 
532 /*
533  * xgell_get_ip_offset
534  *
535  * Calculate the offset to IP header.
536  */
537 static inline int
538 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
539 {
540 	int ip_off;
541 
542 	/* get IP-header offset */
543 	switch (ext_info->frame) {
544 	case XGE_HAL_FRAME_TYPE_DIX:
545 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
546 		break;
547 	case XGE_HAL_FRAME_TYPE_IPX:
548 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
549 		    XGE_HAL_HEADER_802_2_SIZE +
550 		    XGE_HAL_HEADER_SNAP_SIZE);
551 		break;
552 	case XGE_HAL_FRAME_TYPE_LLC:
553 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
554 		    XGE_HAL_HEADER_802_2_SIZE);
555 		break;
556 	case XGE_HAL_FRAME_TYPE_SNAP:
557 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
558 		    XGE_HAL_HEADER_SNAP_SIZE);
559 		break;
560 	default:
561 		ip_off = 0;
562 		break;
563 	}
564 
565 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
566 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
567 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
568 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
569 	}
570 
571 	return (ip_off);
572 }
573 
574 /*
575  * xgell_rx_hcksum_assoc
576  *
577  * Determine the packet type and then call hcksum_assoc() to associate
578  * h/w checksum information.
579  */
580 static inline void
581 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
582     xge_hal_dtr_info_t *ext_info)
583 {
584 	int cksum_flags = 0;
585 
586 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
587 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
588 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
589 				cksum_flags |= HCK_IPV4_HDRCKSUM;
590 			}
591 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
592 				cksum_flags |= HCK_FULLCKSUM_OK;
593 			}
594 			if (cksum_flags) {
595 				cksum_flags |= HCK_FULLCKSUM;
596 				(void) hcksum_assoc(mp, NULL, NULL, 0,
597 				    0, 0, 0, cksum_flags, 0);
598 			}
599 		}
600 	} else if (ext_info->proto &
601 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
602 		/*
603 		 * Just pass the partial cksum up to IP.
604 		 */
605 		int ip_off = xgell_get_ip_offset(ext_info);
606 		int start, end = pkt_length - ip_off;
607 
608 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
609 			struct ip *ip =
610 			    (struct ip *)(vaddr + ip_off);
611 			start = ip->ip_hl * 4 + ip_off;
612 		} else {
613 			start = ip_off + 40;
614 		}
615 		cksum_flags |= HCK_PARTIALCKSUM;
616 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
617 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
618 		    0);
619 	}
620 }
621 
622 /*
623  * xgell_rx_1b_msg_alloc
624  *
625  * Allocate a message header for the data buffer, and decide whether to copy the
626  * packet into a new buffer so the big rx_buffer can be released to save memory.
627  *
628  * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
629  * new message and copy the payload in.
630  */
631 static mblk_t *
632 xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
633     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
634 {
635 	mblk_t *mp;
636 	mblk_t *nmp = NULL;
637 	char *vaddr;
638 	int hdr_length = 0;
639 
640 #ifdef XGELL_L3_ALIGNED
641 	boolean_t doalign = B_TRUE;
642 	struct ip *ip;
643 	struct tcphdr *tcp;
644 	int tcp_off;
645 	int mp_align_len;
646 	int ip_off;
647 #endif
648 
649 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
650 #ifdef XGELL_L3_ALIGNED
651 	ip_off = xgell_get_ip_offset(ext_info);
652 
653 	/* Check ip_off with HEADROOM */
654 	if ((ip_off & 3) == HEADROOM) {
655 		doalign = B_FALSE;
656 	}
657 
658 	/*
659 	 * Doalign? Check for types of packets.
660 	 */
661 	/* Is IPv4 or IPv6? */
662 	if (doalign && !(ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
663 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6)) {
664 		doalign = B_FALSE;
665 	}
666 
667 	/* Is TCP? */
668 	if (doalign &&
669 	    ((ip = (struct ip *)(vaddr + ip_off))->ip_p == IPPROTO_TCP)) {
670 		tcp_off = ip->ip_hl * 4 + ip_off;
671 		tcp = (struct tcphdr *)(vaddr + tcp_off);
672 		hdr_length = tcp_off + tcp->th_off * 4;
673 		if (pkt_length < (XGE_HAL_TCPIP_HEADER_MAX_SIZE +
674 		    XGE_HAL_MAC_HEADER_MAX_SIZE)) {
675 			hdr_length = pkt_length;
676 		}
677 	} else {
678 		doalign = B_FALSE;
679 	}
680 #endif
681 
682 	/*
683 	 * Copy the packet into a newly allocated message buffer if the caller
684 	 * requests a copy or if pkt_length is below the rx_dma_lowat threshold.
685 	 */
686 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
687 		/* Keep room for alignment */
688 		if ((mp = allocb(pkt_length + HEADROOM + 4, 0)) == NULL) {
689 			return (NULL);
690 		}
691 #ifdef XGELL_L3_ALIGNED
692 		if (doalign) {
693 			mp_align_len =
694 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
695 			mp->b_rptr += mp_align_len;
696 		}
697 #endif
698 		bcopy(vaddr, mp->b_rptr, pkt_length);
699 		mp->b_wptr = mp->b_rptr + pkt_length;
700 		*copyit = B_TRUE;
701 		return (mp);
702 	}
703 
704 	/*
705 	 * Loan the current data buffer up by wrapping it with a desballoc()'ed mblk
706 	 */
707 	if ((nmp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
708 	    &rx_buffer->frtn)) == NULL) {
709 		/* Drop it */
710 		return (NULL);
711 	}
712 
713 	/*
714 	 * Adjust the b_rptr/b_wptr in the mblk_t structure to point to
715 	 * payload.
716 	 */
717 	nmp->b_rptr += hdr_length;
718 	nmp->b_wptr += pkt_length;
719 
720 #ifdef XGELL_L3_ALIGNED
721 	if (doalign) {
722 		if ((mp = esballoc(rx_buffer->header, hdr_length + 4, 0,
723 		    &header_frtn)) == NULL) {
724 			/* can not align! */
725 			mp = nmp;
726 			mp->b_rptr = (u8 *)vaddr;
727 			mp->b_wptr = mp->b_rptr + pkt_length;
728 			mp->b_next = NULL;
729 			mp->b_cont = NULL;
730 		} else {
731 			/* align packet's ip-header offset */
732 			mp_align_len =
733 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
734 			mp->b_rptr += mp_align_len;
735 			mp->b_wptr += mp_align_len + hdr_length;
736 			mp->b_cont = nmp;
737 			mp->b_next = NULL;
738 			nmp->b_cont = NULL;
739 			nmp->b_next = NULL;
740 
741 			bcopy(vaddr, mp->b_rptr, hdr_length);
742 		}
743 	} else {
744 		/* no need to align */
745 		mp = nmp;
746 		mp->b_next = NULL;
747 		mp->b_cont = NULL;
748 	}
749 #else
750 	mp = nmp;
751 	mp->b_next = NULL;
752 	mp->b_cont = NULL;
753 #endif
754 
755 	return (mp);
756 }
757 
758 /*
759  * xgell_rx_1b_compl
760  *
761  * Called when an interrupt is raised for a received frame or when the receive
762  * ring contains fresh, as yet unprocessed frames.
763  */
764 static xge_hal_status_e
765 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
766     void *userdata)
767 {
768 	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
769 	xgell_rx_buffer_t *rx_buffer;
770 	mblk_t *mp_head = NULL;
771 	mblk_t *mp_end  = NULL;
772 	int pkt_burst = 0;
773 
774 	mutex_enter(&lldev->bf_pool.pool_lock);
775 
776 	do {
777 		int pkt_length;
778 		dma_addr_t dma_data;
779 		mblk_t *mp;
780 		boolean_t copyit = B_FALSE;
781 
782 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
783 		    xge_hal_ring_dtr_private(channelh, dtr));
784 		xge_hal_dtr_info_t ext_info;
785 
786 		rx_buffer = rxd_priv->rx_buffer;
787 
788 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
789 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
790 
791 		xge_assert(dma_data == rx_buffer->dma_addr);
792 
793 		if (t_code != 0) {
794 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
795 			    " completed due to error t_code %01x", XGELL_IFNAME,
796 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
797 
798 			(void) xge_hal_device_handle_tcode(channelh, dtr,
799 			    t_code);
800 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
801 			xgell_rx_buffer_release(rx_buffer);
802 			continue;
803 		}
804 
805 		/*
806 		 * Sync the DMA memory
807 		 */
808 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
809 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
810 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
811 			    XGELL_IFNAME, lldev->instance);
812 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
813 			xgell_rx_buffer_release(rx_buffer);
814 			continue;
815 		}
816 
817 		/*
818 		 * Allocate message for the packet.
819 		 */
820 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
821 			copyit = B_TRUE;
822 		} else {
823 			copyit = B_FALSE;
824 		}
825 
826 		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
827 		    &ext_info, &copyit);
828 
829 		xge_hal_ring_dtr_free(channelh, dtr);
830 
831 		/*
832 		 * Release the buffer and recycle it later
833 		 */
834 		if ((mp == NULL) || copyit) {
835 			xgell_rx_buffer_release(rx_buffer);
836 		} else {
837 			/*
838 			 * Count it since the buffer should be loaned up.
839 			 */
840 			lldev->bf_pool.post++;
841 		}
842 		if (mp == NULL) {
843 			xge_debug_ll(XGE_ERR,
844 			    "%s%d: rx: can not allocate mp mblk",
845 			    XGELL_IFNAME, lldev->instance);
846 			continue;
847 		}
848 
849 		/*
850 		 * Associate cksum_flags per packet type and h/w
851 		 * cksum flags.
852 		 */
853 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
854 		    HEADROOM, pkt_length, &ext_info);
855 
856 		if (mp_head == NULL) {
857 			mp_head = mp;
858 			mp_end = mp;
859 		} else {
860 			mp_end->b_next = mp;
861 			mp_end = mp;
862 		}
863 
864 		if (++pkt_burst < lldev->config.rx_pkt_burst)
865 			continue;
866 
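		/*
		 * A burst worth of packets has been chained up: top up the
		 * ring if too many buffers are loaned out, then drop
		 * pool_lock and hand the chain to the MAC layer.  The lock
		 * cannot be held across mac_rx(), since freeing loaned-up
		 * mblks invokes the recycle callback, which re-acquires it.
		 */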
867 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
868 			/* Replenish rx buffers */
869 			xgell_rx_buffer_replenish_all(lldev);
870 		}
871 		mutex_exit(&lldev->bf_pool.pool_lock);
872 		if (mp_head != NULL) {
873 			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
874 			    mp_head);
875 		}
876 		mp_head = mp_end  = NULL;
877 		pkt_burst = 0;
878 		mutex_enter(&lldev->bf_pool.pool_lock);
879 
880 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
881 	    XGE_HAL_OK);
882 
883 	/*
884 	 * Always call replenish_all to recycle rx_buffers.
885 	 */
886 	xgell_rx_buffer_replenish_all(lldev);
887 	mutex_exit(&lldev->bf_pool.pool_lock);
888 
889 	if (mp_head != NULL) {
890 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
891 	}
892 
893 	return (XGE_HAL_OK);
894 }
895 
896 /*
897  * xgell_xmit_compl
898  *
899  * Called when an interrupt is raised to indicate DMA completion of a Tx
900  * packet. It identifies the last TxD whose buffer was freed and frees all
901  * mblks whose data has already been DMA'ed into the NIC's internal
902  * memory.
903  */
904 static xge_hal_status_e
905 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
906     void *userdata)
907 {
908 	xgelldev_t *lldev = userdata;
909 
910 	do {
911 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
912 		    xge_hal_fifo_dtr_private(dtr));
913 		mblk_t *mp = txd_priv->mblk;
914 		int i;
915 
916 		if (t_code) {
917 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
918 			    " completed due to error t_code %01x", XGELL_IFNAME,
919 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
920 
921 			(void) xge_hal_device_handle_tcode(channelh, dtr,
922 			    t_code);
923 		}
924 
925 		for (i = 0; i < txd_priv->handle_cnt; i++) {
926 			xge_assert(txd_priv->dma_handles[i]);
927 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
928 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
929 			txd_priv->dma_handles[i] = 0;
930 		}
931 
932 		xge_hal_fifo_dtr_free(channelh, dtr);
933 
934 		freemsg(mp);
935 		lldev->resched_avail++;
936 
937 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
938 	    XGE_HAL_OK);
939 
940 	if (lldev->resched_retry &&
941 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
942 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
943 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
944 		    XGELL_IFNAME, lldev->instance,
945 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
946 		lldev->resched_send = lldev->resched_avail;
947 		lldev->resched_retry = 0;
948 	}
949 
950 	return (XGE_HAL_OK);
951 }
952 
953 /*
954  * xgell_send
955  * @lldev: pointer to the xgelldev_t structure
956  * @mp: pointer to the network buffer, i.e. an mblk_t
957  *
958  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
959  * A pointer to an M_DATA message that contains the packet is passed to
960  * this routine.
961  */
962 static boolean_t
963 xgell_send(xgelldev_t *lldev, mblk_t *mp)
964 {
965 	mblk_t *bp;
966 	boolean_t retry;
967 	xge_hal_device_t *hldev = lldev->devh;
968 	xge_hal_status_e status;
969 	xge_hal_dtr_h dtr;
970 	xgell_txd_priv_t *txd_priv;
971 	uint32_t hckflags;
972 	uint32_t mss;
973 	int handle_cnt, frag_cnt, ret, i, copied;
974 	boolean_t used_copy;
975 
976 _begin:
977 	retry = B_FALSE;
978 	handle_cnt = frag_cnt = 0;
979 
980 	if (!lldev->is_initialized || lldev->in_reset)
981 		return (B_FALSE);
982 
983 	/*
984 	 * If the free Tx dtrs count reaches the lower threshold,
985 	 * inform the gld to stop sending more packets till the free
986 	 * dtrs count exceeds higher threshold. Driver informs the
987 	 * gld through gld_sched call, when the free dtrs count exceeds
988 	 * the higher threshold.
989 	 */
990 	if (xge_hal_channel_dtr_count(lldev->fifo_channel)
991 	    <= XGELL_TX_LEVEL_LOW) {
992 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit, "
993 		    "free descriptors count at low threshold %d",
994 		    XGELL_IFNAME, lldev->instance,
995 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
996 		    XGELL_TX_LEVEL_LOW);
997 		retry = B_TRUE;
998 		goto _exit;
999 	}
1000 
1001 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
1002 	if (status != XGE_HAL_OK) {
1003 		switch (status) {
1004 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
1005 			xge_debug_ll(XGE_ERR,
1006 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
1007 			    lldev->instance,
1008 			    ((xge_hal_channel_t *)
1009 			    lldev->fifo_channel)->post_qid);
1010 			retry = B_TRUE;
1011 			goto _exit;
1012 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
1013 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
1014 			    " out of descriptors.", XGELL_IFNAME,
1015 			    lldev->instance,
1016 			    ((xge_hal_channel_t *)
1017 			    lldev->fifo_channel)->post_qid);
1018 			retry = B_TRUE;
1019 			goto _exit;
1020 		default:
1021 			return (B_FALSE);
1022 		}
1023 	}
1024 
1025 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1026 	txd_priv->mblk = mp;
1027 
1028 	/*
1029 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1030 	 * do insertion.
1031 	 *
1032 	 * For NIC driver that has to strip and re-insert VLAN tag, the example
1033 	 * is the other implementation for xge. The driver can simply bcopy()
1034 	 * ether_vlan_header to overwrite VLAN tag and let h/w insert the tag
1035 	 * automatically, since it's impossible that GLD sends down mp(s) with
1036 	 * a split ether_vlan_header.
1037 	 *
1038 	 * struct ether_vlan_header *evhp;
1039 	 * uint16_t tci;
1040 	 *
1041 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1042 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1043 	 *	tci = ntohs(evhp->ether_tci);
1044 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1045 	 *	    2 * ETHERADDRL);
1046 	 *	mp->b_rptr += VLAN_TAGSZ;
1047 	 *
1048 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1049 	 * }
1050 	 */
1051 
1052 	copied = 0;
1053 	used_copy = B_FALSE;
1054 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1055 		int mblen;
1056 		uint_t ncookies;
1057 		ddi_dma_cookie_t dma_cookie;
1058 		ddi_dma_handle_t dma_handle;
1059 
1060 		/* skip zero-length message blocks */
1061 		mblen = MBLKL(bp);
1062 		if (mblen == 0) {
1063 			continue;
1064 		}
1065 
1066 		/*
1067 		 * Check the message length to decide to DMA or bcopy() data
1068 		 * to tx descriptor(s).
1069 		 */
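		/*
		 * Fragments below tx_dma_lowat are appended inline into the
		 * descriptor (typically cheaper than a DMA binding), up to
		 * tx_copied_max bytes per packet; anything larger falls
		 * through to ddi_dma_addr_bind_handle() below.
		 */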
1070 		if (mblen < lldev->config.tx_dma_lowat &&
1071 		    (copied + mblen) < lldev->tx_copied_max) {
1072 			xge_hal_status_e rc;
1073 			rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
1074 			    dtr, bp->b_rptr, mblen);
1075 			if (rc == XGE_HAL_OK) {
1076 				used_copy = B_TRUE;
1077 				copied += mblen;
1078 				continue;
1079 			} else if (used_copy) {
1080 				xge_hal_fifo_dtr_buffer_finalize(
1081 					lldev->fifo_channel, dtr, frag_cnt++);
1082 				used_copy = B_FALSE;
1083 			}
1084 		} else if (used_copy) {
1085 			xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
1086 			    dtr, frag_cnt++);
1087 			used_copy = B_FALSE;
1088 		}
1089 
1090 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1091 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1092 		if (ret != DDI_SUCCESS) {
1093 			xge_debug_ll(XGE_ERR,
1094 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1095 			    lldev->instance);
1096 			goto _exit_cleanup;
1097 		}
1098 
1099 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1100 		    (caddr_t)bp->b_rptr, mblen,
1101 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1102 		    &dma_cookie, &ncookies);
1103 
1104 		switch (ret) {
1105 		case DDI_DMA_MAPPED:
1106 			/* everything's fine */
1107 			break;
1108 
1109 		case DDI_DMA_NORESOURCES:
1110 			xge_debug_ll(XGE_ERR,
1111 			    "%s%d: can not bind dma address",
1112 			    XGELL_IFNAME, lldev->instance);
1113 			ddi_dma_free_handle(&dma_handle);
1114 			goto _exit_cleanup;
1115 
1116 		case DDI_DMA_NOMAPPING:
1117 		case DDI_DMA_INUSE:
1118 		case DDI_DMA_TOOBIG:
1119 		default:
1120 			/* drop packet, don't retry */
1121 			xge_debug_ll(XGE_ERR,
1122 			    "%s%d: can not map message buffer",
1123 			    XGELL_IFNAME, lldev->instance);
1124 			ddi_dma_free_handle(&dma_handle);
1125 			goto _exit_cleanup;
1126 		}
1127 
1128 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1129 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1130 			    "requested c:%d+f:%d", XGELL_IFNAME,
1131 			    lldev->instance, ncookies, frag_cnt);
1132 			(void) ddi_dma_unbind_handle(dma_handle);
1133 			ddi_dma_free_handle(&dma_handle);
1134 			goto _exit_cleanup;
1135 		}
1136 
1137 		/* setup the descriptors for this data buffer */
1138 		while (ncookies) {
1139 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1140 			    frag_cnt++, dma_cookie.dmac_laddress,
1141 			    dma_cookie.dmac_size);
1142 			if (--ncookies) {
1143 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1144 			}
1145 
1146 		}
1147 
1148 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1149 
1150 		if (bp->b_cont &&
1151 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1152 			hldev->config.fifo.max_frags)) {
1153 			mblk_t *nmp;
1154 
1155 			xge_debug_ll(XGE_TRACE,
1156 			    "too many FRAGs [%d], pulling them up", frag_cnt);
1157 
1158 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1159 				/* Drop packet, don't retry */
1160 				xge_debug_ll(XGE_ERR,
1161 				    "%s%d: can not pullup message buffer",
1162 				    XGELL_IFNAME, lldev->instance);
1163 				goto _exit_cleanup;
1164 			}
1165 			freemsg(bp->b_cont);
1166 			bp->b_cont = nmp;
1167 		}
1168 	}
1169 
1170 	/* finalize unfinished copies */
1171 	if (used_copy) {
1172 		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
1173 		    frag_cnt++);
1174 	}
1175 
1176 	txd_priv->handle_cnt = handle_cnt;
1177 
1178 	/*
1179 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1180 	 * do all necessary work.
1181 	 */
1182 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
1183 	if ((hckflags & HW_LSO) && (mss != 0)) {
1184 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1185 	}
1186 
1187 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1188 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1189 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1190 	}
1191 	if (hckflags & HCK_FULLCKSUM) {
1192 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1193 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1194 	}
1195 
1196 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1197 
1198 	return (B_TRUE);
1199 
1200 _exit_cleanup:
1201 
1202 	for (i = 0; i < handle_cnt; i++) {
1203 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1204 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1205 		txd_priv->dma_handles[i] = 0;
1206 	}
1207 
1208 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1209 
1210 _exit:
1211 	if (retry) {
1212 		if (lldev->resched_avail != lldev->resched_send &&
1213 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1214 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1215 			lldev->resched_send = lldev->resched_avail;
1216 			return (B_FALSE);
1217 		} else {
1218 			lldev->resched_retry = 1;
1219 		}
1220 	}
1221 
1222 	freemsg(mp);
1223 	return (B_TRUE);
1224 }
1225 
1226 /*
1227  * xgell_m_tx
1228  * @arg: pointer to the xgelldev_t structure
1229  * @mp: pointer to the message buffer
1231  *
1232  * Called by the MAC Layer to send a chain of packets.
1233  */
1234 static mblk_t *
1235 xgell_m_tx(void *arg, mblk_t *mp)
1236 {
1237 	xgelldev_t *lldev = arg;
1238 	mblk_t *next;
1239 
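	/*
	 * Send each packet in turn.  If xgell_send() cannot accept one
	 * (e.g. descriptors are exhausted), re-link it and return the unsent
	 * remainder so the MAC layer will retry the chain later.
	 */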
1240 	while (mp != NULL) {
1241 		next = mp->b_next;
1242 		mp->b_next = NULL;
1243 
1244 		if (!xgell_send(lldev, mp)) {
1245 			mp->b_next = next;
1246 			break;
1247 		}
1248 		mp = next;
1249 	}
1250 
1251 	return (mp);
1252 }
1253 
1254 /*
1255  * xgell_rx_dtr_term
1256  *
1257  * Called by the HAL to terminate all DTRs for
1258  * Ring-type channels.
1259  */
1260 static void
1261 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1262     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1263 {
1264 	xgell_rxd_priv_t *rxd_priv =
1265 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1266 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1267 
1268 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1269 		xgelldev_t *lldev = rx_buffer->lldev;
1270 
1271 		mutex_enter(&lldev->bf_pool.pool_lock);
1272 		xge_hal_ring_dtr_free(channelh, dtrh);
1273 		xgell_rx_buffer_release(rx_buffer);
1274 		mutex_exit(&lldev->bf_pool.pool_lock);
1275 	}
1276 }
1277 
1278 /*
1279  * xgell_tx_term
1280  *
1281  * Called by the HAL to terminate all DTRs for
1282  * Fifo-type channels.
1283  */
1284 static void
1285 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1286     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1287 {
1288 	xgell_txd_priv_t *txd_priv =
1289 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1290 	mblk_t *mp = txd_priv->mblk;
1291 	int i;
1292 
1293 	/*
1294 	 * for Tx we must clean up the DTR *only* if it has been
1295 	 * posted!
1296 	 */
1297 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1298 		return;
1299 	}
1300 
1301 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1302 		xge_assert(txd_priv->dma_handles[i]);
1303 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1304 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1305 		txd_priv->dma_handles[i] = 0;
1306 	}
1307 
1308 	xge_hal_fifo_dtr_free(channelh, dtrh);
1309 
1310 	freemsg(mp);
1311 }
1312 
1313 /*
1314  * xgell_tx_open
1315  * @lldev: the link layer object
1316  *
1317  * Initialize and open all Tx channels;
1318  */
1319 static boolean_t
1320 xgell_tx_open(xgelldev_t *lldev)
1321 {
1322 	xge_hal_status_e status;
1323 	u64 adapter_status;
1324 	xge_hal_channel_attr_t attr;
1325 
1326 	attr.post_qid		= 0;
1327 	attr.compl_qid		= 0;
1328 	attr.callback		= xgell_xmit_compl;
1329 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1330 	attr.flags		= 0;
1331 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1332 	attr.userdata		= lldev;
1333 	attr.dtr_init		= NULL;
1334 	attr.dtr_term		= xgell_tx_term;
1335 
1336 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1337 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1338 		    "adapter status reads 0x%"PRIx64, XGELL_IFNAME,
1339 		    lldev->instance, (uint64_t)adapter_status);
1340 		return (B_FALSE);
1341 	}
1342 
1343 	status = xge_hal_channel_open(lldev->devh, &attr,
1344 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1345 	if (status != XGE_HAL_OK) {
1346 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1347 		    "got status code %d", XGELL_IFNAME,
1348 		    lldev->instance, status);
1349 		return (B_FALSE);
1350 	}
1351 
1352 	return (B_TRUE);
1353 }
1354 
1355 /*
1356  * xgell_rx_open
1357  * @lldev: the link layer object
1358  *
1359  * Initialize and open all Rx channels;
1360  */
1361 static boolean_t
1362 xgell_rx_open(xgelldev_t *lldev)
1363 {
1364 	xge_hal_status_e status;
1365 	u64 adapter_status;
1366 	xge_hal_channel_attr_t attr;
1367 
1368 	attr.post_qid		= XGELL_RING_MAIN_QID;
1369 	attr.compl_qid		= 0;
1370 	attr.callback		= xgell_rx_1b_compl;
1371 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1372 	attr.flags		= 0;
1373 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1374 	attr.dtr_init		= xgell_rx_dtr_replenish;
1375 	attr.dtr_term		= xgell_rx_dtr_term;
1376 
1377 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1378 		xge_debug_ll(XGE_ERR,
1379 		    "%s%d: device is not ready adapter status reads 0x%"PRIx64,
1380 		    XGELL_IFNAME, lldev->instance,
1381 		    (uint64_t)adapter_status);
1382 		return (B_FALSE);
1383 	}
1384 
1385 	lldev->ring_main.lldev = lldev;
1386 	attr.userdata = &lldev->ring_main;
1387 
1388 	status = xge_hal_channel_open(lldev->devh, &attr,
1389 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1390 	if (status != XGE_HAL_OK) {
1391 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, got status "
1392 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1393 		return (B_FALSE);
1394 	}
1395 
1396 	return (B_TRUE);
1397 }
1398 
1399 static int
1400 xgell_initiate_start(xgelldev_t *lldev)
1401 {
1402 	xge_hal_status_e status;
1403 	xge_hal_device_t *hldev = lldev->devh;
1404 	int maxpkt = hldev->config.mtu;
1405 
1406 	/* check initial mtu before enabling the device */
1407 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1408 	if (status != XGE_HAL_OK) {
1409 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1410 		    XGELL_IFNAME, lldev->instance, maxpkt);
1411 		return (EINVAL);
1412 	}
1413 
1414 	/* set initial mtu before enabling the device */
1415 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1416 	if (status != XGE_HAL_OK) {
1417 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1418 		    XGELL_IFNAME, lldev->instance, maxpkt);
1419 		return (EIO);
1420 	}
1421 
1422 	/* tune jumbo/normal frame UFC counters */
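	/*
	 * Presumably the rti.ufc_b/ufc_c fields are the receive-interrupt
	 * utilization frame-count thresholds: jumbo MTUs use the *_J
	 * defaults, normal MTUs the *_N defaults.
	 */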
1423 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b =
1424 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1425 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1426 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1427 
1428 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c =
1429 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1430 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1431 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1432 
1433 	/* now, enable the device */
1434 	status = xge_hal_device_enable(lldev->devh);
1435 	if (status != XGE_HAL_OK) {
1436 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1437 		    XGELL_IFNAME, lldev->instance);
1438 		return (EIO);
1439 	}
1440 
1441 	if (!xgell_rx_open(lldev)) {
1442 		status = xge_hal_device_disable(lldev->devh);
1443 		if (status != XGE_HAL_OK) {
1444 			u64 adapter_status;
1445 			(void) xge_hal_device_status(lldev->devh,
1446 			    &adapter_status);
1447 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1448 			    "the device. adapter status 0x%"PRIx64
1449 			    " returned status %d",
1450 			    XGELL_IFNAME, lldev->instance,
1451 			    (uint64_t)adapter_status, status);
1452 		}
1453 		xge_os_mdelay(1500);
1454 		return (ENOMEM);
1455 	}
1456 
1457 	if (!xgell_tx_open(lldev)) {
1458 		status = xge_hal_device_disable(lldev->devh);
1459 		if (status != XGE_HAL_OK) {
1460 			u64 adapter_status;
1461 			(void) xge_hal_device_status(lldev->devh,
1462 			    &adapter_status);
1463 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1464 			    "the device. adapter status 0x%"PRIx64
1465 			    " returned status %d",
1466 			    XGELL_IFNAME, lldev->instance,
1467 			    (uint64_t)adapter_status, status);
1468 		}
1469 		xge_os_mdelay(1500);
1470 		xge_hal_channel_close(lldev->ring_main.channelh,
1471 		    XGE_HAL_CHANNEL_OC_NORMAL);
1472 		return (ENOMEM);
1473 	}
1474 
1475 	/* time to enable interrupts */
1476 	xge_hal_device_intr_enable(lldev->devh);
1477 
1478 	lldev->is_initialized = 1;
1479 
1480 	return (0);
1481 }
1482 
1483 static void
1484 xgell_initiate_stop(xgelldev_t *lldev)
1485 {
1486 	xge_hal_status_e status;
1487 
1488 	lldev->is_initialized = 0;
1489 
1490 	status = xge_hal_device_disable(lldev->devh);
1491 	if (status != XGE_HAL_OK) {
1492 		u64 adapter_status;
1493 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1494 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1495 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1496 		    XGELL_IFNAME, lldev->instance,
1497 		    (uint64_t)adapter_status, status);
1498 	}
1499 	xge_hal_device_intr_disable(lldev->devh);
1500 
1501 	xge_debug_ll(XGE_TRACE, "%s",
1502 	    "waiting for device irq to become quiescent...");
1503 	xge_os_mdelay(1500);
1504 
1505 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1506 
1507 	xge_hal_channel_close(lldev->ring_main.channelh,
1508 	    XGE_HAL_CHANNEL_OC_NORMAL);
1509 
1510 	xge_hal_channel_close(lldev->fifo_channel,
1511 	    XGE_HAL_CHANNEL_OC_NORMAL);
1512 }
1513 
1514 /*
1515  * xgell_m_start
1516  * @arg: pointer to the xgelldev_t structure (lldev)
1517  *
1518  * This function is called by MAC Layer to enable the XFRAME
1519  * firmware to generate interrupts and also to prepare the
1520  * driver to call mac_rx() for delivering received packets
1521  * to MAC Layer.
1522  */
1523 static int
1524 xgell_m_start(void *arg)
1525 {
1526 	xgelldev_t *lldev = arg;
1527 	xge_hal_device_t *hldev = lldev->devh;
1528 	int ret;
1529 
1530 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1531 	    lldev->instance);
1532 
1533 	mutex_enter(&lldev->genlock);
1534 
1535 	if (lldev->is_initialized) {
1536 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1537 		    XGELL_IFNAME, lldev->instance);
1538 		mutex_exit(&lldev->genlock);
1539 		return (EINVAL);
1540 	}
1541 
1542 	hldev->terminating = 0;
1543 	if (ret = xgell_initiate_start(lldev)) {
1544 		mutex_exit(&lldev->genlock);
1545 		return (ret);
1546 	}
1547 
1548 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1549 
1550 	mutex_exit(&lldev->genlock);
1551 
1552 	return (0);
1553 }
1554 
1555 /*
1556  * xgell_m_stop
1557  * @arg: pointer to the xgelldev_t structure (lldev)
1558  *
1559  * This function is called by the MAC Layer to disable
1560  * the XFRAME firmware for generating any interrupts and
1561  * also stop the driver from calling mac_rx() for
1562  * delivering data packets to the MAC Layer.
1563  */
1564 static void
1565 xgell_m_stop(void *arg)
1566 {
1567 	xgelldev_t *lldev = arg;
1568 	xge_hal_device_t *hldev = lldev->devh;
1569 
1570 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1571 
1572 	mutex_enter(&lldev->genlock);
1573 	if (!lldev->is_initialized) {
1574 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1575 		mutex_exit(&lldev->genlock);
1576 		return;
1577 	}
1578 
1579 	xge_hal_device_terminating(hldev);
1580 	xgell_initiate_stop(lldev);
1581 
1582 	/* reset device */
1583 	(void) xge_hal_device_reset(lldev->devh);
1584 
1585 	mutex_exit(&lldev->genlock);
1586 
1587 	if (lldev->timeout_id != 0) {
1588 		(void) untimeout(lldev->timeout_id);
1589 	}
1590 
1591 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1592 }
1593 
1594 /*
1595  * xgell_onerr_reset
1596  * @lldev: pointer to xgelldev_t structure
1597  *
1598  * This function is called by the HAL event framework to reset the HW.
1599  * It must be called with genlock held.
1600  */
1601 int
1602 xgell_onerr_reset(xgelldev_t *lldev)
1603 {
1604 	int rc = 0;
1605 
1606 	if (!lldev->is_initialized) {
1607 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1608 		    XGELL_IFNAME, lldev->instance);
1609 		return (rc);
1610 	}
1611 
1612 	lldev->in_reset = 1;
1613 	xgell_initiate_stop(lldev);
1614 
1615 	/* reset device */
1616 	(void) xge_hal_device_reset(lldev->devh);
1617 
1618 	rc = xgell_initiate_start(lldev);
1619 	lldev->in_reset = 0;
1620 
1621 	return (rc);
1622 }
1623 
1624 
1625 /*
1626  * xgell_m_unicst
1627  * @arg: pointer to the xgelldev_t structure (lldev)
1628  * @macaddr: MAC address to be set
1629  *
1630  * This function is called by MAC Layer to set the physical (MAC) address
1631  * of the XFRAME device.
1632  */
1633 static int
1634 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1635 {
1636 	xge_hal_status_e status;
1637 	xgelldev_t *lldev = (xgelldev_t *)arg;
1638 	xge_hal_device_t *hldev = lldev->devh;
1639 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1640 
1641 	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1642 
1643 	mutex_enter(&lldev->genlock);
1644 
1645 	xge_debug_ll(XGE_TRACE,
1646 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1647 	    macaddr[0], macaddr[1], macaddr[2],
1648 	    macaddr[3], macaddr[4], macaddr[5]);
1649 
1650 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1651 	if (status != XGE_HAL_OK) {
1652 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1653 		    XGELL_IFNAME, lldev->instance);
1654 		mutex_exit(&lldev->genlock);
1655 		return (EIO);
1656 	}
1657 
1658 	mutex_exit(&lldev->genlock);
1659 
1660 	return (0);
1661 }
1662 
1663 
1664 /*
1665  * xgell_m_multicst
1666  * @arg: pointer to the xgelldev_t structure (lldev)
1667  * @add:
1668  * @mc_addr:
1669  *
1670  * This function is called by MAC Layer to enable or
1671  * disable device-level reception of specific multicast addresses.
1672  */
1673 static int
1674 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1675 {
1676 	xge_hal_status_e status;
1677 	xgelldev_t *lldev = (xgelldev_t *)arg;
1678 	xge_hal_device_t *hldev = lldev->devh;
1679 
1680 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1681 
1682 	mutex_enter(&lldev->genlock);
1683 
1684 	if (!lldev->is_initialized) {
1685 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1686 		    XGELL_IFNAME, lldev->instance);
1687 		mutex_exit(&lldev->genlock);
1688 		return (EIO);
1689 	}
1690 
1691 	/* FIXME: missing HAL functionality: enable_one() */
1692 
1693 	status = (add) ?
1694 	    xge_hal_device_mcast_enable(hldev) :
1695 	    xge_hal_device_mcast_disable(hldev);
1696 
1697 	if (status != XGE_HAL_OK) {
1698 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1699 		    add ? "enable" : "disable", status);
1700 		mutex_exit(&lldev->genlock);
1701 		return (EIO);
1702 	}
1703 
1704 	mutex_exit(&lldev->genlock);
1705 
1706 	return (0);
1707 }
1708 
1709 
1710 /*
1711  * xgell_m_promisc
1712  * @arg: pointer to the xgelldev_t structure (lldev)
1713  * @on:
1714  *
1715  * This function is called by MAC Layer to enable or
1716  * disable the reception of all the packets on the medium
1717  */
1718 static int
1719 xgell_m_promisc(void *arg, boolean_t on)
1720 {
1721 	xgelldev_t *lldev = (xgelldev_t *)arg;
1722 	xge_hal_device_t *hldev = lldev->devh;
1723 
1724 	mutex_enter(&lldev->genlock);
1725 
1726 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1727 
1728 	if (!lldev->is_initialized) {
1729 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1730 		    XGELL_IFNAME, lldev->instance);
1731 		mutex_exit(&lldev->genlock);
1732 		return (EIO);
1733 	}
1734 
1735 	if (on) {
1736 		xge_hal_device_promisc_enable(hldev);
1737 	} else {
1738 		xge_hal_device_promisc_disable(hldev);
1739 	}
1740 
1741 	mutex_exit(&lldev->genlock);
1742 
1743 	return (0);
1744 }
1745 
1746 /*
1747  * xgell_m_stat
1748  * @arg: pointer to the xgelldev_t structure (lldev)
1749  *
1750  * This function is called by MAC Layer to get network statistics
1751  * from the driver.
1752  */
1753 static int
1754 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1755 {
1756 	xge_hal_stats_hw_info_t *hw_info;
1757 	xgelldev_t *lldev = (xgelldev_t *)arg;
1758 	xge_hal_device_t *hldev = lldev->devh;
1759 
1760 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1761 
1762 	if (!mutex_tryenter(&lldev->genlock))
1763 		return (EAGAIN);
1764 
1765 	if (!lldev->is_initialized) {
1766 		mutex_exit(&lldev->genlock);
1767 		return (EAGAIN);
1768 	}
1769 
1770 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1771 		mutex_exit(&lldev->genlock);
1772 		return (EAGAIN);
1773 	}
1774 
1775 	switch (stat) {
1776 	case MAC_STAT_IFSPEED:
1777 		*val = 10000000000ull; /* 10G */
1778 		break;
1779 
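	/*
	 * Most hardware counters below are 32-bit registers paired with a
	 * 32-bit overflow register; the two are combined into one 64-bit
	 * value here.
	 */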
1780 	case MAC_STAT_MULTIRCV:
1781 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
1782 		    hw_info->rmac_vld_mcst_frms;
1783 		break;
1784 
1785 	case MAC_STAT_BRDCSTRCV:
1786 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
1787 		    hw_info->rmac_vld_bcst_frms;
1788 		break;
1789 
1790 	case MAC_STAT_MULTIXMT:
1791 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
1792 		    hw_info->tmac_mcst_frms;
1793 		break;
1794 
1795 	case MAC_STAT_BRDCSTXMT:
1796 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
1797 		    hw_info->tmac_bcst_frms;
1798 		break;
1799 
1800 	case MAC_STAT_RBYTES:
1801 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
1802 		    hw_info->rmac_ttl_octets;
1803 		break;
1804 
1805 	case MAC_STAT_NORCVBUF:
1806 		*val = hw_info->rmac_drop_frms;
1807 		break;
1808 
1809 	case MAC_STAT_IERRORS:
1810 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
1811 		    hw_info->rmac_discarded_frms;
1812 		break;
1813 
1814 	case MAC_STAT_OBYTES:
1815 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
1816 		    hw_info->tmac_ttl_octets;
1817 		break;
1818 
1819 	case MAC_STAT_NOXMTBUF:
1820 		*val = hw_info->tmac_drop_frms;
1821 		break;
1822 
1823 	case MAC_STAT_OERRORS:
1824 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
1825 		    hw_info->tmac_any_err_frms;
1826 		break;
1827 
1828 	case MAC_STAT_IPACKETS:
1829 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
1830 		    hw_info->rmac_vld_frms;
1831 		break;
1832 
1833 	case MAC_STAT_OPACKETS:
1834 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
1835 		    hw_info->tmac_frms;
1836 		break;
1837 
1838 	case ETHER_STAT_FCS_ERRORS:
1839 		*val = hw_info->rmac_fcs_err_frms;
1840 		break;
1841 
1842 	case ETHER_STAT_TOOLONG_ERRORS:
1843 		*val = hw_info->rmac_long_frms;
1844 		break;
1845 
1846 	case ETHER_STAT_LINK_DUPLEX:
1847 		*val = LINK_DUPLEX_FULL;
1848 		break;
1849 
1850 	default:
1851 		mutex_exit(&lldev->genlock);
1852 		return (ENOTSUP);
1853 	}
1854 
1855 	mutex_exit(&lldev->genlock);
1856 
1857 	return (0);
1858 }
1859 
1860 /*
1861  * xgell_device_alloc - Allocate new LL device
1862  */
1863 int
1864 xgell_device_alloc(xge_hal_device_h devh,
1865     dev_info_t *dev_info, xgelldev_t **lldev_out)
1866 {
1867 	xgelldev_t *lldev;
1868 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1869 	int instance = ddi_get_instance(dev_info);
1870 
1871 	*lldev_out = NULL;
1872 
1873 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
1874 	    XGELL_IFNAME, instance);
1875 
1876 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1877 
1878 	lldev->devh = hldev;
1879 	lldev->instance = instance;
1880 	lldev->dev_info = dev_info;
1881 
1882 	*lldev_out = lldev;
1883 
1884 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1885 
1886 	return (DDI_SUCCESS);
1887 }
1888 
1889 /*
1890  * xgell_device_free
1891  */
1892 void
1893 xgell_device_free(xgelldev_t *lldev)
1894 {
1895 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1896 	    XGELL_IFNAME, lldev->instance);
1897 
1898 	kmem_free(lldev, sizeof (xgelldev_t));
1899 }
1900 
1901 /*
1902  * xgell_m_ioctl
1903  */
1904 static void
1905 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1906 {
1907 	xgelldev_t *lldev = arg;
1908 	struct iocblk *iocp;
1909 	int err = 0;
1910 	int cmd;
1911 	int need_privilege = 1;
1912 	int ret = 0;
1913 
1914 
1915 	iocp = (struct iocblk *)mp->b_rptr;
1916 	iocp->ioc_error = 0;
1917 	cmd = iocp->ioc_cmd;
1918 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1919 	switch (cmd) {
1920 	case ND_GET:
1921 		need_privilege = 0;
1922 		/* FALLTHRU */
1923 	case ND_SET:
1924 		break;
1925 	default:
1926 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1927 		miocnak(wq, mp, 0, EINVAL);
1928 		return;
1929 	}
1930 
1931 	if (need_privilege) {
1932 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1933 		if (err != 0) {
1934 			xge_debug_ll(XGE_ERR,
1935 			    "secpolicy_net_config(): rejected cmd 0x%x, err %d",
1936 			    cmd, err);
1937 			miocnak(wq, mp, 0, err);
1938 			return;
1939 		}
1940 	}
1941 
1942 	switch (cmd) {
1943 	case ND_GET:
1944 		/*
1945 		 * If nd_getset() returns B_FALSE, the command was
1946 		 * not valid (e.g. unknown name), so we just tell the
1947 		 * top-level ioctl code to send a NAK (with code EINVAL).
1948 		 *
1949 		 * Otherwise, nd_getset() will have built the reply to
1950 		 * be sent (but not actually sent it), so we tell the
1951 		 * caller to send the prepared reply.
1952 		 */
1953 		ret = nd_getset(wq, lldev->ndp, mp);
1954 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1955 		break;
1956 
1957 	case ND_SET:
1958 		ret = nd_getset(wq, lldev->ndp, mp);
1959 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1960 		break;
1961 
1962 	default:
1963 		break;
1964 	}
1965 
1966 	if (ret == B_FALSE) {
1967 		xge_debug_ll(XGE_ERR,
1968 		    "nd_getset(): rejected cmd 0x%x, err %d",
1969 		    cmd, err);
1970 		miocnak(wq, mp, 0, EINVAL);
1971 	} else {
1972 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1973 		    M_IOCACK : M_IOCNAK;
1974 		qreply(wq, mp);
1975 	}
1976 }
1977 
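/*
 * xgell_m_getcapab
 *
 * Report device capabilities to the MAC layer: hardware checksum
 * offload flags and, when enabled in the configuration, TCP LSO.
 */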
1978 /* ARGSUSED */
1979 static boolean_t
1980 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1981 {
1982 	xgelldev_t *lldev = arg;
1983 
1984 	switch (cap) {
1985 	case MAC_CAPAB_HCKSUM: {
1986 		uint32_t *hcksum_txflags = cap_data;
1987 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
1988 		    HCKSUM_IPHDRCKSUM;
1989 		break;
1990 	}
1991 	case MAC_CAPAB_LSO: {
1992 		mac_capab_lso_t *cap_lso = cap_data;
1993 
1994 		if (lldev->config.lso_enable) {
1995 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1996 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
1997 			break;
1998 		} else {
1999 			return (B_FALSE);
2000 		}
2001 	}
2002 	default:
2003 		return (B_FALSE);
2004 	}
2005 	return (B_TRUE);
2006 }
2007 
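/*
 * xgell_stats_get
 *
 * ndd "stats" read handler: concatenates the TMAC, RMAC, PCI, software
 * and HAL statistics reported by the HAL into a single reply message.
 */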
2008 static int
2009 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2010 {
2011 	xgelldev_t *lldev = (xgelldev_t *)cp;
2012 	xge_hal_status_e status;
2013 	int count = 0, retsize;
2014 	char *buf;
2015 
2016 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2017 	if (buf == NULL) {
2018 		return (ENOSPC);
2019 	}
2020 
2021 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2022 	    buf, &retsize);
2023 	if (status != XGE_HAL_OK) {
2024 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2025 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2026 		return (EINVAL);
2027 	}
2028 	count += retsize;
2029 
2030 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2031 	    XGELL_STATS_BUFSIZE - count,
2032 	    buf+count, &retsize);
2033 	if (status != XGE_HAL_OK) {
2034 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2035 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2036 		return (EINVAL);
2037 	}
2038 	count += retsize;
2039 
2040 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2041 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2042 	if (status != XGE_HAL_OK) {
2043 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2044 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2045 		return (EINVAL);
2046 	}
2047 	count += retsize;
2048 
2049 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2050 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2051 	if (status != XGE_HAL_OK) {
2052 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2053 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2054 		return (EINVAL);
2055 	}
2056 	count += retsize;
2057 
2058 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2059 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2060 	if (status != XGE_HAL_OK) {
2061 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2062 		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
2063 		return (EINVAL);
2064 	}
2065 	count += retsize;
2066 
2067 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2068 	(void) mi_mpprintf(mp, "%s", buf);
2069 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2070 
2071 	return (0);
2072 }
2073 
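/*
 * xgell_pciconf_get
 *
 * ndd "pciconf" read handler: dumps the device's PCI configuration
 * space as formatted by the HAL.
 */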
2074 static int
2075 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2076 {
2077 	xgelldev_t *lldev = (xgelldev_t *)cp;
2078 	xge_hal_status_e status;
2079 	int retsize;
2080 	char *buf;
2081 
2082 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2083 	if (buf == NULL) {
2084 		return (ENOSPC);
2085 	}
2086 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2087 	    buf, &retsize);
2088 	if (status != XGE_HAL_OK) {
2089 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2090 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2091 		return (EINVAL);
2092 	}
2093 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2094 	(void) mi_mpprintf(mp, "%s", buf);
2095 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2096 
2097 	return (0);
2098 }
2099 
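/*
 * xgell_about_get
 *
 * ndd "about" read handler: prints the information returned by
 * xge_hal_aux_about_read().
 */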
2100 static int
2101 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2102 {
2103 	xgelldev_t *lldev = (xgelldev_t *)cp;
2104 	xge_hal_status_e status;
2105 	int retsize;
2106 	char *buf;
2107 
2108 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2109 	if (buf == NULL) {
2110 		return (ENOSPC);
2111 	}
2112 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2113 	    buf, &retsize);
2114 	if (status != XGE_HAL_OK) {
2115 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2116 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2117 		return (EINVAL);
2118 	}
2119 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2120 	(void) mi_mpprintf(mp, "%s", buf);
2121 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2122 
2123 	return (0);
2124 }
2125 
2126 static unsigned long bar0_offset = 0x110; /* adapter_control */
2127 
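/*
 * xgell_bar0_get
 *
 * ndd "bar0" read handler: reads BAR0 register space at bar0_offset,
 * which defaults to adapter_control and is set via xgell_bar0_set().
 */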
2128 static int
2129 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2130 {
2131 	xgelldev_t *lldev = (xgelldev_t *)cp;
2132 	xge_hal_status_e status;
2133 	int retsize;
2134 	char *buf;
2135 
2136 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2137 	if (buf == NULL) {
2138 		return (ENOSPC);
2139 	}
2140 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2141 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2142 	if (status != XGE_HAL_OK) {
2143 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2144 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2145 		return (EINVAL);
2146 	}
2147 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2148 	(void) mi_mpprintf(mp, "%s", buf);
2149 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2150 
2151 	return (0);
2152 }
2153 
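/*
 * xgell_bar0_set
 *
 * ndd "bar0" write handler: parses a hexadecimal offset (with or
 * without a leading "0x") to be used by subsequent bar0 reads.
 */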
2154 static int
2155 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2156 {
2157 	unsigned long old_offset = bar0_offset;
2158 	char *end;
2159 
2160 	if (value && *value == '0' &&
2161 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2162 		value += 2;
2163 	}
2164 
2165 	bar0_offset = mi_strtol(value, &end, 16);
2166 	if (end == value) {
2167 		bar0_offset = old_offset;
2168 		return (EINVAL);
2169 	}
2170 
2171 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2172 
2173 	return (0);
2174 }
2175 
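/*
 * xgell_debug_level_get
 *
 * ndd "debug_level" read handler: reports the current HAL debug level.
 */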
2176 static int
2177 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2178 {
2179 	char *buf;
2180 
2181 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2182 	if (buf == NULL) {
2183 		return (ENOSPC);
2184 	}
2185 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2186 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2187 
2188 	return (0);
2189 }
2190 
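/*
 * xgell_debug_level_set
 *
 * ndd "debug_level" write handler: accepts a decimal level between
 * XGE_NONE and XGE_ERR.
 */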
2191 static int
2192 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2193     cred_t *credp)
2194 {
2195 	int level;
2196 	char *end;
2197 
2198 	level = mi_strtol(value, &end, 10);
2199 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2200 		return (EINVAL);
2201 	}
2202 
2203 	xge_hal_driver_debug_level_set(level);
2204 
2205 	return (0);
2206 }
2207 
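/*
 * xgell_debug_module_mask_get
 *
 * ndd "debug_module_mask" read handler: reports the current HAL debug
 * module mask.
 */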
2208 static int
2209 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2210 {
2211 	char *buf;
2212 
2213 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2214 	if (buf == NULL) {
2215 		return (ENOSPC);
2216 	}
2217 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2218 	    xge_hal_driver_debug_module_mask());
2219 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2220 
2221 	return (0);
2222 }
2223 
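/*
 * xgell_debug_module_mask_set
 *
 * ndd "debug_module_mask" write handler: parses a hexadecimal mask
 * (with or without a leading "0x").
 */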
2224 static int
2225 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2226     cred_t *credp)
2227 {
2228 	u32 mask;
2229 	char *end;
2230 
2231 	if (value && *value == '0' &&
2232 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2233 		value += 2;
2234 	}
2235 
2236 	mask = mi_strtol(value, &end, 16);
2237 	if (end == value) {
2238 		return (EINVAL);
2239 	}
2240 
2241 	xge_hal_driver_debug_module_mask_set(mask);
2242 
2243 	return (0);
2244 }
2245 
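/*
 * xgell_devconfig_get
 *
 * ndd "devconfig" read handler: dumps the current device configuration
 * as formatted by the HAL.
 */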
2246 static int
2247 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2248 {
2249 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2250 	xge_hal_status_e status;
2251 	int retsize;
2252 	char *buf;
2253 
2254 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2255 	if (buf == NULL) {
2256 		return (ENOSPC);
2257 	}
2258 	status = xge_hal_aux_device_config_read(lldev->devh,
2259 	    XGELL_DEVCONF_BUFSIZE,
2260 	    buf, &retsize);
2261 	if (status != XGE_HAL_OK) {
2262 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2263 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2264 		    status);
2265 		return (EINVAL);
2266 	}
2267 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2268 	(void) mi_mpprintf(mp, "%s", buf);
2269 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2270 
2271 	return (0);
2272 }
2273 
2274 /*
2275  * xgell_device_register
2276  * @lldev: pointer to LL device allocated by xgell_device_alloc()
2277  * @config: pointer to this network device's configuration
2278  *
2279  * This function will load the ndd parameters, create the RX buffer
2280  * pool and register the network device with the MAC layer.
2281  */
2282 int
2283 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2284 {
2285 	mac_register_t *macp = NULL;
2286 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2287 
2288 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2289 	    (caddr_t)lldev) == B_FALSE)
2290 		goto xgell_ndd_fail;
2291 
2292 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2293 	    (caddr_t)lldev) == B_FALSE)
2294 		goto xgell_ndd_fail;
2295 
2296 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2297 	    (caddr_t)lldev) == B_FALSE)
2298 		goto xgell_ndd_fail;
2299 
2300 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2301 	    (caddr_t)lldev) == B_FALSE)
2302 		goto xgell_ndd_fail;
2303 
2304 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2305 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2306 		goto xgell_ndd_fail;
2307 
2308 	if (nd_load(&lldev->ndp, "debug_module_mask",
2309 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2310 	    (caddr_t)lldev) == B_FALSE)
2311 		goto xgell_ndd_fail;
2312 
2313 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2314 	    (caddr_t)lldev) == B_FALSE)
2315 		goto xgell_ndd_fail;
2316 
2317 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2318 
2319 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2320 		nd_free(&lldev->ndp);
2321 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2322 		return (DDI_FAILURE);
2323 	}
2324 
2325 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2326 
2327 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2328 		goto xgell_register_fail;
2329 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2330 	macp->m_driver = lldev;
2331 	macp->m_dip = lldev->dev_info;
2332 	macp->m_src_addr = hldev->macaddr[0];
2333 	macp->m_callbacks = &xgell_m_callbacks;
2334 	macp->m_min_sdu = 0;
2335 	macp->m_max_sdu = hldev->config.mtu;
2336 	/*
2337 	 * Finally, we're ready to register ourselves with the Nemo
2338 	 * interface; if this succeeds, we're all ready to start()
2339 	 */
2340 
2341 	if (mac_register(macp, &lldev->mh) != 0)
2342 		goto xgell_register_fail;
2343 
2344 	/* Largest packet the TX path can copy into aligned buffers */
2345 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2346 	    hldev->config.fifo.alignment_size *
2347 	    hldev->config.fifo.max_aligned_frags;
2348 
2349 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2350 	    XGELL_IFNAME, lldev->instance);
2351 
2352 	return (DDI_SUCCESS);
2353 
2354 xgell_ndd_fail:
2355 	nd_free(&lldev->ndp);
2356 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2357 	return (DDI_FAILURE);
2358 
2359 xgell_register_fail:
2360 	if (macp != NULL)
2361 		mac_free(macp);
2362 	nd_free(&lldev->ndp);
2363 	mutex_destroy(&lldev->genlock);
2364 	/* Ignore return value, since RX has not been started yet */
2365 	(void) xgell_rx_destroy_buffer_pool(lldev);
2366 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2367 	return (DDI_FAILURE);
2368 }
2369 
2370 /*
2371  * xgell_device_unregister
2372  * @lldev: pointer to valid LL device.
2373  *
2374  * This function will unregister the network device and release the
2375  * resources it holds (RX buffer pool, ndd parameters, genlock).
2376  */
2377 int
2378 xgell_device_unregister(xgelldev_t *lldev)
2379 {
2380 	/*
2381 	 * Destroy RX buffer pool.
2382 	 */
2383 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2384 		return (DDI_FAILURE);
2385 	}
2386 
2387 	if (mac_unregister(lldev->mh) != 0) {
2388 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2389 		    XGELL_IFNAME, lldev->instance);
2390 		return (DDI_FAILURE);
2391 	}
2392 
2393 	mutex_destroy(&lldev->genlock);
2394 
2395 	nd_free(&lldev->ndp);
2396 
2397 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2398 	    XGELL_IFNAME, lldev->instance);
2399 
2400 	return (DDI_SUCCESS);
2401 }
2402