xref: /illumos-gate/usr/src/uts/common/io/xge/drv/xgell.c (revision 7f7322febbcfe774b7270abc3b191c094bfcc517)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
31  *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 
44 #define	XGELL_MAX_FRAME_SIZE(macp)	(((macp)->m_info.mi_sdu_max) + \
45     sizeof (struct ether_vlan_header))
46 
47 u8 xge_broadcast_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
48 
49 #define	HEADROOM		2	/* for DIX-only packets */
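/*
 * The 2-byte HEADROOM plus the 14-byte DIX Ethernet header lands the
 * IP header of a received DIX frame on a 4-byte boundary (2 + 14 = 16).
 */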
50 
51 #ifdef XGELL_L3_ALIGNED
52 void header_free_func(void *arg) { }
53 frtn_t header_frtn = {header_free_func, NULL};
54 #endif
55 
56 /* DMA attributes used for Tx side */
57 static struct ddi_dma_attr tx_dma_attr = {
58 	DMA_ATTR_V0,			/* dma_attr_version */
59 	0x0ULL,				/* dma_attr_addr_lo */
60 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
61 	0xFFFFFFFFULL,			/* dma_attr_count_max */
62 	0x1ULL,				/* dma_attr_align */
63 	0xFFF,				/* dma_attr_burstsizes */
64 	1,				/* dma_attr_minxfer */
65 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
66 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
67 	4,				/* dma_attr_sgllen */
68 	1,				/* dma_attr_granular */
69 	0				/* dma_attr_flags */
70 };
71 
72 /* Aligned DMA attributes used for Tx side */
73 struct ddi_dma_attr tx_dma_attr_align = {
74 	DMA_ATTR_V0,			/* dma_attr_version */
75 	0x0ULL,				/* dma_attr_addr_lo */
76 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
77 	0xFFFFFFFFULL,			/* dma_attr_count_max */
78 	4096,				/* dma_attr_align */
79 	0xFFF,				/* dma_attr_burstsizes */
80 	1,				/* dma_attr_minxfer */
81 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
82 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
83 	4,				/* dma_attr_sgllen */
84 	1,				/* dma_attr_granular */
85 	0				/* dma_attr_flags */
86 };
87 
88 /*
89  * DMA attributes used when using ddi_dma_mem_alloc to
90  * allocate HAL descriptors and Rx buffers during replenish
91  */
92 static struct ddi_dma_attr hal_dma_attr = {
93 	DMA_ATTR_V0,			/* dma_attr_version */
94 	0x0ULL,				/* dma_attr_addr_lo */
95 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
96 	0xFFFFFFFFULL,			/* dma_attr_count_max */
97 	0x1ULL,				/* dma_attr_align */
98 	0xFFF,				/* dma_attr_burstsizes */
99 	1,				/* dma_attr_minxfer */
100 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
101 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
102 	1,				/* dma_attr_sgllen */
103 	1,				/* dma_attr_granular */
104 	0				/* dma_attr_flags */
105 };
106 
107 /*
108  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
109  * allocate HAL descriptors and Rx buffers during replenish
110  */
111 struct ddi_dma_attr hal_dma_attr_aligned = {
112 	DMA_ATTR_V0,			/* dma_attr_version */
113 	0x0ULL,				/* dma_attr_addr_lo */
114 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
115 	0xFFFFFFFFULL,			/* dma_attr_count_max */
116 	4096,				/* dma_attr_align */
117 	0xFFF,				/* dma_attr_burstsizes */
118 	1,				/* dma_attr_minxfer */
119 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
120 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
121 	1,				/* dma_attr_sgllen */
122 	1,				/* dma_attr_granular */
123 	0				/* dma_attr_flags */
124 };
125 
126 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
127 struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
128 
129 /*
130  * xge_device_poll
131  *
132  * Called by a cyclic every second. xge_callback_event_queued() also calls
133  * this when a HAL event has been rescheduled.
134  */
135 /*ARGSUSED*/
136 void
137 xge_device_poll(void *data)
138 {
139 	xgelldev_t *lldev = xge_hal_device_private(data);
140 
141 	mutex_enter(&lldev->genlock);
142 	if (lldev->is_initialized) {
143 		xge_hal_device_poll(data);
144 		lldev->timeout_id = timeout(xge_device_poll, data,
145 		    XGE_DEV_POLL_TICKS);
146 	}
147 	mutex_exit(&lldev->genlock);
148 }
149 
150 /*
151  * xge_device_poll_now
152  *
153  * Will call xge_device_poll() immediately
154  */
155 void
156 xge_device_poll_now(void *data)
157 {
158 	xgelldev_t *lldev = xge_hal_device_private(data);
159 
160 	mutex_enter(&lldev->genlock);
161 	(void) untimeout(lldev->timeout_id);
162 	lldev->timeout_id = timeout(xge_device_poll, data, 0);
163 	mutex_exit(&lldev->genlock);
164 
165 }
166 
167 /*
168  * xgell_callback_link_up
169  *
170  * Called by the HAL to notify of an HW link-up state change.
171  */
172 void
173 xgell_callback_link_up(void *userdata)
174 {
175 	xgelldev_t *lldev = (xgelldev_t *)userdata;
176 
177 	mac_link_update(lldev->macp, LINK_STATE_UP);
178 	/* Link state should be reported to the user whenever it changes */
179 	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
180 	    XGELL_IFNAME, lldev->instance);
181 }
182 
183 /*
184  * xgell_callback_link_down
185  *
186  * Called by the HAL to notify of an HW link-down state change.
187  */
188 void
189 xgell_callback_link_down(void *userdata)
190 {
191 	xgelldev_t *lldev = (xgelldev_t *)userdata;
192 
193 	mac_link_update(lldev->macp, LINK_STATE_DOWN);
194 	/* Link state should be reported to the user whenever it changes */
195 	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
196 	    lldev->instance);
197 }
198 
199 /*
200  * xgell_rx_buffer_replenish_all
201  *
202  * Replenish all freed dtr(s) with buffers from the free pool. Called by
203  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
204  * Must be called with pool_lock held.
205  */
206 static void
207 xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
208 {
209 	xge_hal_dtr_h dtr;
210 	xgell_rx_buffer_t *rx_buffer;
211 	xgell_rxd_priv_t *rxd_priv;
212 
213 	while ((lldev->bf_pool.free > 0) &&
214 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
215 	    XGE_HAL_OK)) {
216 		rx_buffer = lldev->bf_pool.head;
217 		lldev->bf_pool.head = rx_buffer->next;
218 		lldev->bf_pool.free--;
219 
220 		xge_assert(rx_buffer);
221 		xge_assert(rx_buffer->dma_addr);
222 
223 		rxd_priv = (xgell_rxd_priv_t *)
224 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
225 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
226 		    lldev->bf_pool.size);
227 
228 		rxd_priv->rx_buffer = rx_buffer;
229 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
230 	}
231 }
232 
233 /*
234  * xgell_rx_buffer_release
235  *
236  * The only thing done here is to put the buffer back to the pool.
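 * Takes pool_lock internally, so the caller must not hold it.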
237  */
238 static void
239 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
240 {
241 	xgelldev_t *lldev = rx_buffer->lldev;
242 
243 	mutex_enter(&lldev->bf_pool.pool_lock);
244 
245 	/* Put the buffer back to pool */
246 	rx_buffer->next = lldev->bf_pool.head;
247 	lldev->bf_pool.head = rx_buffer;
248 
249 	lldev->bf_pool.free++;
250 
251 	mutex_exit(&lldev->bf_pool.pool_lock);
252 }
253 
254 /*
255  * xgell_rx_buffer_recycle
256  *
257  * Called back (via desballoc()) to "free" the resource.
258  * We will try to replenish all descriptors.
259  */
260 static void
261 xgell_rx_buffer_recycle(char *arg)
262 {
263 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
264 	xgelldev_t *lldev = rx_buffer->lldev;
265 
266 	xgell_rx_buffer_release(rx_buffer);
267 
268 	mutex_enter(&lldev->bf_pool.pool_lock);
269 	lldev->bf_pool.post--;
270 
271 	/*
272 	 * Until a good way to set this hiwat is found, just always call
273 	 * replenish_all. *TODO*
274 	 */
275 	if (lldev->is_initialized != 0) {
276 		xgell_rx_buffer_replenish_all(lldev);
277 	}
278 
279 	mutex_exit(&lldev->bf_pool.pool_lock);
280 }
281 
282 /*
283  * xgell_rx_buffer_alloc
284  *
285  * Allocate one rx buffer and return a pointer to it.
286  * Return NULL on failure.
287  */
288 static xgell_rx_buffer_t *
289 xgell_rx_buffer_alloc(xgelldev_t *lldev)
290 {
291 	xge_hal_device_t *hldev;
292 	void *vaddr;
293 	ddi_dma_handle_t dma_handle;
294 	ddi_acc_handle_t dma_acch;
295 	dma_addr_t dma_addr;
296 	uint_t ncookies;
297 	ddi_dma_cookie_t dma_cookie;
298 	size_t real_size;
299 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
300 	xgell_rx_buffer_t *rx_buffer;
301 
302 	hldev = (xge_hal_device_t *)lldev->devh;
303 
304 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
305 	    0, &dma_handle) != DDI_SUCCESS) {
306 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
307 		    XGELL_IFNAME, lldev->instance);
308 		goto handle_failed;
309 	}
310 
311 	/* reserve some space at the end of the buffer for recycling */
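	/*
	 * The resulting allocation is laid out as:
	 *
	 *   vaddr
	 *   +----------+-----------------------+-------------------+
	 *   | HEADROOM | bf_pool.size (DMA'd)  | xgell_rx_buffer_t |
	 *   +----------+-----------------------+-------------------+
	 *
	 * Only the middle region is bound for DMA below; the trailing
	 * xgell_rx_buffer_t keeps the recycle state next to its data.
	 */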
312 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
313 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
314 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
315 	    DDI_SUCCESS) {
316 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
317 		    XGELL_IFNAME, lldev->instance);
318 		goto mem_failed;
319 	}
320 
321 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
322 	    real_size) {
323 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
324 		    XGELL_IFNAME, lldev->instance);
325 		goto bind_failed;
326 	}
327 
328 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
329 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
330 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
331 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
332 		    XGELL_IFNAME, lldev->instance);
333 		goto bind_failed;
334 	}
335 
336 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
337 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
338 		    XGELL_IFNAME, lldev->instance);
339 		goto check_failed;
340 	}
341 
342 	dma_addr = dma_cookie.dmac_laddress;
343 
344 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
345 	    sizeof (xgell_rx_buffer_t));
346 	rx_buffer->next = NULL;
347 	rx_buffer->vaddr = vaddr;
348 	rx_buffer->dma_addr = dma_addr;
349 	rx_buffer->dma_handle = dma_handle;
350 	rx_buffer->dma_acch = dma_acch;
351 	rx_buffer->lldev = lldev;
352 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
353 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
354 
355 	return (rx_buffer);
356 
357 check_failed:
358 	(void) ddi_dma_unbind_handle(dma_handle);
359 bind_failed:
360 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
361 	ddi_dma_mem_free(&dma_acch);
362 mem_failed:
363 	ddi_dma_free_handle(&dma_handle);
364 handle_failed:
365 
366 	return (NULL);
367 }
368 
369 /*
370  * xgell_rx_destroy_buffer_pool
371  *
372  * Destroy the buffer pool. If any buffers are still held by the upper
373  * layer (recorded by bf_pool.post), return DDI_FAILURE to reject the unload.
374  */
375 static int
376 xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
377 {
378 	xgell_rx_buffer_t *rx_buffer;
379 	ddi_dma_handle_t  dma_handle;
380 	ddi_acc_handle_t  dma_acch;
381 	int i;
382 
383 	/*
384 	 * If there is any posted buffer, the driver must refuse to be
385 	 * detached. The upper layer needs to release them first.
386 	 */
387 	if (lldev->bf_pool.post != 0) {
388 		xge_debug_ll(XGE_ERR,
389 		    "%s%d has some buffers not be recycled, try later!",
390 		    XGELL_IFNAME, lldev->instance);
391 		return (DDI_FAILURE);
392 	}
393 
394 	/*
395 	 * Release buffers one by one.
396 	 */
397 	for (i = lldev->bf_pool.total; i > 0; i--) {
398 		rx_buffer = lldev->bf_pool.head;
399 		xge_assert(rx_buffer != NULL);
400 
401 		lldev->bf_pool.head = rx_buffer->next;
402 
403 		dma_handle = rx_buffer->dma_handle;
404 		dma_acch = rx_buffer->dma_acch;
405 
406 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
407 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
408 			lldev->bf_pool.head = rx_buffer;
409 			return (DDI_FAILURE);
410 		}
411 		ddi_dma_mem_free(&dma_acch);
412 		ddi_dma_free_handle(&dma_handle);
413 
414 		lldev->bf_pool.total--;
415 		lldev->bf_pool.free--;
416 	}
417 
418 	mutex_destroy(&lldev->bf_pool.pool_lock);
419 	return (DDI_SUCCESS);
420 }
421 
422 /*
423  * xgell_rx_create_buffer_pool
424  *
425  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
426  */
427 static int
428 xgell_rx_create_buffer_pool(xgelldev_t *lldev)
429 {
430 	mac_t *macp;
431 	xge_hal_device_t *hldev;
432 	xgell_rx_buffer_t *rx_buffer;
433 	int i;
434 
435 	macp = lldev->macp;
436 	hldev = macp->m_driver;
437 
438 	lldev->bf_pool.total = 0;
439 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(lldev->macp);
440 	lldev->bf_pool.head = NULL;
441 	lldev->bf_pool.free = 0;
442 	lldev->bf_pool.post = 0;
443 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
444 	lldev->bf_pool.recycle_hiwat = lldev->config.rx_buffer_recycle_hiwat;
445 
446 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
447 	    hldev->irqh);
448 
449 	/*
450 	 * Allocate buffers one by one. On failure, destroy the whole pool
451 	 * with a call to xgell_rx_destroy_buffer_pool().
452 	 */
453 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
454 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
455 			(void) xgell_rx_destroy_buffer_pool(lldev);
456 			return (DDI_FAILURE);
457 		}
458 
459 		rx_buffer->next = lldev->bf_pool.head;
460 		lldev->bf_pool.head = rx_buffer;
461 
462 		lldev->bf_pool.total++;
463 		lldev->bf_pool.free++;
464 	}
465 
466 	return (DDI_SUCCESS);
467 }
468 
469 /*
470  * xgell_rx_dtr_replenish
471  *
472  * Replenish a descriptor with an rx_buffer from the RX buffer pool.
473  * The dtr will be posted right away.
474  */
475 xge_hal_status_e
476 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
477     void *userdata, xge_hal_channel_reopen_e reopen)
478 {
479 	xgell_ring_t *ring = userdata;
480 	mac_t *macp = ring->macp;
481 	xge_hal_device_t *hldev = (xge_hal_device_t *)macp->m_driver;
482 	xgelldev_t *lldev = xge_hal_device_private(hldev);
483 	xgell_rx_buffer_t *rx_buffer;
484 	xgell_rxd_priv_t *rxd_priv;
485 
486 	if (lldev->bf_pool.head == NULL) {
487 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
488 		return (XGE_HAL_FAIL);
489 	}
490 	rx_buffer = lldev->bf_pool.head;
491 	lldev->bf_pool.head = rx_buffer->next;
492 	lldev->bf_pool.free--;
493 
494 	xge_assert(rx_buffer);
495 	xge_assert(rx_buffer->dma_addr);
496 
497 	rxd_priv = (xgell_rxd_priv_t *)
498 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
499 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
500 
501 	rxd_priv->rx_buffer = rx_buffer;
502 
503 	return (XGE_HAL_OK);
504 }
505 
506 /*
507  * xgell_get_ip_offset
508  *
509  * Calculate the offset to IP header.
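 *
 * For example, a plain DIX frame yields XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE
 * (14 with the usual HAL constants), while a VLAN-tagged IP frame adds
 * XGE_HAL_HEADER_VLAN_SIZE (4) on top of the base frame-type offset.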
510  */
511 static inline int
512 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
513 {
514 	int ip_off;
515 
516 	/* get IP-header offset */
517 	switch (ext_info->frame) {
518 	case XGE_HAL_FRAME_TYPE_DIX:
519 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
520 		break;
521 	case XGE_HAL_FRAME_TYPE_IPX:
522 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
523 		    XGE_HAL_HEADER_802_2_SIZE +
524 		    XGE_HAL_HEADER_SNAP_SIZE);
525 		break;
526 	case XGE_HAL_FRAME_TYPE_LLC:
527 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
528 		    XGE_HAL_HEADER_802_2_SIZE);
529 		break;
530 	case XGE_HAL_FRAME_TYPE_SNAP:
531 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
532 		    XGE_HAL_HEADER_SNAP_SIZE);
533 		break;
534 	default:
535 		ip_off = 0;
536 		break;
537 	}
538 
539 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
540 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
541 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
542 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
543 	}
544 
545 	return (ip_off);
546 }
547 
548 /*
549  * xgell_rx_hcksum_assoc
550  *
551  * Determine the packet type and then call hcksum_assoc() to associate
552  * h/w checksum information.
553  */
554 static inline void
555 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
556     xge_hal_dtr_info_t *ext_info)
557 {
558 	int cksum_flags = 0;
559 	int ip_off;
560 
561 	ip_off = xgell_get_ip_offset(ext_info);
562 
563 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
564 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
565 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
566 				cksum_flags |= HCK_IPV4_HDRCKSUM;
567 			}
568 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
569 				cksum_flags |= HCK_FULLCKSUM_OK;
570 			}
571 			if (cksum_flags) {
572 				cksum_flags |= HCK_FULLCKSUM;
573 				(void) hcksum_assoc(mp, NULL, NULL, 0,
574 				    0, 0, 0, cksum_flags, 0);
575 			}
576 		}
577 	} else if (ext_info->proto &
578 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
579 		/*
580 		 * Just pass the partial cksum up to IP.
581 		 */
582 		int start, end = pkt_length - ip_off;
583 
584 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
585 			struct ip *ip =
586 			    (struct ip *)(vaddr + ip_off);
587 			start = ip->ip_hl * 4 + ip_off;
588 		} else {
589 			start = ip_off + 40;	/* fixed IPv6 header length */
590 		}
591 		cksum_flags |= HCK_PARTIALCKSUM;
592 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
593 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
594 		    0);
595 	}
596 }
597 
598 /*
599  * xgell_rx_1b_msg_alloc
600  *
601  * Allocate a message header for the data buffer, and decide whether to copy
602  * the packet to a new data buffer, releasing the big rx_buffer to save memory.
603  *
604  * If pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT, call allocb() to allocate a
605  * new message and copy the payload in.
606  */
607 static mblk_t *
608 xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
609     xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
610 {
611 	mblk_t *mp;
612 	mblk_t *nmp = NULL;
613 	char *vaddr;
614 	int hdr_length = 0;
615 
616 #ifdef XGELL_L3_ALIGNED
617 	int doalign = 1;
618 	struct ip *ip;
619 	struct tcphdr *tcp;
620 	int tcp_off;
621 	int mp_align_len;
622 	int ip_off;
623 
624 #endif
625 
626 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
627 #ifdef XGELL_L3_ALIGNED
628 	ip_off = xgell_get_ip_offset(ext_info);
629 
630 	/* Already aligned? HEADROOM + ip_off is then a multiple of 4. */
631 	if ((ip_off & 3) == HEADROOM) {
632 		doalign = 0;
633 	}
634 
635 	/*
636 	 * Doalign? Check for types of packets.
637 	 */
638 	/* Is IPv4 or IPv6? */
639 	if (doalign && !(ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
640 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6)) {
641 		doalign = 0;
642 	}
643 
644 	/* Is TCP? */
645 	if (doalign &&
646 	    ((ip = (struct ip *)(vaddr + ip_off))->ip_p == IPPROTO_TCP)) {
647 		tcp_off = ip->ip_hl * 4 + ip_off;
648 		tcp = (struct tcphdr *)(vaddr + tcp_off);
649 		hdr_length = tcp_off + tcp->th_off * 4;
650 		if (pkt_length < (XGE_HAL_TCPIP_HEADER_MAX_SIZE +
651 		    XGE_HAL_MAC_HEADER_MAX_SIZE)) {
652 			hdr_length = pkt_length;
653 		}
654 	} else {
655 		doalign = 0;
656 	}
657 #endif
658 
659 	/*
660 	 * Copy the packet into a newly allocated message buffer, if pkt_length
661 	 * is no greater than XGELL_DMA_BUFFER_SIZE_LOWAT.
662 	 */
663 	if (*copyit || pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT) {
664 		/* Keep room for alignment */
665 		if ((mp = allocb(pkt_length + HEADROOM + 4, 0)) == NULL) {
666 			return (NULL);
667 		}
668 #ifdef XGELL_L3_ALIGNED
669 		if (doalign) {
670 			mp_align_len =
671 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
672 			mp->b_rptr += mp_align_len;
673 		}
674 #endif
675 		bcopy(vaddr, mp->b_rptr, pkt_length);
676 		mp->b_wptr = mp->b_rptr + pkt_length;
677 		*copyit = B_TRUE;
678 		return (mp);
679 	}
680 
681 	/*
682 	 * Just allocate mblk for current data buffer
683 	 */
684 	if ((nmp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
685 	    &rx_buffer->frtn)) == NULL) {
686 		/* Drop it */
687 		return (NULL);
688 	}
689 
690 	/*
691 	 * Adjust the b_rptr/b_wptr in the mblk_t structure to point to
692 	 * payload.
693 	 */
694 	nmp->b_rptr += hdr_length;
695 	nmp->b_wptr += pkt_length;
696 
697 #ifdef XGELL_L3_ALIGNED
698 	if (doalign) {
699 		if ((mp = esballoc(rx_buffer->header, hdr_length + 4, 0,
700 		    &header_frtn)) == NULL) {
701 			/* can not align! */
702 			mp = nmp;
703 			mp->b_rptr = (u8 *)vaddr;
704 			mp->b_wptr = mp->b_rptr + pkt_length;
705 			mp->b_next = NULL;
706 			mp->b_cont = NULL;
707 		} else {
708 			/* align packet's ip-header offset */
709 			mp_align_len =
710 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
711 			mp->b_rptr += mp_align_len;
712 			mp->b_wptr += mp_align_len + hdr_length;
713 			mp->b_cont = nmp;
714 			mp->b_next = NULL;
715 			nmp->b_cont = NULL;
716 			nmp->b_next = NULL;
717 
718 			bcopy(vaddr, mp->b_rptr, hdr_length);
719 		}
720 	} else {
721 		/* no need to align */
722 		mp = nmp;
723 		mp->b_next = NULL;
724 		mp->b_cont = NULL;
725 	}
726 #else
727 	mp = nmp;
728 	mp->b_next = NULL;
729 	mp->b_cont = NULL;
730 #endif
731 
732 	return (mp);
733 }
734 
735 /*
736  * xgell_rx_1b_compl
737  *
738  * Called when an interrupt signals a received frame, or when the receive
739  * ring contains fresh, as yet unprocessed frames.
740  */
741 static xge_hal_status_e
742 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
743     void *userdata)
744 {
745 	mac_t *macp = ((xgell_ring_t *)userdata)->macp;
746 	xgell_rx_buffer_t *rx_buffer;
747 	xge_hal_device_t *hldev = (xge_hal_device_t *)macp->m_driver;
748 	xgelldev_t *lldev = xge_hal_device_private(hldev);
749 	mblk_t *mp_head = NULL;
750 	mblk_t *mp_end  = NULL;
751 
752 	do {
753 		int ret;
754 		int pkt_length;
755 		dma_addr_t dma_data;
756 		mblk_t *mp;
757 
758 		boolean_t copyit = B_FALSE;
759 
760 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
761 		    xge_hal_ring_dtr_private(channelh, dtr));
762 		xge_hal_dtr_info_t ext_info;
763 
764 		rx_buffer = rxd_priv->rx_buffer;
765 
766 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
767 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
768 
769 		xge_assert(dma_data == rx_buffer->dma_addr);
770 
771 		if (t_code != 0) {
772 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
773 			    " completed due to error t_code %01x", XGELL_IFNAME,
774 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
775 
776 			(void) xge_hal_device_handle_tcode(channelh, dtr,
777 			    t_code);
778 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
779 			xgell_rx_buffer_release(rx_buffer);
780 			continue;
781 		}
782 
783 		/*
784 		 * Sync the DMA memory
785 		 */
786 		ret = ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
787 		    DDI_DMA_SYNC_FORKERNEL);
788 		if (ret != DDI_SUCCESS) {
789 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
790 			    XGELL_IFNAME, lldev->instance);
791 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
792 			xgell_rx_buffer_release(rx_buffer);
793 			continue;
794 		}
795 
796 		/*
797 		 * Allocate message for the packet.
798 		 */
799 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
800 			copyit = B_TRUE;
801 		} else {
802 			copyit = B_FALSE;
803 		}
804 
805 		mp = xgell_rx_1b_msg_alloc(rx_buffer, pkt_length, &ext_info,
806 		    &copyit);
807 
808 		xge_hal_ring_dtr_free(channelh, dtr);
809 
810 		/*
811 		 * Release the buffer and recycle it later
812 		 */
813 		if ((mp == NULL) || copyit) {
814 			xgell_rx_buffer_release(rx_buffer);
815 		} else {
816 			/*
817 			 * Count it since the buffer should be loaned up.
818 			 */
819 			mutex_enter(&lldev->bf_pool.pool_lock);
820 			lldev->bf_pool.post++;
821 			mutex_exit(&lldev->bf_pool.pool_lock);
822 		}
823 		if (mp == NULL) {
824 			xge_debug_ll(XGE_ERR,
825 			    "%s%d: rx: can not allocate mp mblk", XGELL_IFNAME,
826 			    lldev->instance);
827 			continue;
828 		}
829 
830 		/*
831 		 * Associate cksum_flags per packet type and h/w cksum flags.
832 		 */
833 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
834 		    HEADROOM, pkt_length, &ext_info);
835 
836 		if (mp_head == NULL) {
837 			mp_head = mp;
838 			mp_end = mp;
839 		} else {
840 			mp_end->b_next = mp;
841 			mp_end = mp;
842 		}
843 
844 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
845 	    XGE_HAL_OK);
846 
847 	if (mp_head) {
848 		mac_rx(macp, ((xgell_ring_t *)userdata)->handle, mp_head);
849 	}
850 
851 	/*
852 	 * Always call replenish_all to recycle rx_buffers.
853 	 */
854 	mutex_enter(&lldev->bf_pool.pool_lock);
855 	xgell_rx_buffer_replenish_all(lldev);
856 	mutex_exit(&lldev->bf_pool.pool_lock);
857 
858 	return (XGE_HAL_OK);
859 }
860 
861 /*
862  * xgell_xmit_compl
863  *
864  * Called when an interrupt indicates DMA completion of a Tx packet.
865  * It identifies the last TxD whose buffer was freed and frees all
866  * mblks whose data have already been DMA'ed into the NIC's
867  * internal memory.
868  */
869 static xge_hal_status_e
870 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
871     void *userdata)
872 {
873 	xgelldev_t *lldev = userdata;
874 
875 	do {
876 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
877 		    xge_hal_fifo_dtr_private(dtr));
878 		mblk_t *mp = txd_priv->mblk;
879 #if !defined(XGELL_TX_NOMAP_COPY)
880 		int i;
881 #endif
882 
883 		if (t_code) {
884 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
885 			    " completed due to error t_code %01x", XGELL_IFNAME,
886 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
887 
888 			(void) xge_hal_device_handle_tcode(channelh, dtr,
889 			    t_code);
890 		}
891 
892 #if !defined(XGELL_TX_NOMAP_COPY)
893 		for (i = 0; i < txd_priv->handle_cnt; i++) {
894 			xge_assert(txd_priv->dma_handles[i]);
895 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
896 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
897 			txd_priv->dma_handles[i] = 0;
898 		}
899 #endif
900 
901 		xge_hal_fifo_dtr_free(channelh, dtr);
902 
903 		freemsg(mp);
904 		lldev->resched_avail++;
905 
906 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
907 	    XGE_HAL_OK);
908 
909 	if (lldev->resched_retry &&
910 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
911 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
912 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
913 		    XGELL_IFNAME, lldev->instance,
914 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
915 		lldev->resched_send = lldev->resched_avail;
916 		lldev->resched_retry = 0;
917 	}
918 
919 	return (XGE_HAL_OK);
920 }
921 
922 /*
923  * xgell_send
924  * @hldev: pointer to xge_hal_device_t structure
925  * @mp: pointer to network buffer, i.e. mblk_t structure
926  *
927  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
928  * A pointer to an M_DATA message that contains the packet is passed to
929  * this routine.
930  */
931 static boolean_t
932 xgell_send(xge_hal_device_t *hldev, mblk_t *mp)
933 {
934 	mblk_t *bp;
935 	int retry, repeat;
936 	xge_hal_status_e status;
937 	xge_hal_dtr_h dtr;
938 	xgelldev_t *lldev = xge_hal_device_private(hldev);
939 	xgell_txd_priv_t *txd_priv;
940 	uint32_t pflags;
941 #ifndef XGELL_TX_NOMAP_COPY
942 	int handle_cnt, frag_cnt, ret, i;
943 #endif
944 
945 _begin:
946 	retry = repeat = 0;
947 #ifndef XGELL_TX_NOMAP_COPY
948 	handle_cnt = frag_cnt = 0;
949 #endif
950 
951 	if (!lldev->is_initialized || lldev->in_reset)
952 		return (B_FALSE);
953 
954 	/*
955 	 * If the free Tx dtrs count reaches the lower threshold,
956 	 * inform the MAC layer to stop sending more packets till the free
957 	 * dtrs count exceeds the higher threshold. The driver informs the
958 	 * MAC layer through a resched event, when the free dtrs count
959 	 * exceeds the higher threshold.
960 	 */
961 	if (__hal_channel_dtr_count(lldev->fifo_channel)
962 	    <= XGELL_TX_LEVEL_LOW) {
963 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
964 		    "free descriptors count at low threshold %d",
965 		    XGELL_IFNAME, lldev->instance,
966 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
967 		    XGELL_TX_LEVEL_LOW);
968 		retry = 1;
969 		goto _exit;
970 	}
971 
972 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
973 	if (status != XGE_HAL_OK) {
974 		switch (status) {
975 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
976 			xge_debug_ll(XGE_ERR,
977 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
978 			    lldev->instance,
979 			    ((xge_hal_channel_t *)
980 			    lldev->fifo_channel)->post_qid);
981 			retry = 1;
982 			goto _exit;
983 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
984 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
985 			    " out of descriptors.", XGELL_IFNAME,
986 			    lldev->instance,
987 			    ((xge_hal_channel_t *)
988 			    lldev->fifo_channel)->post_qid);
989 			retry = 1;
990 			goto _exit;
991 		default:
992 			return (B_FALSE);
993 		}
994 	}
995 
996 	txd_priv = xge_hal_fifo_dtr_private(dtr);
997 	txd_priv->mblk = mp;
998 
999 	/*
1000 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1001 	 * do insertion.
1002 	 *
1003 	 * For a NIC driver that has to strip and re-insert the VLAN tag, an
1004 	 * example is the other implementation for xge. The driver can simply
1005 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let the
1006 	 * h/w insert the tag automatically, since GLD never sends down mp(s)
1007 	 * with a split ether_vlan_header.
1008 	 *
1009 	 * struct ether_vlan_header *evhp;
1010 	 * uint16_t tci;
1011 	 *
1012 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1013 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1014 	 * 	tci = ntohs(evhp->ether_tci);
1015 	 * 	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1016 	 *	    2 * ETHERADDRL);
1017 	 * 	mp->b_rptr += VLAN_TAGSZ;
1018 	 *
1019 	 * 	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1020 	 * }
1021 	 */
1022 
1023 #ifdef XGELL_TX_NOMAP_COPY
1024 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1025 		int mblen;
1026 		xge_hal_status_e rc;
1027 
1028 		/* skip zero-length message blocks */
1029 		mblen = MBLKL(bp);
1030 		if (mblen == 0) {
1031 			continue;
1032 		}
1033 		rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel, dtr,
1034 			bp->b_rptr, mblen);
1035 		xge_assert(rc == XGE_HAL_OK);
1036 	}
1037 	xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr, 0);
1038 #else
1039 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1040 		int mblen;
1041 		uint_t ncookies;
1042 		ddi_dma_cookie_t dma_cookie;
1043 		ddi_dma_handle_t dma_handle;
1044 
1045 		/* skip zero-length message blocks */
1046 		mblen = MBLKL(bp);
1047 		if (mblen == 0) {
1048 			continue;
1049 		}
1050 
1051 		ret = ddi_dma_alloc_handle(lldev->macp->m_dip, &tx_dma_attr,
1052 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1053 		if (ret != DDI_SUCCESS) {
1054 			xge_debug_ll(XGE_ERR,
1055 			    "%s%d: can not allocate dma handle",
1056 			    XGELL_IFNAME, lldev->instance);
1057 			goto _exit_cleanup;
1058 		}
1059 
1060 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1061 		    (caddr_t)bp->b_rptr, mblen,
1062 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1063 		    &dma_cookie, &ncookies);
1064 
1065 		switch (ret) {
1066 		case DDI_DMA_MAPPED:
1067 			/* everything's fine */
1068 			break;
1069 
1070 		case DDI_DMA_NORESOURCES:
1071 			xge_debug_ll(XGE_ERR,
1072 			    "%s%d: can not bind dma address",
1073 			    XGELL_IFNAME, lldev->instance);
1074 			ddi_dma_free_handle(&dma_handle);
1075 			goto _exit_cleanup;
1076 
1077 		case DDI_DMA_NOMAPPING:
1078 		case DDI_DMA_INUSE:
1079 		case DDI_DMA_TOOBIG:
1080 		default:
1081 			/* drop packet, don't retry */
1082 			xge_debug_ll(XGE_ERR,
1083 			    "%s%d: can not map message buffer",
1084 			    XGELL_IFNAME, lldev->instance);
1085 			ddi_dma_free_handle(&dma_handle);
1086 			goto _exit_cleanup;
1087 		}
1088 
1089 		if (ncookies + frag_cnt > XGE_HAL_DEFAULT_FIFO_FRAGS) {
1090 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1091 			    "requested c:%d+f:%d", XGELL_IFNAME,
1092 			    lldev->instance, ncookies, frag_cnt);
1093 			(void) ddi_dma_unbind_handle(dma_handle);
1094 			ddi_dma_free_handle(&dma_handle);
1095 			goto _exit_cleanup;
1096 		}
1097 
1098 		/* setup the descriptors for this data buffer */
1099 		while (ncookies) {
1100 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1101 			    frag_cnt++, dma_cookie.dmac_laddress,
1102 			    dma_cookie.dmac_size);
1103 			if (--ncookies) {
1104 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1105 			}
1106 
1107 		}
1108 
1109 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1110 
1111 		if (bp->b_cont &&
1112 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1113 		    XGE_HAL_DEFAULT_FIFO_FRAGS)) {
1114 			mblk_t *nmp;
1115 
1116 			xge_debug_ll(XGE_TRACE,
1117 			    "too many FRAGs [%d], pull up them", frag_cnt);
1118 
1119 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1120 				/* Drop packet, don't retry */
1121 				xge_debug_ll(XGE_ERR,
1122 				    "%s%d: can not pullup message buffer",
1123 				    XGELL_IFNAME, lldev->instance);
1124 				goto _exit_cleanup;
1125 			}
1126 			freemsg(bp->b_cont);
1127 			bp->b_cont = nmp;
1128 		}
1129 	}
1130 
1131 	txd_priv->handle_cnt = handle_cnt;
1132 #endif /* XGELL_TX_NOMAP_COPY */
1133 
1134 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
1135 	if (pflags & HCK_IPV4_HDRCKSUM) {
1136 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1137 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1138 	}
1139 	if (pflags & HCK_FULLCKSUM) {
1140 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1141 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1142 	}
1143 
1144 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1145 
1146 	return (B_TRUE);
1147 
1148 _exit_cleanup:
1149 
1150 #if !defined(XGELL_TX_NOMAP_COPY)
1151 	for (i = 0; i < handle_cnt; i++) {
1152 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1153 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1154 		txd_priv->dma_handles[i] = 0;
1155 	}
1156 #endif
1157 
1158 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1159 
1160 	if (repeat) {
1161 		goto _begin;
1162 	}
1163 
1164 _exit:
1165 	if (retry) {
1166 		if (lldev->resched_avail != lldev->resched_send &&
1167 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1168 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1169 			lldev->resched_send = lldev->resched_avail;
1170 			return (B_FALSE);
1171 		} else {
1172 			lldev->resched_retry = 1;
1173 		}
1174 	}
1175 
1176 	freemsg(mp);
1177 	return (B_TRUE);
1178 }
1179 
1180 /*
1181  * xgell_m_tx
1182  * @arg: pointer to the xge_hal_device_t structure
1183  * @mp: pointer to the chain of message buffers to transmit,
1184  *	linked via b_next
1185  *
1186  * Called by MAC Layer to send a chain of packets
1187  */
1188 static mblk_t *
1189 xgell_m_tx(void *arg, mblk_t *mp)
1190 {
1191 	xge_hal_device_t *hldev = arg;
1192 	mblk_t *next;
1193 
1194 	while (mp != NULL) {
1195 		next = mp->b_next;
1196 		mp->b_next = NULL;
1197 
1198 		if (!xgell_send(hldev, mp)) {
1199 			mp->b_next = next;
1200 			break;
1201 		}
1202 		mp = next;
1203 	}
1204 
1205 	return (mp);
1206 }
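/*
 * Note: handing a non-NULL remainder back to the MAC layer means "out of
 * resources"; the MAC layer holds the chain until the driver posts
 * XGELL_EVENT_RESCHED_NEEDED (see xgell_send()/xgell_xmit_compl()), whose
 * handler is expected to tell the MAC layer to resume transmission.
 */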
1207 
1208 /*
1209  * xgell_rx_dtr_term
1210  *
1211  * Called by the HAL to terminate all DTRs for
1212  * Ring type channels.
1213  */
1214 static void
1215 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1216     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1217 {
1218 	xgell_rxd_priv_t *rxd_priv =
1219 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1220 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1221 
1222 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1223 		xge_hal_ring_dtr_free(channelh, dtrh);
1224 		xgell_rx_buffer_release(rx_buffer);
1225 	}
1226 }
1227 
1228 /*
1229  * xgell_tx_term
1230  *
1231  * Called by the HAL to terminate all DTRs for
1232  * Fifo type channels.
1233  */
1234 static void
1235 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1236     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1237 {
1238 	xgell_txd_priv_t *txd_priv =
1239 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1240 	mblk_t *mp = txd_priv->mblk;
1241 #if !defined(XGELL_TX_NOMAP_COPY)
1242 	int i;
1243 #endif
1244 	/*
1245 	 * for Tx we must clean up the DTR *only* if it has been
1246 	 * posted!
1247 	 */
1248 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1249 		return;
1250 	}
1251 
1252 #if !defined(XGELL_TX_NOMAP_COPY)
1253 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1254 		xge_assert(txd_priv->dma_handles[i]);
1255 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1256 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1257 		txd_priv->dma_handles[i] = 0;
1258 	}
1259 #endif
1260 
1261 	xge_hal_fifo_dtr_free(channelh, dtrh);
1262 
1263 	freemsg(mp);
1264 }
1265 
1266 /*
1267  * xgell_tx_open
1268  * @lldev: the link layer object
1269  *
1270  * Initialize and open all Tx channels.
1271  */
1272 static boolean_t
1273 xgell_tx_open(xgelldev_t *lldev)
1274 {
1275 	xge_hal_status_e status;
1276 	u64 adapter_status;
1277 	xge_hal_channel_attr_t attr;
1278 
1279 	attr.post_qid		= 0;
1280 	attr.compl_qid		= 0;
1281 	attr.callback		= xgell_xmit_compl;
1282 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1283 	attr.flags		= 0;
1284 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1285 	attr.userdata		= lldev;
1286 	attr.dtr_init		= NULL;
1287 	attr.dtr_term		= xgell_tx_term;
1288 
1289 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1290 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1291 		    "adaper status reads 0x%"PRIx64, XGELL_IFNAME,
1292 		    lldev->instance, (uint64_t)adapter_status);
1293 		return (B_FALSE);
1294 	}
1295 
1296 	status = xge_hal_channel_open(lldev->devh, &attr,
1297 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1298 	if (status != XGE_HAL_OK) {
1299 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1300 		    "got status code %d", XGELL_IFNAME,
1301 		    lldev->instance, status);
1302 		return (B_FALSE);
1303 	}
1304 
1305 	return (B_TRUE);
1306 }
1307 
1308 /*
1309  * xgell_rx_open
1310  * @lldev: the link layer object
1311  *
1312  * Initialize and open all Rx channels.
1313  */
1314 static boolean_t
1315 xgell_rx_open(xgelldev_t *lldev)
1316 {
1317 	xge_hal_status_e status;
1318 	u64 adapter_status;
1319 	xge_hal_channel_attr_t attr;
1320 
1321 	attr.post_qid		= XGELL_RING_MAIN_QID;
1322 	attr.compl_qid		= 0;
1323 	attr.callback		= xgell_rx_1b_compl;
1324 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1325 	attr.flags		= 0;
1326 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1327 	attr.dtr_init		= xgell_rx_dtr_replenish;
1328 	attr.dtr_term		= xgell_rx_dtr_term;
1329 
1330 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1331 		xge_debug_ll(XGE_ERR,
1332 		    "%s%d: device is not ready adaper status reads 0x%"PRIx64,
1333 		    XGELL_IFNAME, lldev->instance,
1334 		    (uint64_t)adapter_status);
1335 		return (B_FALSE);
1336 	}
1337 
1338 	lldev->ring_main.macp = lldev->macp;
1339 	attr.userdata = &lldev->ring_main;
1340 
1341 	status = xge_hal_channel_open(lldev->devh, &attr,
1342 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1343 	if (status != XGE_HAL_OK) {
1344 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1345 		    " code %d", XGELL_IFNAME, lldev->instance, status);
1346 		return (B_FALSE);
1347 	}
1348 
1349 	return (B_TRUE);
1350 }
1351 
1352 static int
1353 xgell_initiate_start(xgelldev_t *lldev)
1354 {
1355 	xge_hal_status_e status;
1356 #ifdef XGELL_TX_NOMAP_COPY
1357 	xge_hal_device_t *hldev = lldev->devh;
1358 #endif
1359 	int maxpkt = lldev->macp->m_info.mi_sdu_max;
1360 
1361 	/* check initial mtu before enabling the device */
1362 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1363 	if (status != XGE_HAL_OK) {
1364 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1365 		    XGELL_IFNAME, lldev->instance, maxpkt);
1366 		return (EINVAL);
1367 	}
1368 
1369 	/* set initial mtu before enabling the device */
1370 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1371 	if (status != XGE_HAL_OK) {
1372 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1373 		    XGELL_IFNAME, lldev->instance, maxpkt);
1374 		return (EIO);
1375 	}
1376 
1377 	/* now, enable the device */
1378 	status = xge_hal_device_enable(lldev->devh);
1379 	if (status != XGE_HAL_OK) {
1380 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1381 		    XGELL_IFNAME, lldev->instance);
1382 		return (EIO);
1383 	}
1384 
1385 	if (!xgell_rx_open(lldev)) {
1386 		status = xge_hal_device_disable(lldev->devh);
1387 		if (status != XGE_HAL_OK) {
1388 			u64 adapter_status;
1389 			(void) xge_hal_device_status(lldev->devh,
1390 			    &adapter_status);
1391 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1392 			    "the device. adaper status 0x%"PRIx64
1393 			    " returned status %d",
1394 			    XGELL_IFNAME, lldev->instance,
1395 			    (uint64_t)adapter_status, status);
1396 		}
1397 		xge_os_mdelay(1500);
1398 		return (ENOMEM);
1399 	}
1400 
1401 #ifdef XGELL_TX_NOMAP_COPY
1402 	hldev->config.fifo.alignment_size = XGELL_MAX_FRAME_SIZE(lldev->macp);
1403 #endif
1404 
1405 	if (!xgell_tx_open(lldev)) {
1406 		status = xge_hal_device_disable(lldev->devh);
1407 		if (status != XGE_HAL_OK) {
1408 			u64 adapter_status;
1409 			(void) xge_hal_device_status(lldev->devh,
1410 			    &adapter_status);
1411 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1412 			    "the device. adaper status 0x%"PRIx64
1413 			    " returned status %d",
1414 			    XGELL_IFNAME, lldev->instance,
1415 			    (uint64_t)adapter_status, status);
1416 		}
1417 		xge_os_mdelay(1500);
1418 		xge_hal_channel_close(lldev->ring_main.channelh,
1419 		    XGE_HAL_CHANNEL_OC_NORMAL);
1420 		return (ENOMEM);
1421 	}
1422 
1423 	/* time to enable interrupts */
1424 	xge_hal_device_intr_enable(lldev->devh);
1425 
1426 	lldev->is_initialized = 1;
1427 
1428 	return (0);
1429 }
1430 
1431 static void
1432 xgell_initiate_stop(xgelldev_t *lldev)
1433 {
1434 	xge_hal_status_e status;
1435 
1436 	lldev->is_initialized = 0;
1437 
1438 	status = xge_hal_device_disable(lldev->devh);
1439 	if (status != XGE_HAL_OK) {
1440 		u64 adapter_status;
1441 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1442 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1443 		    "the device. adaper status 0x%"PRIx64" returned status %d",
1444 		    XGELL_IFNAME, lldev->instance,
1445 		    (uint64_t)adapter_status, status);
1446 	}
1447 	xge_hal_device_intr_disable(lldev->devh);
1448 
1449 	xge_debug_ll(XGE_TRACE, "%s",
1450 	    "waiting for device irq to become quiescent...");
1451 	xge_os_mdelay(1500);
1452 
1453 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1454 
1455 	xge_hal_channel_close(lldev->ring_main.channelh,
1456 	    XGE_HAL_CHANNEL_OC_NORMAL);
1457 
1458 	xge_hal_channel_close(lldev->fifo_channel,
1459 	    XGE_HAL_CHANNEL_OC_NORMAL);
1460 }
1461 
1462 /*
1463  * xgell_m_start
1464  * @arg: pointer to device private structure (hldev)
1465  *
1466  * This function is called by MAC Layer to enable the XFRAME
1467  * firmware to generate interrupts and also prepare the
1468  * driver to call mac_rx for delivering receive packets
1469  * to MAC Layer.
1470  */
1471 static int
1472 xgell_m_start(void *arg)
1473 {
1474 	xge_hal_device_t *hldev = arg;
1475 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1476 	int ret;
1477 
1478 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1479 	    lldev->instance);
1480 
1481 	mutex_enter(&lldev->genlock);
1482 
1483 	if (lldev->is_initialized) {
1484 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1485 		    XGELL_IFNAME, lldev->instance);
1486 		mutex_exit(&lldev->genlock);
1487 		return (EINVAL);
1488 	}
1489 
1490 	hldev->terminating = 0;
1491 	if (ret = xgell_initiate_start(lldev)) {
1492 		mutex_exit(&lldev->genlock);
1493 		return (ret);
1494 	}
1495 
1496 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1497 
1498 	if (!lldev->timeout_id) {
1499 		xgell_initiate_stop(lldev);
1500 		mutex_exit(&lldev->genlock);
1501 		return (EINVAL);
1502 	}
1503 
1504 	mutex_exit(&lldev->genlock);
1505 
1506 	return (0);
1507 }
1508 
1509 /*
1510  * xgell_m_stop
1511  * @arg: pointer to device private data (hldev)
1512  *
1513  * This function is called by the MAC Layer to disable
1514  * the XFRAME firmware for generating any interrupts and
1515  * also stop the driver from calling mac_rx() for
1516  * delivering data packets to the MAC Layer.
1517  */
1518 static void
1519 xgell_m_stop(void *arg)
1520 {
1521 	xge_hal_device_t *hldev;
1522 	xgelldev_t *lldev;
1523 
1524 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1525 
1526 	hldev = arg;
1527 	xge_assert(hldev);
1528 
1529 	lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1530 	xge_assert(lldev);
1531 
1532 	mutex_enter(&lldev->genlock);
1533 	if (!lldev->is_initialized) {
1534 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1535 		mutex_exit(&lldev->genlock);
1536 		return;
1537 	}
1538 
1539 	xge_hal_device_terminating(hldev);
1540 	xgell_initiate_stop(lldev);
1541 
1542 	/* reset device */
1543 	(void) xge_hal_device_reset(lldev->devh);
1544 
1545 	mutex_exit(&lldev->genlock);
1546 
1547 	(void) untimeout(lldev->timeout_id);
1548 
1549 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1550 }
1551 
1552 /*
1553  * xgell_onerr_reset
1554  * @lldev: pointer to xgelldev_t structure
1555  *
1556  * This function is called by the HAL event framework to reset the HW.
1557  * It must be called with genlock taken.
1558  */
1559 int
1560 xgell_onerr_reset(xgelldev_t *lldev)
1561 {
1562 	int rc = 0;
1563 
1564 	if (!lldev->is_initialized) {
1565 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1566 		    XGELL_IFNAME, lldev->instance);
1567 		return (rc);
1568 	}
1569 
1570 	lldev->in_reset = 1;
1571 	xgell_initiate_stop(lldev);
1572 
1573 	/* reset device */
1574 	(void) xge_hal_device_reset(lldev->devh);
1575 
1576 	rc = xgell_initiate_start(lldev);
1577 	lldev->in_reset = 0;
1578 
1579 	return (rc);
1580 }
1581 
1582 
1583 /*
1584  * xgell_m_unicst
1585  * @arg: pointer to device private structure (hldev)
1586  * @macaddr: the new MAC address to set
1587  *
1588  * This function is called by MAC Layer to set the physical address
1589  * of the XFRAME firmware.
1590  */
1591 static int
1592 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1593 {
1594 	xge_hal_status_e status;
1595 	xge_hal_device_t *hldev = arg;
1596 	xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1597 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1598 
1599 	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1600 
1601 	mutex_enter(&lldev->genlock);
1602 
1603 	xge_debug_ll(XGE_TRACE,
1604 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1605 	    macaddr[0], macaddr[1], macaddr[2],
1606 	    macaddr[3], macaddr[4], macaddr[5]);
1607 
1608 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1609 	if (status != XGE_HAL_OK) {
1610 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1611 		    XGELL_IFNAME, lldev->instance);
1612 		mutex_exit(&lldev->genlock);
1613 		return (EIO);
1614 	}
1615 
1616 	mutex_exit(&lldev->genlock);
1617 
1618 	return (0);
1619 }
1620 
1621 
1622 /*
1623  * xgell_m_multicst
1624  * @arg: pointer to device private structure (hldev)
1625  * @add: B_TRUE to enable the address, B_FALSE to disable it
1626  * @mc_addr: the multicast address in question
1627  *
1628  * This function is called by MAC Layer to enable or
1629  * disable device-level reception of specific multicast addresses.
1630  */
1631 static int
1632 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1633 {
1634 	xge_hal_status_e status;
1635 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1636 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1637 
1638 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1639 
1640 	mutex_enter(&lldev->genlock);
1641 
1642 	if (!lldev->is_initialized) {
1643 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1644 		    XGELL_IFNAME, lldev->instance);
1645 		mutex_exit(&lldev->genlock);
1646 		return (EIO);
1647 	}
1648 
1649 	/* FIXME: missing HAL functionality: enable_one() */
1650 
1651 	status = (add) ?
1652 	    xge_hal_device_mcast_enable(hldev) :
1653 	    xge_hal_device_mcast_disable(hldev);
1654 
1655 	if (status != XGE_HAL_OK) {
1656 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1657 		    add ? "enable" : "disable", status);
1658 		mutex_exit(&lldev->genlock);
1659 		return (EIO);
1660 	}
1661 
1662 	mutex_exit(&lldev->genlock);
1663 
1664 	return (0);
1665 }
1666 
1667 
1668 /*
1669  * xgell_m_promisc
1670  * @arg: pointer to device private structure (hldev)
1671  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
1672  *
1673  * This function is called by MAC Layer to enable or
1674  * disable the reception of all the packets on the medium
1675  */
1676 static int
1677 xgell_m_promisc(void *arg, boolean_t on)
1678 {
1679 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1680 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1681 
1682 	mutex_enter(&lldev->genlock);
1683 
1684 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1685 
1686 	if (!lldev->is_initialized) {
1687 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1688 		    XGELL_IFNAME, lldev->instance);
1689 		mutex_exit(&lldev->genlock);
1690 		return (EIO);
1691 	}
1692 
1693 	if (on) {
1694 		xge_hal_device_promisc_enable(hldev);
1695 	} else {
1696 		xge_hal_device_promisc_disable(hldev);
1697 	}
1698 
1699 	mutex_exit(&lldev->genlock);
1700 
1701 	return (0);
1702 }
1703 
1704 /*
1705  * xgell_m_stat
1706  * @arg: pointer to device private structure (hldev)
1707  * @stat: the statistic being queried
1708  *
1709  * This function is called by MAC Layer to get network statistics
1710  * from the driver.
1711  */
1712 static uint64_t
1713 xgell_m_stat(void *arg, enum mac_stat stat)
1714 {
1715 	xge_hal_stats_hw_info_t *hw_info;
1716 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1717 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1718 	uint64_t val;
1719 
1720 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1721 
1722 	if (!mutex_tryenter(&lldev->genlock))
1723 		return (0);
1724 
1725 	if (!lldev->is_initialized) {
1726 		mutex_exit(&lldev->genlock);
1727 		return (0);
1728 	}
1729 
1730 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1731 		mutex_exit(&lldev->genlock);
1732 		return (0);
1733 	}
1734 
1735 	switch (stat) {
1736 	case MAC_STAT_IFSPEED:
1737 		val = 10000000000ull; /* 10G */
1738 		break;
1739 
1740 	case MAC_STAT_LINK_DUPLEX:
1741 		val = LINK_DUPLEX_FULL;
1742 		break;
1743 
1744 	case MAC_STAT_MULTIRCV:
1745 		val = hw_info->rmac_vld_mcst_frms;
1746 		break;
1747 
1748 	case MAC_STAT_BRDCSTRCV:
1749 		val = hw_info->rmac_vld_bcst_frms;
1750 		break;
1751 
1752 	case MAC_STAT_MULTIXMT:
1753 		val = hw_info->tmac_mcst_frms;
1754 		break;
1755 
1756 	case MAC_STAT_BRDCSTXMT:
1757 		val = hw_info->tmac_bcst_frms;
1758 		break;
1759 
1760 	case MAC_STAT_RBYTES:
1761 		val = hw_info->rmac_ttl_octets;
1762 		break;
1763 
1764 	case MAC_STAT_NORCVBUF:
1765 		val = hw_info->rmac_drop_frms;
1766 		break;
1767 
1768 	case MAC_STAT_IERRORS:
1769 		val = hw_info->rmac_discarded_frms;
1770 		break;
1771 
1772 	case MAC_STAT_OBYTES:
1773 		val = hw_info->tmac_ttl_octets;
1774 		break;
1775 
1776 	case MAC_STAT_NOXMTBUF:
1777 		val = hw_info->tmac_drop_frms;
1778 		break;
1779 
1780 	case MAC_STAT_OERRORS:
1781 		val = hw_info->tmac_any_err_frms;
1782 		break;
1783 
1784 	case MAC_STAT_IPACKETS:
1785 		val = hw_info->rmac_vld_frms;
1786 		break;
1787 
1788 	case MAC_STAT_OPACKETS:
1789 		val = hw_info->tmac_frms;
1790 		break;
1791 
1792 	case MAC_STAT_FCS_ERRORS:
1793 		val = hw_info->rmac_fcs_err_frms;
1794 		break;
1795 
1796 	case MAC_STAT_TOOLONG_ERRORS:
1797 		val = hw_info->rmac_long_frms;
1798 		break;
1799 
1800 	default:
1801 		ASSERT(B_FALSE);
		val = 0;
1802 	}
1803 
1804 	mutex_exit(&lldev->genlock);
1805 
1806 	return (val);
1807 }
1808 
1809 /*
1810  * xgell_device_alloc - Allocate new LL device
1811  */
1812 int
1813 xgell_device_alloc(xge_hal_device_h devh,
1814     dev_info_t *dev_info, xgelldev_t **lldev_out)
1815 {
1816 	xgelldev_t *lldev;
1817 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1818 	int instance = ddi_get_instance(dev_info);
1819 
1820 	*lldev_out = NULL;
1821 
1822 	xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
1823 	    XGELL_IFNAME, instance);
1824 
1825 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1826 
1827 	/* allocate mac */
1828 	lldev->macp = kmem_zalloc(sizeof (mac_t), KM_SLEEP);
1829 	lldev->devh = hldev;
1830 	lldev->instance = instance;
1831 	lldev->dev_info = dev_info;
1832 
1833 	*lldev_out = lldev;
1834 
1835 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1836 
1837 	return (DDI_SUCCESS);
1838 }
1839 
1840 /*
1841  * xgell_device_free
1842  */
1843 void
1844 xgell_device_free(xgelldev_t *lldev)
1845 {
1846 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1847 	    XGELL_IFNAME, lldev->instance);
1848 
1849 	kmem_free(lldev->macp, sizeof (*(lldev->macp)));
1850 
1851 	kmem_free(lldev, sizeof (xgelldev_t));
1852 }
1853 
1854 /*
1855  * xgell_m_ioctl
1856  */
1857 static void
1858 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1859 {
1860 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1861 	xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
1862 	struct iocblk *iocp;
1863 	int err = 0;
1864 	int cmd;
1865 	int need_privilege = 1;
1866 	int ret = 0;
1867 
1868 
1869 	iocp = (struct iocblk *)mp->b_rptr;
1870 	iocp->ioc_error = 0;
1871 	cmd = iocp->ioc_cmd;
1872 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1873 	switch (cmd) {
1874 	case ND_GET:
1875 		need_privilege = 0;
1876 		/* FALLTHRU */
1877 	case ND_SET:
1878 		break;
1879 	default:
1880 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1881 		miocnak(wq, mp, 0, EINVAL);
1882 		return;
1883 	}
1884 
1885 	if (need_privilege) {
1886 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1887 		if (err != 0) {
1888 			xge_debug_ll(XGE_ERR,
1889 			    "drv_priv(): rejected cmd 0x%x, err %d",
1890 			    cmd, err);
1891 			miocnak(wq, mp, 0, err);
1892 			return;
1893 		}
1894 	}
1895 
1896 	switch (cmd) {
1897 	case ND_GET:
1898 		/*
1899 		 * If nd_getset() returns B_FALSE, the command was
1900 		 * not valid (e.g. unknown name), so we just tell the
1901 		 * top-level ioctl code to send a NAK (with code EINVAL).
1902 		 *
1903 		 * Otherwise, nd_getset() will have built the reply to
1904 		 * be sent (but not actually sent it), so we tell the
1905 		 * caller to send the prepared reply.
1906 		 */
1907 		ret = nd_getset(wq, lldev->ndp, mp);
1908 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1909 		break;
1910 
1911 	case ND_SET:
1912 		ret = nd_getset(wq, lldev->ndp, mp);
1913 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1914 		break;
1915 
1916 	default:
1917 		break;
1918 	}
1919 
1920 	if (ret == B_FALSE) {
1921 		xge_debug_ll(XGE_ERR,
1922 		    "nd_getset(): rejected cmd 0x%x, err %d",
1923 		    cmd, err);
1924 		miocnak(wq, mp, 0, EINVAL);
1925 	} else {
1926 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1927 		    M_IOCACK : M_IOCNAK;
1928 		qreply(wq, mp);
1929 	}
1930 }
1931 
1932 static void
1933 xgell_m_blank(void *arg, time_t ticks, uint_t count)
1934 {
1935 }
1936 
1937 #define	XGE_RX_INTPT_TIME	128
1938 #define	XGE_RX_PKT_CNT		8
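/*
 * RX interrupt-blanking hints handed to the MAC layer in
 * xgell_m_resources() below: the nominal blanking time (in ticks) and
 * the packet count per delivery.
 */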
1939 
1940 static void
1941 xgell_m_resources(void *arg)
1942 {
1943 	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
1944 	xgelldev_t *lldev = xge_hal_device_private(hldev);
1945 	mac_rx_fifo_t mrf;
1946 
1947 	mrf.mrf_type = MAC_RX_FIFO;
1948 	mrf.mrf_blank = xgell_m_blank;
1949 	mrf.mrf_arg = (void *)hldev;
1950 	mrf.mrf_normal_blank_time = XGE_RX_INTPT_TIME;
1951 	mrf.mrf_normal_pkt_count = XGE_RX_PKT_CNT;
1952 
1953 	lldev->ring_main.handle = mac_resource_add(lldev->macp,
1954 	    (mac_resource_t *)&mrf);
1955 }
1956 
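/*
 * xgell_stats_get
 *
 * ndd "stats" read handler.  Concatenates the TMAC, RMAC, PCI,
 * software-device and HAL statistics dumps produced by the HAL aux
 * API into a single reply.
 */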
1957 static int
1958 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
1959 {
1960 	xgelldev_t *lldev = (xgelldev_t *)cp;
1961 	xge_hal_status_e status;
1962 	int count = 0, retsize;
1963 	char *buf;
1964 
	/* a KM_SLEEP allocation cannot fail, so no NULL check is needed */
	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
1969 
1970 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
1971 	    buf, &retsize);
1972 	if (status != XGE_HAL_OK) {
1973 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1974 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
1975 		return (EINVAL);
1976 	}
1977 	count += retsize;
1978 
1979 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
1980 	    XGELL_STATS_BUFSIZE - count,
1981 	    buf+count, &retsize);
1982 	if (status != XGE_HAL_OK) {
1983 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1984 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
1985 		return (EINVAL);
1986 	}
1987 	count += retsize;
1988 
1989 	status = xge_hal_aux_stats_pci_read(lldev->devh,
1990 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
1991 	if (status != XGE_HAL_OK) {
1992 		kmem_free(buf, XGELL_STATS_BUFSIZE);
1993 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
1994 		return (EINVAL);
1995 	}
1996 	count += retsize;
1997 
1998 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
1999 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2000 	if (status != XGE_HAL_OK) {
2001 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2002 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2003 		return (EINVAL);
2004 	}
2005 	count += retsize;
2006 
2007 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2008 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2009 	if (status != XGE_HAL_OK) {
2010 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2011 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2012 		return (EINVAL);
2013 	}
2014 	count += retsize;
2015 
2016 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2017 	(void) mi_mpprintf(mp, "%s", buf);
2018 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2019 
2020 	return (0);
2021 }
2022 
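/*
 * xgell_pciconf_get
 *
 * ndd "pciconf" read handler: dumps the device's PCI configuration
 * space.
 */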
2023 static int
2024 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2025 {
2026 	xgelldev_t *lldev = (xgelldev_t *)cp;
2027 	xge_hal_status_e status;
2028 	int retsize;
2029 	char *buf;
2030 
	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2035 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2036 	    buf, &retsize);
2037 	if (status != XGE_HAL_OK) {
2038 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2039 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2040 		return (EINVAL);
2041 	}
2042 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2043 	(void) mi_mpprintf(mp, "%s", buf);
2044 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2045 
2046 	return (0);
2047 }
2048 
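/*
 * xgell_about_get
 *
 * ndd "about" read handler: dumps the driver/device "about"
 * information provided by the HAL.
 */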
2049 static int
2050 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2051 {
2052 	xgelldev_t *lldev = (xgelldev_t *)cp;
2053 	xge_hal_status_e status;
2054 	int retsize;
2055 	char *buf;
2056 
	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2061 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2062 	    buf, &retsize);
2063 	if (status != XGE_HAL_OK) {
2064 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2065 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2066 		return (EINVAL);
2067 	}
2068 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2069 	(void) mi_mpprintf(mp, "%s", buf);
2070 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2071 
2072 	return (0);
2073 }
2074 
2075 static unsigned long bar0_offset = 0x110; /* adapter_control */
2076 
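/*
 * xgell_bar0_get
 *
 * ndd "bar0" read handler: dumps the BAR0 register at the offset
 * last set through xgell_bar0_set() (adapter_control by default).
 */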
2077 static int
2078 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2079 {
2080 	xgelldev_t *lldev = (xgelldev_t *)cp;
2081 	xge_hal_status_e status;
2082 	int retsize;
2083 	char *buf;
2084 
	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2089 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2090 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2091 	if (status != XGE_HAL_OK) {
2092 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2093 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2094 		return (EINVAL);
2095 	}
2096 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2097 	(void) mi_mpprintf(mp, "%s", buf);
2098 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2099 
2100 	return (0);
2101 }
2102 
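/*
 * xgell_bar0_set
 *
 * ndd "bar0" write handler: parses a hexadecimal register offset
 * (with or without a leading "0x") and stores it in bar0_offset for
 * subsequent reads.  A hypothetical invocation, assuming the driver
 * exports a /dev/xge node:
 *
 *	ndd -set /dev/xge bar0 0x110
 *	ndd /dev/xge bar0
 */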
2103 static int
2104 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2105 {
2106 	unsigned long old_offset = bar0_offset;
2107 	char *end;
2108 
2109 	if (value && *value == '0' &&
2110 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2111 		value += 2;
2112 	}
2113 
2114 	bar0_offset = mi_strtol(value, &end, 16);
2115 	if (end == value) {
2116 		bar0_offset = old_offset;
2117 		return (EINVAL);
2118 	}
2119 
2120 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2121 
2122 	return (0);
2123 }
2124 
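/*
 * xgell_debug_level_get
 *
 * ndd "debug_level" read handler: reports the current HAL debug
 * level.
 */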
2125 static int
2126 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2127 {
	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2136 
2137 	return (0);
2138 }
2139 
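/*
 * xgell_debug_level_set
 *
 * ndd "debug_level" write handler: accepts a decimal level between
 * XGE_NONE and XGE_ERR.
 */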
2140 static int
2141 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2142     cred_t *credp)
2143 {
2144 	int level;
2145 	char *end;
2146 
2147 	level = mi_strtol(value, &end, 10);
2148 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2149 		return (EINVAL);
2150 	}
2151 
2152 	xge_hal_driver_debug_level_set(level);
2153 
2154 	return (0);
2155 }
2156 
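/*
 * xgell_debug_module_mask_get
 *
 * ndd "debug_module_mask" read handler: reports the current HAL
 * debug module mask.
 */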
2157 static int
2158 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2159 {
	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
	    xge_hal_driver_debug_module_mask());
2169 
2170 	return (0);
2171 }
2172 
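/*
 * xgell_debug_module_mask_set
 *
 * ndd "debug_module_mask" write handler: parses a hexadecimal mask
 * (with or without a leading "0x").
 */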
2173 static int
2174 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *credp)
2176 {
2177 	u32 mask;
2178 	char *end;
2179 
2180 	if (value && *value == '0' &&
2181 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2182 		value += 2;
2183 	}
2184 
2185 	mask = mi_strtol(value, &end, 16);
2186 	if (end == value) {
2187 		return (EINVAL);
2188 	}
2189 
2190 	xge_hal_driver_debug_module_mask_set(mask);
2191 
2192 	return (0);
2193 }
2194 
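/*
 * xgell_devconfig_get
 *
 * ndd "devconfig" read handler: dumps the current HAL device
 * configuration.
 */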
2195 static int
2196 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2197 {
2198 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2199 	xge_hal_status_e status;
2200 	int retsize;
2201 	char *buf;
2202 
	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
	status = xge_hal_aux_device_config_read(lldev->devh,
	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2210 	if (status != XGE_HAL_OK) {
2211 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2212 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2213 		    status);
2214 		return (EINVAL);
2215 	}
2216 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2217 	(void) mi_mpprintf(mp, "%s", buf);
2218 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2219 
2220 	return (0);
2221 }
2222 
/*
 * xgell_device_register
 * @lldev: pointer to valid LL device.
 * @config: pointer to this network device's configuration.
 *
 * This function will register the network device: it sets up the MAC
 * callbacks, loads the ndd parameters, creates the RX buffer pool and
 * registers with the MAC layer.
 */
2231 int
2232 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2233 {
2234 	mac_t *macp = lldev->macp;
2235 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2236 	mac_info_t *mip;
2237 
2238 	mip = &(macp->m_info);
2239 
2240 	mip->mi_media = DL_ETHER;
2241 	mip->mi_sdu_min = 0;
2242 	mip->mi_sdu_max = hldev->config.mtu;
2243 	mip->mi_cksum = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2244 	    HCKSUM_IPHDRCKSUM;
2245 
2246 	/*
	 * When xgell_m_blank() has a valid implementation, this
	 * should be changed to enable polling by adding DL_CAPAB_POLL
	 * to mi_poll.
2250 	 */
2251 	mip->mi_poll = 0;
2252 	mip->mi_addr_length = ETHERADDRL;
2253 	bcopy(xge_broadcast_addr, mip->mi_brdcst_addr, ETHERADDRL);
2254 	bcopy(&hldev->macaddr[0], mip->mi_unicst_addr, ETHERADDRL);
2255 
2256 	MAC_STAT_MIB(mip->mi_stat);
2257 	mip->mi_stat[MAC_STAT_UNKNOWNS] = B_FALSE;
2258 	mip->mi_stat[MAC_STAT_COLLISIONS] = B_FALSE;
2259 
2260 	mip->mi_stat[MAC_STAT_FCS_ERRORS] = B_TRUE;
2261 	mip->mi_stat[MAC_STAT_TOOLONG_ERRORS] = B_TRUE;
2262 
2263 	mip->mi_stat[MAC_STAT_LINK_DUPLEX] = B_TRUE;
2264 
2265 	macp->m_stat = xgell_m_stat;
2266 	macp->m_stop = xgell_m_stop;
2267 	macp->m_start = xgell_m_start;
2268 	macp->m_unicst = xgell_m_unicst;
2269 	macp->m_multicst = xgell_m_multicst;
2270 	macp->m_promisc = xgell_m_promisc;
2271 	macp->m_tx = xgell_m_tx;
2272 	macp->m_resources = xgell_m_resources;
2273 	macp->m_ioctl = xgell_m_ioctl;
2274 
2275 	macp->m_dip = hldev->pdev;
2276 	macp->m_driver = (caddr_t)hldev;
2277 	macp->m_ident = MAC_IDENT;
2278 
2279 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2280 	    (caddr_t)lldev) == B_FALSE) {
2281 		nd_free(&lldev->ndp);
2282 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2283 		return (DDI_FAILURE);
2284 	}
2285 
2286 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2287 	    (caddr_t)lldev) == B_FALSE) {
2288 		nd_free(&lldev->ndp);
2289 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2290 		return (DDI_FAILURE);
2291 	}
2292 
2293 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2294 	    (caddr_t)lldev) == B_FALSE) {
2295 		nd_free(&lldev->ndp);
2296 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2297 		return (DDI_FAILURE);
2298 	}
2299 
2300 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2301 	    (caddr_t)lldev) == B_FALSE) {
2302 		nd_free(&lldev->ndp);
2303 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2304 		return (DDI_FAILURE);
2305 	}
2306 
2307 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2308 		    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE) {
2309 		nd_free(&lldev->ndp);
2310 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2311 		return (DDI_FAILURE);
2312 	}
2313 
2314 	if (nd_load(&lldev->ndp, "debug_module_mask",
2315 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2316 	    (caddr_t)lldev) == B_FALSE) {
2317 		nd_free(&lldev->ndp);
2318 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2319 		return (DDI_FAILURE);
2320 	}
2321 
2322 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2323 	    (caddr_t)lldev) == B_FALSE) {
2324 		nd_free(&lldev->ndp);
2325 		xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2326 		return (DDI_FAILURE);
2327 	}
2328 
2329 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2330 
2331 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2332 		nd_free(&lldev->ndp);
2333 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2334 		return (DDI_FAILURE);
2335 	}
2336 
2337 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2338 
2339 	/*
2340 	 * Finally, we're ready to register ourselves with the Nemo
2341 	 * interface; if this succeeds, we're all ready to start()
2342 	 */
2343 	if (mac_register(macp) != 0) {
2344 		nd_free(&lldev->ndp);
2345 		mutex_destroy(&lldev->genlock);
		/* Ignore the return value, since RX has not been started */
2347 		(void) xgell_rx_destroy_buffer_pool(lldev);
2348 		xge_debug_ll(XGE_ERR, "%s",
2349 		    "unable to register networking device");
2350 		return (DDI_FAILURE);
2351 	}
2352 
2353 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
2354 	    XGELL_IFNAME, lldev->instance);
2355 
2356 	return (DDI_SUCCESS);
2357 }
2358 
2359 /*
2360  * xgell_device_unregister
 * @lldev: pointer to valid LL device.
 *
 * This function will unregister and free the network device.
2365  */
2366 int
2367 xgell_device_unregister(xgelldev_t *lldev)
2368 {
2369 	/*
2370 	 * Destroy RX buffer pool.
2371 	 */
2372 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2373 		return (DDI_FAILURE);
2374 	}
2375 
2376 	if (mac_unregister(lldev->macp) != 0) {
2377 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2378 		    XGELL_IFNAME, lldev->instance);
2379 		return (DDI_FAILURE);
2380 	}
2381 
2382 	mutex_destroy(&lldev->genlock);
2383 
2384 	nd_free(&lldev->ndp);
2385 
2386 	xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",
2387 	    XGELL_IFNAME, lldev->instance);
2388 
2389 	return (DDI_SUCCESS);
2390 }
2391