xref: /titanic_41/usr/src/uts/common/io/xge/drv/xgell.c (revision 0d6bb4c6728fd20087fe25f4028a3838250e6e9c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *  Copyright (c) 2002-2009 Neterion, Inc.
29  *  All right Reserved.
30  *
31  *  FileName :    xgell.c
32  *
33  *  Description:  Xge Link Layer data path implementation
34  *
35  */
36 
37 #include "xgell.h"
38 
39 #include <netinet/ip.h>
40 #include <netinet/tcp.h>
41 #include <netinet/udp.h>
42 
43 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
44     sizeof (struct ether_vlan_header))
45 
46 #define	HEADROOM		2	/* for DIX-only packets */
47 
48 void header_free_func(void *arg) { }
49 frtn_t header_frtn = {header_free_func, NULL};
50 
51 /* DMA attributes used for Tx side */
52 static struct ddi_dma_attr tx_dma_attr = {
53 	DMA_ATTR_V0,			/* dma_attr_version */
54 	0x0ULL,				/* dma_attr_addr_lo */
55 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
56 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
57 #if defined(__sparc)
58 	0x2000,				/* dma_attr_align */
59 #else
60 	0x1000,				/* dma_attr_align */
61 #endif
62 	0xFC00FC,			/* dma_attr_burstsizes */
63 	0x1,				/* dma_attr_minxfer */
64 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
65 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
66 	18,				/* dma_attr_sgllen */
67 	(unsigned int)1,		/* dma_attr_granular */
68 	0				/* dma_attr_flags */
69 };
70 
71 /*
72  * DMA attributes used when using ddi_dma_mem_alloc to
73  * allocate HAL descriptors and Rx buffers during replenish
74  */
75 static struct ddi_dma_attr hal_dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version */
77 	0x0ULL,				/* dma_attr_addr_lo */
78 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
79 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
80 #if defined(__sparc)
81 	0x2000,				/* dma_attr_align */
82 #else
83 	0x1000,				/* dma_attr_align */
84 #endif
85 	0xFC00FC,			/* dma_attr_burstsizes */
86 	0x1,				/* dma_attr_minxfer */
87 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
88 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
89 	1,				/* dma_attr_sgllen */
90 	(unsigned int)1,		/* dma_attr_granular */
91 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
92 };
93 
94 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95 
96 static int		xgell_m_stat(void *, uint_t, uint64_t *);
97 static int		xgell_m_start(void *);
98 static void		xgell_m_stop(void *);
99 static int		xgell_m_promisc(void *, boolean_t);
100 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
101 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
102 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
103 
104 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
105 
106 static mac_callbacks_t xgell_m_callbacks = {
107 	XGELL_M_CALLBACK_FLAGS,
108 	xgell_m_stat,
109 	xgell_m_start,
110 	xgell_m_stop,
111 	xgell_m_promisc,
112 	xgell_m_multicst,
113 	NULL,
114 	NULL,
115 	NULL,
116 	xgell_m_ioctl,
117 	xgell_m_getcapab
118 };
119 
120 /*
121  * xge_device_poll
122  *
123  * The timeout() callback should call this every 1 second. xge_callback_event_queued
124  * should call this when a HAL event has been rescheduled.
125  */
126 /*ARGSUSED*/
127 void
128 xge_device_poll(void *data)
129 {
130 	xgelldev_t *lldev = xge_hal_device_private(data);
131 
132 	mutex_enter(&lldev->genlock);
133 	if (lldev->is_initialized) {
134 		xge_hal_device_poll(data);
135 		lldev->timeout_id = timeout(xge_device_poll, data,
136 		    XGE_DEV_POLL_TICKS);
137 	} else if (lldev->in_reset == 1) {
138 		lldev->timeout_id = timeout(xge_device_poll, data,
139 		    XGE_DEV_POLL_TICKS);
140 	} else {
141 		lldev->timeout_id = 0;
142 	}
143 	mutex_exit(&lldev->genlock);
144 }
145 
146 /*
147  * xge_device_poll_now
148  *
149  * Will call xge_device_poll() immediately
150  */
151 void
152 xge_device_poll_now(void *data)
153 {
154 	xgelldev_t *lldev = xge_hal_device_private(data);
155 
156 	mutex_enter(&lldev->genlock);
157 	if (lldev->is_initialized) {
158 		xge_hal_device_poll(data);
159 	}
160 	mutex_exit(&lldev->genlock);
161 }
162 
163 /*
164  * xgell_callback_link_up
165  *
166  * This function called by HAL to notify HW link up state change.
167  * This function is called by HAL to notify of a HW link-up state change.
168 void
169 xgell_callback_link_up(void *userdata)
170 {
171 	xgelldev_t *lldev = (xgelldev_t *)userdata;
172 
173 	mac_link_update(lldev->mh, LINK_STATE_UP);
174 }
175 
176 /*
177  * xgell_callback_link_down
178  *
179  * This function called by HAL to notify HW link down state change.
180  * This function is called by HAL to notify of a HW link-down state change.
181 void
182 xgell_callback_link_down(void *userdata)
183 {
184 	xgelldev_t *lldev = (xgelldev_t *)userdata;
185 
186 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
187 }
188 
189 /*
190  * xgell_rx_buffer_replenish_all
191  *
192  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
193  * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
194  * Must be called with pool_lock held.
195  */
196 static void
197 xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
198 {
199 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
200 	xge_hal_dtr_h dtr;
201 	xgell_rx_buffer_t *rx_buffer;
202 	xgell_rxd_priv_t *rxd_priv;
203 
204 	xge_assert(mutex_owned(&bf_pool->pool_lock));
205 
206 	while ((bf_pool->free > 0) &&
207 	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
208 		xge_assert(bf_pool->head);
209 
210 		rx_buffer = bf_pool->head;
211 
212 		bf_pool->head = rx_buffer->next;
213 		bf_pool->free--;
214 
215 		xge_assert(rx_buffer->dma_addr);
216 
217 		rxd_priv = (xgell_rxd_priv_t *)
218 		    xge_hal_ring_dtr_private(ring->channelh, dtr);
219 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
220 		    bf_pool->size);
221 
222 		rxd_priv->rx_buffer = rx_buffer;
223 		xge_hal_ring_dtr_post(ring->channelh, dtr);
224 	}
225 }
226 
227 /*
228  * xgell_rx_buffer_release
229  *
230  * The only thing done here is to put the buffer back into the pool.
231  * Callers must hold the bf_pool.pool_lock mutex.
232  */
233 static void
234 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
235 {
236 	xgell_rx_ring_t *ring = rx_buffer->ring;
237 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
238 
239 	xge_assert(mutex_owned(&bf_pool->pool_lock));
240 
241 	/* Put the buffer back to pool */
242 	rx_buffer->next = bf_pool->head;
243 	bf_pool->head = rx_buffer;
244 
245 	bf_pool->free++;
246 }
247 
248 /*
249  * xgell_rx_buffer_recycle
250  *
251  * The free routine registered via desballoc(), called when the loaned-up
252  * buffer is freed. We will try to replenish all descriptors.
253  */
254 
255 /*
256  * Previously there was heavy lock contention between xgell_rx_1b_callback() and
257  * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and hurt
258  * rx performance. A separate recycle list is introduced to overcome this.
259  * The recycle list records the rx buffers that have been recycled, and these
260  * buffers are returned to the free list in bulk instead of
261  * one-by-one.
262  */
263 
264 static void
265 xgell_rx_buffer_recycle(char *arg)
266 {
267 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
268 	xgell_rx_ring_t *ring = rx_buffer->ring;
269 	xgelldev_t *lldev = ring->lldev;
270 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
271 
272 	mutex_enter(&bf_pool->recycle_lock);
273 
274 	rx_buffer->next = bf_pool->recycle_head;
275 	bf_pool->recycle_head = rx_buffer;
276 	if (bf_pool->recycle_tail == NULL)
277 		bf_pool->recycle_tail = rx_buffer;
278 	bf_pool->recycle++;
279 
280 	/*
281 	 * Before finding a good way to set this hiwat, just always call to
282 	 * replenish_all. *TODO*
283 	 */
284 	if ((lldev->is_initialized != 0) && (ring->live) &&
285 	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
286 		mutex_enter(&bf_pool->pool_lock);
287 		bf_pool->recycle_tail->next = bf_pool->head;
288 		bf_pool->head = bf_pool->recycle_head;
289 		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
290 		bf_pool->post -= bf_pool->recycle;
291 		bf_pool->free += bf_pool->recycle;
292 		bf_pool->recycle = 0;
293 		xgell_rx_buffer_replenish_all(ring);
294 		mutex_exit(&bf_pool->pool_lock);
295 	}
296 
297 	mutex_exit(&bf_pool->recycle_lock);
298 }
299 
300 /*
301  * xgell_rx_buffer_alloc
302  *
303  * Allocate one rx buffer and return with the pointer to the buffer.
304  * Return NULL if failed.
305  */
306 static xgell_rx_buffer_t *
307 xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
308 {
309 	xgelldev_t *lldev = ring->lldev;
310 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
311 	xge_hal_device_t *hldev;
312 	void *vaddr;
313 	ddi_dma_handle_t dma_handle;
314 	ddi_acc_handle_t dma_acch;
315 	dma_addr_t dma_addr;
316 	uint_t ncookies;
317 	ddi_dma_cookie_t dma_cookie;
318 	size_t real_size;
319 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
320 	xgell_rx_buffer_t *rx_buffer;
321 
322 	hldev = (xge_hal_device_t *)lldev->devh;
323 
324 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
325 	    0, &dma_handle) != DDI_SUCCESS) {
326 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
327 		    XGELL_IFNAME, lldev->instance);
328 		goto handle_failed;
329 	}
330 
331 	/* reserve some space at the end of the buffer for recycling */
332 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
333 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
334 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
335 	    DDI_SUCCESS) {
336 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
337 		    XGELL_IFNAME, lldev->instance);
338 		goto mem_failed;
339 	}
340 
341 	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
342 	    real_size) {
343 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
344 		    XGELL_IFNAME, lldev->instance);
345 		goto bind_failed;
346 	}
347 
348 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
349 	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
350 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
351 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
352 		    XGELL_IFNAME, lldev->instance);
353 		goto bind_failed;
354 	}
355 
356 	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
357 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
358 		    XGELL_IFNAME, lldev->instance);
359 		goto check_failed;
360 	}
361 
362 	dma_addr = dma_cookie.dmac_laddress;
363 
364 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
365 	    sizeof (xgell_rx_buffer_t));
366 	rx_buffer->next = NULL;
367 	rx_buffer->vaddr = vaddr;
368 	rx_buffer->dma_addr = dma_addr;
369 	rx_buffer->dma_handle = dma_handle;
370 	rx_buffer->dma_acch = dma_acch;
371 	rx_buffer->ring = ring;
372 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
373 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
374 
375 	return (rx_buffer);
376 
377 check_failed:
378 	(void) ddi_dma_unbind_handle(dma_handle);
379 bind_failed:
380 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
381 	ddi_dma_mem_free(&dma_acch);
382 mem_failed:
383 	ddi_dma_free_handle(&dma_handle);
384 handle_failed:
385 
386 	return (NULL);
387 }
388 
389 /*
390  * xgell_rx_destroy_buffer_pool
391  *
392  * Destroy the buffer pool. If any buffers are still held by the upper layer,
393  * as recorded by bf_pool.post, return B_FALSE so the driver refuses to detach.
394  */
395 static boolean_t
396 xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
397 {
398 	xgelldev_t *lldev = ring->lldev;
399 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
400 	xgell_rx_buffer_t *rx_buffer;
401 	ddi_dma_handle_t  dma_handle;
402 	ddi_acc_handle_t  dma_acch;
403 	int i;
404 
405 	/*
406 	 * If the pool has been destroyed, just return B_TRUE
407 	 */
408 	if (!bf_pool->live)
409 		return (B_TRUE);
410 
411 	mutex_enter(&bf_pool->recycle_lock);
412 	if (bf_pool->recycle > 0) {
413 		mutex_enter(&bf_pool->pool_lock);
414 		bf_pool->recycle_tail->next = bf_pool->head;
415 		bf_pool->head = bf_pool->recycle_head;
416 		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
417 		bf_pool->post -= bf_pool->recycle;
418 		bf_pool->free += bf_pool->recycle;
419 		bf_pool->recycle = 0;
420 		mutex_exit(&bf_pool->pool_lock);
421 	}
422 	mutex_exit(&bf_pool->recycle_lock);
423 
424 	/*
425 	 * If any buffers are still posted, the driver must refuse to detach.
426 	 * The upper layer needs to be notified to release them.
427 	 */
428 	if (bf_pool->post != 0) {
429 		xge_debug_ll(XGE_ERR,
430 		    "%s%d has some buffers not yet recycled, try later!",
431 		    XGELL_IFNAME, lldev->instance);
432 		return (B_FALSE);
433 	}
434 
435 	/*
436 	 * Release buffers one by one.
437 	 */
438 	for (i = bf_pool->total; i > 0; i--) {
439 		rx_buffer = bf_pool->head;
440 		xge_assert(rx_buffer != NULL);
441 
442 		bf_pool->head = rx_buffer->next;
443 
444 		dma_handle = rx_buffer->dma_handle;
445 		dma_acch = rx_buffer->dma_acch;
446 
447 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
448 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
449 			bf_pool->head = rx_buffer;
450 			return (B_FALSE);
451 		}
452 		ddi_dma_mem_free(&dma_acch);
453 		ddi_dma_free_handle(&dma_handle);
454 
455 		bf_pool->total--;
456 		bf_pool->free--;
457 	}
458 
459 	xge_assert(!mutex_owned(&bf_pool->pool_lock));
460 
461 	mutex_destroy(&bf_pool->recycle_lock);
462 	mutex_destroy(&bf_pool->pool_lock);
463 	bf_pool->live = B_FALSE;
464 
465 	return (B_TRUE);
466 }
467 
468 /*
469  * xgell_rx_create_buffer_pool
470  *
471  * Initialize the RX buffer pool for this RX ring. Refer to xgell_rx_buffer_pool_t.
472  */
473 static boolean_t
474 xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
475 {
476 	xgelldev_t *lldev = ring->lldev;
477 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
478 	xge_hal_device_t *hldev;
479 	xgell_rx_buffer_t *rx_buffer;
480 	int i;
481 
482 	if (bf_pool->live)
483 		return (B_TRUE);
484 
485 	hldev = (xge_hal_device_t *)lldev->devh;
486 
487 	bf_pool->total = 0;
488 	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
489 	bf_pool->head = NULL;
490 	bf_pool->free = 0;
491 	bf_pool->post = 0;
492 	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
493 	bf_pool->recycle = 0;
494 	bf_pool->recycle_head = NULL;
495 	bf_pool->recycle_tail = NULL;
496 	bf_pool->live = B_TRUE;
497 
498 	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
499 	    DDI_INTR_PRI(hldev->irqh));
500 	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
501 	    DDI_INTR_PRI(hldev->irqh));
502 
503 	/*
504 	 * Allocate buffers one by one. If any allocation fails, destroy the
505 	 * whole pool by calling xgell_rx_destroy_buffer_pool().
506 	 */
507 
508 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
509 		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
510 			(void) xgell_rx_destroy_buffer_pool(ring);
511 			return (B_FALSE);
512 		}
513 
514 		rx_buffer->next = bf_pool->head;
515 		bf_pool->head = rx_buffer;
516 
517 		bf_pool->total++;
518 		bf_pool->free++;
519 	}
520 
521 	return (B_TRUE);
522 }
523 
524 /*
525  * xgell_rx_dtr_replenish
526  *
527  * Replenish a descriptor with an rx_buffer from the RX buffer pool.
528  * The dtr should be posted right away.
529  */
530 xge_hal_status_e
531 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
532     void *userdata, xge_hal_channel_reopen_e reopen)
533 {
534 	xgell_rx_ring_t *ring = userdata;
535 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
536 	xgell_rx_buffer_t *rx_buffer;
537 	xgell_rxd_priv_t *rxd_priv;
538 
539 	mutex_enter(&bf_pool->pool_lock);
540 	if (bf_pool->head == NULL) {
541 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
		mutex_exit(&bf_pool->pool_lock);
542 		return (XGE_HAL_FAIL);
543 	}
544 	rx_buffer = bf_pool->head;
545 	xge_assert(rx_buffer);
546 	xge_assert(rx_buffer->dma_addr);
547 
548 	bf_pool->head = rx_buffer->next;
549 	bf_pool->free--;
550 	mutex_exit(&bf_pool->pool_lock);
551 
552 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
553 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
554 
555 	rxd_priv->rx_buffer = rx_buffer;
556 
557 	return (XGE_HAL_OK);
558 }
559 
560 /*
561  * xgell_get_ip_offset
562  *
563  * Calculate the offset to IP header.
564  */
565 static inline int
566 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
567 {
568 	int ip_off;
569 
570 	/* get IP-header offset */
571 	switch (ext_info->frame) {
572 	case XGE_HAL_FRAME_TYPE_DIX:
573 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
574 		break;
575 	case XGE_HAL_FRAME_TYPE_IPX:
576 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
577 		    XGE_HAL_HEADER_802_2_SIZE +
578 		    XGE_HAL_HEADER_SNAP_SIZE);
579 		break;
580 	case XGE_HAL_FRAME_TYPE_LLC:
581 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
582 		    XGE_HAL_HEADER_802_2_SIZE);
583 		break;
584 	case XGE_HAL_FRAME_TYPE_SNAP:
585 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
586 		    XGE_HAL_HEADER_SNAP_SIZE);
587 		break;
588 	default:
589 		ip_off = 0;
590 		break;
591 	}
592 
593 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
594 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
595 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
596 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
597 	}
598 
599 	return (ip_off);
600 }
601 
602 /*
603  * xgell_rx_hcksum_assoc
604  *
605  * Judge the packet type and then call to hcksum_assoc() to associate
606  * h/w checksum information.
607  */
608 static inline void
609 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
610     xge_hal_dtr_info_t *ext_info)
611 {
612 	int cksum_flags = 0;
613 
614 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
615 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
616 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
617 				cksum_flags |= HCK_IPV4_HDRCKSUM_OK;
618 			}
619 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
620 				cksum_flags |= HCK_FULLCKSUM_OK;
621 			}
622 			if (cksum_flags != 0) {
623 				mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
624 			}
625 		}
626 	} else if (ext_info->proto &
627 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
628 		/*
629 		 * Just pass the partial cksum up to IP.
630 		 */
631 		int ip_off = xgell_get_ip_offset(ext_info);
632 		int start, end = pkt_length - ip_off;
633 
634 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
635 			struct ip *ip =
636 			    (struct ip *)(vaddr + ip_off);
637 			start = ip->ip_hl * 4;
638 		} else {
639 			start = 40;
640 		}
641 		cksum_flags |= HCK_PARTIALCKSUM;
642 		mac_hcksum_set(mp, start, 0, end,
643 		    ntohs(ext_info->l4_cksum), cksum_flags);
644 	}
645 }
646 
647 /*
648  * xgell_rx_1b_msg_alloc
649  *
650  * Allocate a message header for the data buffer, and decide whether to copy the
651  * packet into a new data buffer so the big rx_buffer can be released to save memory.
652  *
653  * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a new
654  * message and copy the payload in.
655  */
656 static mblk_t *
657 xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
658     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
659 {
660 	xgelldev_t *lldev = ring->lldev;
661 	mblk_t *mp;
662 	char *vaddr;
663 
664 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
665 	/*
666 	 * Copy packet into new allocated message buffer, if pkt_length
667 	 * is less than XGELL_RX_DMA_LOWAT
668 	 */
669 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
670 		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
671 			return (NULL);
672 		}
673 		mp->b_rptr += HEADROOM;
674 		bcopy(vaddr, mp->b_rptr, pkt_length);
675 		mp->b_wptr = mp->b_rptr + pkt_length;
676 		*copyit = B_TRUE;
677 		return (mp);
678 	}
679 
680 	/*
681 	 * Just allocate mblk for current data buffer
682 	 */
683 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
684 	    &rx_buffer->frtn)) == NULL) {
685 		/* Drop it */
686 		return (NULL);
687 	}
688 	/*
689 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
690 	 */
691 	mp->b_wptr += pkt_length;
692 
693 	return (mp);
694 }
695 
696 /*
697  * xgell_rx_1b_callback
698  *
699  * This function is called when an interrupt indicates a received frame, or
700  * when the receive ring contains fresh, as-yet-unprocessed frames.
701  */
702 static xge_hal_status_e
703 xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
704     void *userdata)
705 {
706 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
707 	xgelldev_t *lldev = ring->lldev;
708 	xgell_rx_buffer_t *rx_buffer;
709 	mblk_t *mp_head = NULL;
710 	mblk_t *mp_end  = NULL;
711 	int pkt_burst = 0;
712 
713 	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);
714 
715 	mutex_enter(&ring->bf_pool.pool_lock);
716 	do {
717 		int pkt_length;
718 		dma_addr_t dma_data;
719 		mblk_t *mp;
720 		boolean_t copyit = B_FALSE;
721 
722 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
723 		    xge_hal_ring_dtr_private(channelh, dtr));
724 		xge_hal_dtr_info_t ext_info;
725 
726 		rx_buffer = rxd_priv->rx_buffer;
727 
728 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
729 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
730 
731 		xge_assert(dma_data == rx_buffer->dma_addr);
732 
733 		if (t_code != 0) {
734 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
735 			    " completed due to error t_code %01x", XGELL_IFNAME,
736 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
737 
738 			(void) xge_hal_device_handle_tcode(channelh, dtr,
739 			    t_code);
740 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
741 			xgell_rx_buffer_release(rx_buffer);
742 			continue;
743 		}
744 
745 		/*
746 		 * Sync the DMA memory
747 		 */
748 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
749 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
750 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
751 			    XGELL_IFNAME, lldev->instance);
752 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
753 			xgell_rx_buffer_release(rx_buffer);
754 			continue;
755 		}
756 
757 		/*
758 		 * Allocate message for the packet.
759 		 */
760 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
761 			copyit = B_TRUE;
762 		} else {
763 			copyit = B_FALSE;
764 		}
765 
766 		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
767 		    &ext_info, &copyit);
768 
769 		xge_hal_ring_dtr_free(channelh, dtr);
770 
771 		/*
772 		 * Release the buffer and recycle it later
773 		 */
774 		if ((mp == NULL) || copyit) {
775 			xgell_rx_buffer_release(rx_buffer);
776 		} else {
777 			/*
778 			 * Count it since the buffer should be loaned up.
779 			 */
780 			ring->bf_pool.post++;
781 		}
782 		if (mp == NULL) {
783 			xge_debug_ll(XGE_ERR,
784 			    "%s%d: rx: can not allocate mp mblk",
785 			    XGELL_IFNAME, lldev->instance);
786 			continue;
787 		}
788 
789 		/*
790 		 * Associate cksum_flags per packet type and h/w
791 		 * cksum flags.
792 		 */
793 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
794 		    pkt_length, &ext_info);
795 
796 		ring->rx_pkts++;
797 		ring->rx_bytes += pkt_length;
798 
799 		if (mp_head == NULL) {
800 			mp_head = mp;
801 			mp_end = mp;
802 		} else {
803 			mp_end->b_next = mp;
804 			mp_end = mp;
805 		}
806 
807 		/*
808 		 * Inlined implemented polling function.
809 		 */
810 		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
811 			ring->poll_mp = mp_head;
812 		}
813 		if (ring->poll_mp != NULL) {
814 			if ((ring->poll_bytes -= pkt_length) <= 0) {
815 				/* have polled enough packets. */
816 				break;
817 			} else {
818 				/* continue polling packets. */
819 				continue;
820 			}
821 		}
822 
823 		/*
824 		 * We're not in polling mode, so try to chain more messages
825 		 * or send the chain up according to pkt_burst.
826 		 */
827 		if (++pkt_burst < lldev->config.rx_pkt_burst)
828 			continue;
829 
830 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
831 			/* Replenish rx buffers */
832 			xgell_rx_buffer_replenish_all(ring);
833 		}
834 		mutex_exit(&ring->bf_pool.pool_lock);
835 		if (mp_head != NULL) {
836 			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
837 			    ring->ring_gen_num);
838 		}
839 		mp_head = mp_end  = NULL;
840 		pkt_burst = 0;
841 		mutex_enter(&ring->bf_pool.pool_lock);
842 
843 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
844 	    XGE_HAL_OK);
845 
846 	/*
847 	 * Always call replenish_all to recycle rx_buffers.
848 	 */
849 	xgell_rx_buffer_replenish_all(ring);
850 	mutex_exit(&ring->bf_pool.pool_lock);
851 
852 	/*
853 	 * If we're not in polling cycle, call mac_rx(), otherwise
854 	 * just return while leaving packets chained to ring->poll_mp.
855 	 */
856 	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
857 		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
858 		    ring->ring_gen_num);
859 	}
860 
861 	return (XGE_HAL_OK);
862 }
863 
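/*
 * xgell_rx_poll
 *
 * Polling entry point registered with the MAC layer (mri_poll). Picks up
 * to bytes_to_pickup bytes of received packets from this rx ring and
 * returns them as an mblk chain.
 */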
864 mblk_t *
865 xgell_rx_poll(void *arg, int bytes_to_pickup)
866 {
867 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
868 	int got_rx = 0;
869 	mblk_t *mp;
870 
871 	xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
872 
873 	ring->poll_mp = NULL;
874 	ring->poll_bytes = bytes_to_pickup;
875 	(void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
876 
877 	mp = ring->poll_mp;
878 	ring->poll_bytes = -1;
879 	ring->polled_bytes += got_rx;
880 	ring->poll_mp = NULL;
881 
882 	return (mp);
883 }
884 
885 /*
886  * xgell_xmit_compl
887  *
888  * This function is called when an interrupt indicates DMA completion of a
889  * Tx packet. It identifies the last TxD whose buffer was freed and frees
890  * all mblks whose data have already been DMA'ed into the NIC's
891  * internal memory.
892  */
893 static xge_hal_status_e
894 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
895     void *userdata)
896 {
897 	xgell_tx_ring_t *ring = userdata;
898 	xgelldev_t *lldev = ring->lldev;
899 
900 	do {
901 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
902 		    xge_hal_fifo_dtr_private(dtr));
903 		int i;
904 
905 		if (t_code) {
906 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
907 			    " completed due to error t_code %01x", XGELL_IFNAME,
908 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
909 
910 			(void) xge_hal_device_handle_tcode(channelh, dtr,
911 			    t_code);
912 		}
913 
914 		for (i = 0; i < txd_priv->handle_cnt; i++) {
915 			if (txd_priv->dma_handles[i] != NULL) {
916 				xge_assert(txd_priv->dma_handles[i]);
917 				(void) ddi_dma_unbind_handle(
918 				    txd_priv->dma_handles[i]);
919 				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
920 				txd_priv->dma_handles[i] = 0;
921 			}
922 		}
923 		txd_priv->handle_cnt = 0;
924 
925 		xge_hal_fifo_dtr_free(channelh, dtr);
926 
927 		if (txd_priv->mblk != NULL) {
928 			freemsg(txd_priv->mblk);
929 			txd_priv->mblk = NULL;
930 		}
931 
932 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
933 	    XGE_HAL_OK);
934 
935 	if (ring->need_resched)
936 		mac_tx_ring_update(lldev->mh, ring->ring_handle);
937 
938 	return (XGE_HAL_OK);
939 }
940 
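/*
 * xgell_ring_tx
 *
 * Transmit entry point for a tx ring. Binds or copies the message blocks
 * into a fifo descriptor and posts it to the hardware. Returns NULL when
 * the packet has been consumed, or the original mblk when it should be
 * retried later (e.g. no free descriptors).
 */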
941 mblk_t *
942 xgell_ring_tx(void *arg, mblk_t *mp)
943 {
944 	xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
945 	mblk_t *bp;
946 	xgelldev_t *lldev = ring->lldev;
947 	xge_hal_device_t *hldev = lldev->devh;
948 	xge_hal_status_e status;
949 	xge_hal_dtr_h dtr;
950 	xgell_txd_priv_t *txd_priv;
951 	uint32_t hckflags;
952 	uint32_t lsoflags;
953 	uint32_t mss;
954 	int handle_cnt, frag_cnt, ret, i, copied;
955 	boolean_t used_copy;
956 	uint64_t sent_bytes;
957 
958 _begin:
959 	handle_cnt = frag_cnt = 0;
960 	sent_bytes = 0;
961 
962 	if (!lldev->is_initialized || lldev->in_reset)
963 		return (mp);
964 
965 	/*
966 	 * If the free Tx dtr count reaches the lower threshold, tell the MAC
967 	 * layer to stop sending more packets until the free dtr count exceeds
968 	 * the higher threshold again. The driver notifies the MAC layer via
969 	 * mac_tx_ring_update() (see xgell_xmit_compl()) once enough
970 	 * descriptors have been reclaimed.
971 	 */
972 	if (xge_hal_channel_dtr_count(ring->channelh)
973 	    <= XGELL_TX_LEVEL_LOW) {
974 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
975 		    " free descriptors count at low threshold %d",
976 		    XGELL_IFNAME, lldev->instance,
977 		    ((xge_hal_channel_t *)ring->channelh)->post_qid,
978 		    XGELL_TX_LEVEL_LOW);
979 		goto _exit;
980 	}
981 
982 	status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
983 	if (status != XGE_HAL_OK) {
984 		switch (status) {
985 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
986 			xge_debug_ll(XGE_ERR,
987 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
988 			    lldev->instance,
989 			    ((xge_hal_channel_t *)
990 			    ring->channelh)->post_qid);
991 			goto _exit;
992 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
993 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
994 			    " out of descriptors.", XGELL_IFNAME,
995 			    lldev->instance,
996 			    ((xge_hal_channel_t *)
997 			    ring->channelh)->post_qid);
998 			goto _exit;
999 		default:
1000 			return (mp);
1001 		}
1002 	}
1003 
1004 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1005 	txd_priv->mblk = mp;
1006 
1007 	/*
1008 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1009 	 * do insertion.
1010 	 *
1011 	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
1012 	 * example is the other implementation for xge. The driver can simply
1013 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let h/w
1014 	 * insert the tag automatically, since it's impossible that GLD sends
1015 	 * down mp(s) with a split ether_vlan_header.
1016 	 *
1017 	 * struct ether_vlan_header *evhp;
1018 	 * uint16_t tci;
1019 	 *
1020 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1021 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1022 	 *	tci = ntohs(evhp->ether_tci);
1023 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1024 	 *	    2 * ETHERADDRL);
1025 	 *	mp->b_rptr += VLAN_TAGSZ;
1026 	 *
1027 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1028 	 * }
1029 	 */
1030 
1031 	copied = 0;
1032 	used_copy = B_FALSE;
1033 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1034 		int mblen;
1035 		uint_t ncookies;
1036 		ddi_dma_cookie_t dma_cookie;
1037 		ddi_dma_handle_t dma_handle;
1038 
1039 		/* skip zero-length message blocks */
1040 		mblen = MBLKL(bp);
1041 		if (mblen == 0) {
1042 			continue;
1043 		}
1044 
1045 		sent_bytes += mblen;
1046 
1047 		/*
1048 		 * Check the message length to decide to DMA or bcopy() data
1049 		 * to tx descriptor(s).
1050 		 */
1051 		if (mblen < lldev->config.tx_dma_lowat &&
1052 		    (copied + mblen) < lldev->tx_copied_max) {
1053 			xge_hal_status_e rc;
1054 			rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
1055 			    dtr, bp->b_rptr, mblen);
1056 			if (rc == XGE_HAL_OK) {
1057 				used_copy = B_TRUE;
1058 				copied += mblen;
1059 				continue;
1060 			} else if (used_copy) {
1061 				xge_hal_fifo_dtr_buffer_finalize(
1062 				    ring->channelh, dtr, frag_cnt++);
1063 				used_copy = B_FALSE;
1064 			}
1065 		} else if (used_copy) {
1066 			xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
1067 			    dtr, frag_cnt++);
1068 			used_copy = B_FALSE;
1069 		}
1070 
1071 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1072 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1073 		if (ret != DDI_SUCCESS) {
1074 			xge_debug_ll(XGE_ERR,
1075 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1076 			    lldev->instance);
1077 			goto _exit_cleanup;
1078 		}
1079 
1080 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1081 		    (caddr_t)bp->b_rptr, mblen,
1082 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1083 		    &dma_cookie, &ncookies);
1084 
1085 		switch (ret) {
1086 		case DDI_DMA_MAPPED:
1087 			/* everything's fine */
1088 			break;
1089 
1090 		case DDI_DMA_NORESOURCES:
1091 			xge_debug_ll(XGE_ERR,
1092 			    "%s%d: can not bind dma address",
1093 			    XGELL_IFNAME, lldev->instance);
1094 			ddi_dma_free_handle(&dma_handle);
1095 			goto _exit_cleanup;
1096 
1097 		case DDI_DMA_NOMAPPING:
1098 		case DDI_DMA_INUSE:
1099 		case DDI_DMA_TOOBIG:
1100 		default:
1101 			/* drop packet, don't retry */
1102 			xge_debug_ll(XGE_ERR,
1103 			    "%s%d: can not map message buffer",
1104 			    XGELL_IFNAME, lldev->instance);
1105 			ddi_dma_free_handle(&dma_handle);
1106 			goto _exit_cleanup;
1107 		}
1108 
1109 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1110 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1111 			    "requested c:%d+f:%d", XGELL_IFNAME,
1112 			    lldev->instance, ncookies, frag_cnt);
1113 			(void) ddi_dma_unbind_handle(dma_handle);
1114 			ddi_dma_free_handle(&dma_handle);
1115 			goto _exit_cleanup;
1116 		}
1117 
1118 		/* setup the descriptors for this data buffer */
1119 		while (ncookies) {
1120 			xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
1121 			    frag_cnt++, dma_cookie.dmac_laddress,
1122 			    dma_cookie.dmac_size);
1123 			if (--ncookies) {
1124 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1125 			}
1126 
1127 		}
1128 
1129 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1130 
1131 		if (bp->b_cont &&
1132 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1133 		    hldev->config.fifo.max_frags)) {
1134 			mblk_t *nmp;
1135 
1136 			xge_debug_ll(XGE_TRACE,
1137 			    "too many FRAGs [%d], pull up them", frag_cnt);
1138 
1139 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1140 				/* Drop packet, don't retry */
1141 				xge_debug_ll(XGE_ERR,
1142 				    "%s%d: can not pullup message buffer",
1143 				    XGELL_IFNAME, lldev->instance);
1144 				goto _exit_cleanup;
1145 			}
1146 			freemsg(bp->b_cont);
1147 			bp->b_cont = nmp;
1148 		}
1149 	}
1150 
1151 	/* finalize unfinished copies */
1152 	if (used_copy) {
1153 		xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
1154 		    frag_cnt++);
1155 	}
1156 
1157 	txd_priv->handle_cnt = handle_cnt;
1158 
1159 	/*
1160 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1161 	 * do all necessary work.
1162 	 */
1163 	mac_lso_get(mp, &mss, &lsoflags);
1164 
1165 	if (lsoflags & HW_LSO) {
1166 		xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
1167 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1168 	}
1169 
1170 	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
1171 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1172 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1173 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1174 	}
1175 	if (hckflags & HCK_FULLCKSUM) {
1176 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1177 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1178 	}
1179 
1180 	xge_hal_fifo_dtr_post(ring->channelh, dtr);
1181 
1182 	/* Update per-ring tx statistics */
1183 	atomic_inc_64(&ring->tx_pkts);
1184 	atomic_add_64(&ring->tx_bytes, sent_bytes);
1185 
1186 	return (NULL);
1187 
1188 _exit_cleanup:
1189 	/*
1190 	 * Could not successfully transmit but have changed the message,
1191 	 * so just free it and return NULL
1192 	 */
1193 	for (i = 0; i < handle_cnt; i++) {
1194 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1195 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1196 		txd_priv->dma_handles[i] = 0;
1197 	}
1198 
1199 	xge_hal_fifo_dtr_free(ring->channelh, dtr);
1200 
1201 	freemsg(mp);
1202 	return (NULL);
1203 
1204 _exit:
1205 	ring->need_resched = B_TRUE;
1206 	return (mp);
1207 }
1208 
1209 /*
1210  * xgell_rx_ring_maddr_init
1211  */
1212 static void
1213 xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
1214 {
1215 	int i;
1216 	xgelldev_t *lldev = ring->lldev;
1217 	xge_hal_device_t *hldev = lldev->devh;
1218 	int slot_start;
1219 
1220 	xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
1221 
1222 	ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
1223 	ring->mmac.naddrfree = ring->mmac.naddr;
1224 
1225 	/*
1226 	 * For the default rx ring, the first MAC address is the factory one.
1227 	 * This will be set by the framework, so it needs to be cleared for now.
1228 	 */
1229 	(void) xge_hal_device_macaddr_clear(hldev, 0);
1230 
1231 	/*
1232 	 * Read the MAC address Configuration Memory from HAL.
1233 	 * The first slot will hold a factory MAC address, contents in other
1234 	 * slots will be FF:FF:FF:FF:FF:FF.
1235 	 */
1236 	slot_start = ring->index * 32;
1237 	for (i = 0; i < ring->mmac.naddr; i++) {
1238 		(void) xge_hal_device_macaddr_get(hldev, slot_start + i,
1239 		    ring->mmac.mac_addr + i);
1240 		ring->mmac.mac_addr_set[i] = B_FALSE;
1241 	}
1242 }
1243 
1244 static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
1245 
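/*
 * xgell_addmac
 *
 * Group callback (mgi_addmac) to add a unicast MAC address to this rx
 * ring (group). A free slot in the ring's MAC array is programmed into
 * the hardware and RTS is enabled for the section.
 */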
1246 static int
1247 xgell_addmac(void *arg, const uint8_t *mac_addr)
1248 {
1249 	xgell_rx_ring_t *ring = arg;
1250 	xgelldev_t *lldev = ring->lldev;
1251 	xge_hal_device_t *hldev = lldev->devh;
1252 	int slot;
1253 	int slot_start;
1254 
1255 	xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");
1256 
1257 	mutex_enter(&lldev->genlock);
1258 
1259 	if (ring->mmac.naddrfree == 0) {
1260 		mutex_exit(&lldev->genlock);
1261 		return (ENOSPC);
1262 	}
1263 
1264 	/* First slot is for factory MAC address */
1265 	for (slot = 0; slot < ring->mmac.naddr; slot++) {
1266 		if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
1267 			break;
1268 		}
1269 	}
1270 
1271 	ASSERT(slot < ring->mmac.naddr);
1272 
1273 	slot_start = ring->index * 32;
1274 
1275 	if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
1276 	    0) {
1277 		mutex_exit(&lldev->genlock);
1278 		return (EIO);
1279 	}
1280 
1281 	/* Simply enable RTS for the whole section. */
1282 	(void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
1283 
1284 	/*
1285 	 * Read back the MAC address from HAL to keep the array up to date.
1286 	 */
1287 	if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
1288 	    ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
1289 		(void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
		mutex_exit(&lldev->genlock);
1290 		return (EIO);
1291 	}
1292 
1293 	ring->mmac.mac_addr_set[slot] = B_TRUE;
1294 	ring->mmac.naddrfree--;
1295 
1296 	mutex_exit(&lldev->genlock);
1297 
1298 	return (0);
1299 }
1300 
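/*
 * xgell_remmac
 *
 * Group callback (mgi_remmac) to remove a previously added unicast MAC
 * address from this rx ring (group).
 */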
1301 static int
1302 xgell_remmac(void *arg, const uint8_t *mac_addr)
1303 {
1304 	xgell_rx_ring_t *ring = arg;
1305 	xgelldev_t *lldev = ring->lldev;
1306 	xge_hal_device_t *hldev = lldev->devh;
1307 	xge_hal_status_e status;
1308 	int slot;
1309 	int slot_start;
1310 
1311 	xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");
1312 
1313 	slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
1314 	if (slot == -1)
1315 		return (EINVAL);
1316 
1317 	slot_start = ring->index * 32;
1318 
1319 	/*
1320 	 * Adjust slot to the offset in the MAC array of this ring (group).
1321 	 */
1322 	slot -= slot_start;
1323 
1324 	/*
1325 	 * Only a pre-set MAC address for this ring (group) can be removed.
1326 	 */
1327 	if (slot < 0 || slot >= ring->mmac.naddr)
1328 		return (EINVAL);
1329 
1330 
1331 	xge_assert(ring->mmac.mac_addr_set[slot]);
1332 
1333 	mutex_enter(&lldev->genlock);
1334 	if (!ring->mmac.mac_addr_set[slot]) {
1335 		mutex_exit(&lldev->genlock);
1336 		/*
1337 		 * We should never reach here; the result will be unexpected. WARNING!
1338 		 */
1339 		xge_debug_ll(XGE_ERR,
1340 		    "%s%d: caller is trying to remove an unset MAC address",
1341 		    XGELL_IFNAME, lldev->instance);
1342 		return (ENXIO);
1343 	}
1344 
1345 	status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1346 	if (status != XGE_HAL_OK) {
1347 		mutex_exit(&lldev->genlock);
1348 		return (EIO);
1349 	}
1350 
1351 	ring->mmac.mac_addr_set[slot] = B_FALSE;
1352 	ring->mmac.naddrfree++;
1353 
1354 	/*
1355 	 * TODO: Disable MAC RTS if all addresses have been cleared.
1356 	 */
1357 
1358 	/*
1359 	 * Read back the MAC address from HAL to keep the array up to date.
1360 	 */
1361 	(void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
1362 	    ring->mmac.mac_addr + slot);
1363 	mutex_exit(&lldev->genlock);
1364 
1365 	return (0);
1366 }
1367 
1368 /*
1369  * Temporarily calling HAL functions directly.
1370  *
1371  * With an MSI-X implementation, no lock would be needed, so the interrupt
1372  * handling could be faster.
1373  */
1374 int
1375 xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
1376 {
1377 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1378 
1379 	mutex_enter(&ring->ring_lock);
1380 	xge_hal_device_rx_channel_disable_polling(ring->channelh);
1381 	mutex_exit(&ring->ring_lock);
1382 
1383 	return (0);
1384 }
1385 
1386 int
1387 xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
1388 {
1389 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1390 
1391 	mutex_enter(&ring->ring_lock);
1392 	xge_hal_device_rx_channel_enable_polling(ring->channelh);
1393 	mutex_exit(&ring->ring_lock);
1394 
1395 	return (0);
1396 }
1397 
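/*
 * xgell_rx_ring_start
 *
 * Record the generation number passed down by the MAC layer when the rx
 * ring is started; it is used when delivering packets via mac_rx_ring().
 */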
1398 static int
1399 xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1400 {
1401 	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
1402 
1403 	rx_ring->ring_gen_num = mr_gen_num;
1404 
1405 	return (0);
1406 }
1407 
1408 /*ARGSUSED*/
1409 static void
1410 xgell_rx_ring_stop(mac_ring_driver_t rh)
1411 {
1412 }
1413 
1414 /*ARGSUSED*/
1415 static int
1416 xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
1417 {
1418 	return (0);
1419 }
1420 
1421 /*ARGSUSED*/
1422 static void
1423 xgell_tx_ring_stop(mac_ring_driver_t rh)
1424 {
1425 }
1426 
1427 /*
1428  * Callback function for the MAC layer to register all rings.
1429  *
1430  * Xframe hardware doesn't support grouping explicitly, so the driver needs
1431  * to pretend to have resource groups. We may also optionally group all 8 rx
1432  * rings into a single group for increased scalability on CMT architectures,
1433  * or group one rx ring per group for maximum virtualization.
1434  *
1435  * TX grouping is actually done by framework, so, just register all TX
1436  * resources without grouping them.
1437  */
1438 void
1439 xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1440     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1441 {
1442 	xgelldev_t *lldev = (xgelldev_t *)arg;
1443 	mac_intr_t *mintr;
1444 
1445 	switch (rtype) {
1446 	case MAC_RING_TYPE_RX: {
1447 		xgell_rx_ring_t *rx_ring;
1448 
1449 		xge_assert(index < lldev->init_rx_rings);
1450 		xge_assert(rg_index < lldev->init_rx_groups);
1451 
1452 		/*
1453 		 * Performance vs. Virtualization
1454 		 */
1455 		if (lldev->init_rx_rings == lldev->init_rx_groups)
1456 			rx_ring = lldev->rx_ring + rg_index;
1457 		else
1458 			rx_ring = lldev->rx_ring + index;
1459 
1460 		rx_ring->ring_handle = rh;
1461 
1462 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
1463 		infop->mri_start = xgell_rx_ring_start;
1464 		infop->mri_stop = xgell_rx_ring_stop;
1465 		infop->mri_poll = xgell_rx_poll;
1466 		infop->mri_stat = xgell_rx_ring_stat;
1467 
1468 		mintr = &infop->mri_intr;
1469 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1470 		mintr->mi_enable = xgell_rx_ring_intr_enable;
1471 		mintr->mi_disable = xgell_rx_ring_intr_disable;
1472 
1473 		break;
1474 	}
1475 	case MAC_RING_TYPE_TX: {
1476 		xgell_tx_ring_t *tx_ring;
1477 
1478 		xge_assert(rg_index == -1);
1479 
1480 		xge_assert((index >= 0) && (index < lldev->init_tx_rings));
1481 
1482 		tx_ring = lldev->tx_ring + index;
1483 		tx_ring->ring_handle = rh;
1484 
1485 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
1486 		infop->mri_start = xgell_tx_ring_start;
1487 		infop->mri_stop = xgell_tx_ring_stop;
1488 		infop->mri_tx = xgell_ring_tx;
1489 		infop->mri_stat = xgell_tx_ring_stat;
1490 
1491 		break;
1492 	}
1493 	default:
1494 		break;
1495 	}
1496 }
1497 
1498 void
1499 xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
1500     mac_group_info_t *infop, mac_group_handle_t gh)
1501 {
1502 	xgelldev_t *lldev = (xgelldev_t *)arg;
1503 
1504 	switch (rtype) {
1505 	case MAC_RING_TYPE_RX: {
1506 		xgell_rx_ring_t *rx_ring;
1507 
1508 		xge_assert(index < lldev->init_rx_groups);
1509 
1510 		rx_ring = lldev->rx_ring + index;
1511 
1512 		rx_ring->group_handle = gh;
1513 
1514 		infop->mgi_driver = (mac_group_driver_t)rx_ring;
1515 		infop->mgi_start = NULL;
1516 		infop->mgi_stop = NULL;
1517 		infop->mgi_addmac = xgell_addmac;
1518 		infop->mgi_remmac = xgell_remmac;
1519 		infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
1520 
1521 		break;
1522 	}
1523 	case MAC_RING_TYPE_TX:
1524 		xge_assert(0);
1525 		break;
1526 	default:
1527 		break;
1528 	}
1529 }
1530 
1531 /*
1532  * xgell_maddr_set
1533  */
1534 static int
1535 xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
1536 {
1537 	xge_hal_device_t *hldev = lldev->devh;
1538 	xge_hal_status_e status;
1539 
1540 	xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
1541 
1542 	xge_debug_ll(XGE_TRACE,
1543 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1544 	    macaddr[0], macaddr[1], macaddr[2],
1545 	    macaddr[3], macaddr[4], macaddr[5]);
1546 
1547 	status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
1548 
1549 	if (status != XGE_HAL_OK) {
1550 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1551 		    XGELL_IFNAME, lldev->instance);
1552 		return (EIO);
1553 	}
1554 
1555 	return (0);
1556 }
1557 
1558 /*
1559  * xgell_rx_dtr_term
1560  *
1561  * Function will be called by HAL to terminate all DTRs for
1562  * Ring(s) type of channels.
1563  */
1564 static void
1565 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1566     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1567 {
1568 	xgell_rxd_priv_t *rxd_priv =
1569 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1570 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1571 
1572 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1573 		xgell_rx_ring_t *ring = rx_buffer->ring;
1574 
1575 		mutex_enter(&ring->bf_pool.pool_lock);
1576 		xge_hal_ring_dtr_free(channelh, dtrh);
1577 		xgell_rx_buffer_release(rx_buffer);
1578 		mutex_exit(&ring->bf_pool.pool_lock);
1579 	}
1580 }
1581 
1582 /*
1583  * To open a rx ring.
1584  */
1585 static boolean_t
1586 xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
1587 {
1588 	xge_hal_status_e status;
1589 	xge_hal_channel_attr_t attr;
1590 	xgelldev_t *lldev = rx_ring->lldev;
1591 	xge_hal_device_t *hldev = lldev->devh;
1592 
1593 	if (rx_ring->live)
1594 		return (B_TRUE);
1595 
1596 	/* Create the buffer pool first */
1597 	if (!xgell_rx_create_buffer_pool(rx_ring)) {
1598 		xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
1599 		    rx_ring->index);
1600 		return (B_FALSE);
1601 	}
1602 
1603 	/* Default ring initialization */
1604 	attr.post_qid		= rx_ring->index;
1605 	attr.compl_qid		= 0;
1606 	attr.callback		= xgell_rx_1b_callback;
1607 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1608 	attr.flags		= 0;
1609 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1610 	attr.dtr_init		= xgell_rx_dtr_replenish;
1611 	attr.dtr_term		= xgell_rx_dtr_term;
1612 	attr.userdata		= rx_ring;
1613 
1614 	status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
1615 	    XGE_HAL_CHANNEL_OC_NORMAL);
1616 	if (status != XGE_HAL_OK) {
1617 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1618 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1619 		(void) xgell_rx_destroy_buffer_pool(rx_ring);
1620 		return (B_FALSE);
1621 	}
1622 
1623 	xgell_rx_ring_maddr_init(rx_ring);
1624 
1625 	mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
1626 	    DDI_INTR_PRI(hldev->irqh));
1627 
1628 	rx_ring->poll_bytes = -1;
1629 	rx_ring->polled_bytes = 0;
1630 	rx_ring->poll_mp = NULL;
1631 	rx_ring->live = B_TRUE;
1632 
1633 	xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
1634 	    rx_ring->index);
1635 
1636 	return (B_TRUE);
1637 }
1638 
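/*
 * xgell_rx_ring_close
 *
 * Close the HAL ring channel and destroy the rx buffer pool for this ring.
 */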
1639 static void
1640 xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
1641 {
1642 	if (!rx_ring->live)
1643 		return;
1644 	xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1645 	rx_ring->channelh = NULL;
1646 	/* This may not clean up all used buffers, driver will handle it */
1647 	if (xgell_rx_destroy_buffer_pool(rx_ring))
1648 		rx_ring->live = B_FALSE;
1649 
1650 	mutex_destroy(&rx_ring->ring_lock);
1651 }
1652 
1653 /*
1654  * xgell_rx_open
1655  * @lldev: the link layer object
1656  *
1657  * Initialize and open all RX channels.
1658  */
1659 static boolean_t
1660 xgell_rx_open(xgelldev_t *lldev)
1661 {
1662 	xgell_rx_ring_t *rx_ring;
1663 	int i;
1664 
1665 	if (lldev->live_rx_rings != 0)
1666 		return (B_TRUE);
1667 
1668 	lldev->live_rx_rings = 0;
1669 
1670 	/*
1671 	 * Initialize all rings
1672 	 */
1673 	for (i = 0; i < lldev->init_rx_rings; i++) {
1674 		rx_ring = &lldev->rx_ring[i];
1675 		rx_ring->index = i;
1676 		rx_ring->lldev = lldev;
1677 		rx_ring->live = B_FALSE;
1678 
1679 		if (!xgell_rx_ring_open(rx_ring))
1680 			return (B_FALSE);
1681 
1682 		lldev->live_rx_rings++;
1683 	}
1684 
1685 	return (B_TRUE);
1686 }
1687 
1688 static void
1689 xgell_rx_close(xgelldev_t *lldev)
1690 {
1691 	xgell_rx_ring_t *rx_ring;
1692 	int i;
1693 
1694 	if (lldev->live_rx_rings == 0)
1695 		return;
1696 
1697 	/*
1698 	 * Close all rx rings
1699 	 */
1700 	for (i = 0; i < lldev->init_rx_rings; i++) {
1701 		rx_ring = &lldev->rx_ring[i];
1702 
1703 		if (rx_ring->live) {
1704 			xgell_rx_ring_close(rx_ring);
1705 			lldev->live_rx_rings--;
1706 		}
1707 	}
1708 
1709 	xge_assert(lldev->live_rx_rings == 0);
1710 }
1711 
1712 /*
1713  * xgell_tx_term
1714  *
1715  * Function will be called by HAL to terminate all DTRs for
1716  * Fifo(s) type of channels.
1717  */
1718 static void
1719 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1720     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1721 {
1722 	xgell_txd_priv_t *txd_priv =
1723 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1724 	mblk_t *mp = txd_priv->mblk;
1725 	int i;
1726 
1727 	/*
1728 	 * for Tx we must clean up the DTR *only* if it has been
1729 	 * posted!
1730 	 */
1731 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1732 		return;
1733 	}
1734 
1735 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1736 		xge_assert(txd_priv->dma_handles[i]);
1737 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1738 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1739 		txd_priv->dma_handles[i] = 0;
1740 	}
1741 
1742 	xge_hal_fifo_dtr_free(channelh, dtrh);
1743 
1744 	if (mp) {
1745 		txd_priv->mblk = NULL;
1746 		freemsg(mp);
1747 	}
1748 }
1749 
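/*
 * xgell_tx_ring_open
 *
 * Open the HAL fifo channel for a tx ring.
 */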
1750 static boolean_t
1751 xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
1752 {
1753 	xge_hal_status_e status;
1754 	xge_hal_channel_attr_t attr;
1755 	xgelldev_t *lldev = tx_ring->lldev;
1756 
1757 	if (tx_ring->live)
1758 		return (B_TRUE);
1759 
1760 	attr.post_qid		= tx_ring->index;
1761 	attr.compl_qid		= 0;
1762 	attr.callback		= xgell_xmit_compl;
1763 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1764 	attr.flags		= 0;
1765 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1766 	attr.dtr_init		= NULL;
1767 	attr.dtr_term		= xgell_tx_term;
1768 	attr.userdata		= tx_ring;
1769 
1770 	status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
1771 	    XGE_HAL_CHANNEL_OC_NORMAL);
1772 	if (status != XGE_HAL_OK) {
1773 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel got status "
1774 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1775 		return (B_FALSE);
1776 	}
1777 
1778 	tx_ring->live = B_TRUE;
1779 
1780 	return (B_TRUE);
1781 }
1782 
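/*
 * xgell_tx_ring_close
 *
 * Close the HAL fifo channel of a tx ring.
 */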
1783 static void
1784 xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
1785 {
1786 	if (!tx_ring->live)
1787 		return;
1788 	xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1789 	tx_ring->live = B_FALSE;
1790 }
1791 
1792 /*
1793  * xgell_tx_open
1794  * @lldev: the link layer object
1795  *
1796  * Initialize and open all TX channels.
1797  */
1798 static boolean_t
1799 xgell_tx_open(xgelldev_t *lldev)
1800 {
1801 	xgell_tx_ring_t *tx_ring;
1802 	int i;
1803 
1804 	if (lldev->live_tx_rings != 0)
1805 		return (B_TRUE);
1806 
1807 	lldev->live_tx_rings = 0;
1808 
1809 	/*
1810 	 * Enable rings in reserve (index) order to match the h/w sequence.
1811 	 */
1812 	for (i = 0; i < lldev->init_tx_rings; i++) {
1813 		tx_ring = &lldev->tx_ring[i];
1814 		tx_ring->index = i;
1815 		tx_ring->lldev = lldev;
1816 		tx_ring->live = B_FALSE;
1817 
1818 		if (!xgell_tx_ring_open(tx_ring))
1819 			return (B_FALSE);
1820 
1821 		lldev->live_tx_rings++;
1822 	}
1823 
1824 	return (B_TRUE);
1825 }
1826 
1827 static void
1828 xgell_tx_close(xgelldev_t *lldev)
1829 {
1830 	xgell_tx_ring_t *tx_ring;
1831 	int i;
1832 
1833 	if (lldev->live_tx_rings == 0)
1834 		return;
1835 
1836 	/*
1837 	 * Close all tx rings that have been opened.
1838 	 */
1839 	for (i = 0; i < lldev->init_tx_rings; i++) {
1840 		tx_ring = &lldev->tx_ring[i];
1841 		if (tx_ring->live) {
1842 			xgell_tx_ring_close(tx_ring);
1843 			lldev->live_tx_rings--;
1844 		}
1845 	}
1846 }
1847 
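/*
 * xgell_initiate_start
 *
 * Validate and program the MTU, enable the device, open all rx/tx rings
 * and finally enable interrupts.
 */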
1848 static int
1849 xgell_initiate_start(xgelldev_t *lldev)
1850 {
1851 	xge_hal_status_e status;
1852 	xge_hal_device_t *hldev = lldev->devh;
1853 	int maxpkt = hldev->config.mtu;
1854 
1855 	/* check initial mtu before enabling the device */
1856 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1857 	if (status != XGE_HAL_OK) {
1858 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1859 		    XGELL_IFNAME, lldev->instance, maxpkt);
1860 		return (EINVAL);
1861 	}
1862 
1863 	/* set initial mtu before enabling the device */
1864 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1865 	if (status != XGE_HAL_OK) {
1866 		xge_debug_ll(XGE_ERR, "%s%d: cannot set new MTU %d",
1867 		    XGELL_IFNAME, lldev->instance, maxpkt);
1868 		return (EIO);
1869 	}
1870 
1871 	/* tune jumbo/normal frame UFC counters */
1872 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
1873 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1874 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1875 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1876 
1877 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
1878 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
1879 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1880 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1881 
1882 	/* now, enable the device */
1883 	status = xge_hal_device_enable(lldev->devh);
1884 	if (status != XGE_HAL_OK) {
1885 		xge_debug_ll(XGE_ERR, "%s%d: cannot enable the device",
1886 		    XGELL_IFNAME, lldev->instance);
1887 		return (EIO);
1888 	}
1889 
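	/*
	 * Open the Rx rings first, then the Tx rings.  If either step fails,
	 * disable the device, tear down whatever was opened and give the
	 * hardware time to quiesce before returning ENOMEM.
	 */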
1890 	if (!xgell_rx_open(lldev)) {
1891 		status = xge_hal_device_disable(lldev->devh);
1892 		if (status != XGE_HAL_OK) {
1893 			u64 adapter_status;
1894 			(void) xge_hal_device_status(lldev->devh,
1895 			    &adapter_status);
1896 			xge_debug_ll(XGE_ERR, "%s%d: cannot safely disable "
1897 			    "the device. adapter status 0x%"PRIx64
1898 			    " returned status %d",
1899 			    XGELL_IFNAME, lldev->instance,
1900 			    (uint64_t)adapter_status, status);
1901 		}
1902 		xgell_rx_close(lldev);
1903 		xge_os_mdelay(1500);
1904 		return (ENOMEM);
1905 	}
1906 
1907 	if (!xgell_tx_open(lldev)) {
1908 		status = xge_hal_device_disable(lldev->devh);
1909 		if (status != XGE_HAL_OK) {
1910 			u64 adapter_status;
1911 			(void) xge_hal_device_status(lldev->devh,
1912 			    &adapter_status);
1913 			xge_debug_ll(XGE_ERR, "%s%d: cannot safely disable "
1914 			    "the device. adapter status 0x%"PRIx64
1915 			    " returned status %d",
1916 			    XGELL_IFNAME, lldev->instance,
1917 			    (uint64_t)adapter_status, status);
1918 		}
1919 		xgell_tx_close(lldev);
1920 		xgell_rx_close(lldev);
1921 		xge_os_mdelay(1500);
1922 		return (ENOMEM);
1923 	}
1924 
1925 	/* time to enable interrupts */
1926 	(void) xge_enable_intrs(lldev);
1927 	xge_hal_device_intr_enable(lldev->devh);
1928 
1929 	lldev->is_initialized = 1;
1930 
1931 	return (0);
1932 }
1933 
1934 static void
1935 xgell_initiate_stop(xgelldev_t *lldev)
1936 {
1937 	xge_hal_status_e status;
1938 
1939 	lldev->is_initialized = 0;
1940 
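	/*
	 * Teardown order: quiesce the adapter first, then the interrupt
	 * path, then flush the HAL event queue and close the Rx/Tx rings.
	 */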
1941 	status = xge_hal_device_disable(lldev->devh);
1942 	if (status != XGE_HAL_OK) {
1943 		u64 adapter_status;
1944 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1945 		xge_debug_ll(XGE_ERR, "%s%d: cannot safely disable "
1946 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1947 		    XGELL_IFNAME, lldev->instance,
1948 		    (uint64_t)adapter_status, status);
1949 	}
1950 	xge_hal_device_intr_disable(lldev->devh);
1951 	/* disable OS ISR's */
1952 	xge_disable_intrs(lldev);
1953 
1954 	xge_debug_ll(XGE_TRACE, "%s",
1955 	    "waiting for device irq to become quiescent...");
1956 	xge_os_mdelay(1500);
1957 
1958 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1959 
1960 	xgell_rx_close(lldev);
1961 	xgell_tx_close(lldev);
1962 }
1963 
1964 /*
1965  * xgell_m_start
1966  * @arg: pointer to the device private structure (lldev)
1967  *
1968  * This function is called by the MAC layer to enable the XFRAME
1969  * firmware to generate interrupts and to prepare the
1970  * driver to call mac_rx() for delivering received packets
1971  * to the MAC layer.
1972  */
1973 static int
1974 xgell_m_start(void *arg)
1975 {
1976 	xgelldev_t *lldev = arg;
1977 	xge_hal_device_t *hldev = lldev->devh;
1978 	int ret;
1979 
1980 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1981 	    lldev->instance);
1982 
1983 	mutex_enter(&lldev->genlock);
1984 
1985 	if (lldev->is_initialized) {
1986 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1987 		    XGELL_IFNAME, lldev->instance);
1988 		mutex_exit(&lldev->genlock);
1989 		return (EINVAL);
1990 	}
1991 
1992 	hldev->terminating = 0;
1993 	if ((ret = xgell_initiate_start(lldev)) != 0) {
1994 		mutex_exit(&lldev->genlock);
1995 		return (ret);
1996 	}
1997 
1998 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1999 
2000 	mutex_exit(&lldev->genlock);
2001 
2002 	return (0);
2003 }
2004 
2005 /*
2006  * xgell_m_stop
2007  * @arg: pointer to the device private data (lldev)
2008  *
2009  * This function is called by the MAC Layer to disable
2010  * the XFRAME firmware from generating interrupts and
2011  * also stop the driver from calling mac_rx() for
2012  * delivering data packets to the MAC Layer.
2013  */
2014 static void
2015 xgell_m_stop(void *arg)
2016 {
2017 	xgelldev_t *lldev = arg;
2018 	xge_hal_device_t *hldev = lldev->devh;
2019 
2020 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
2021 
2022 	mutex_enter(&lldev->genlock);
2023 	if (!lldev->is_initialized) {
2024 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
2025 		mutex_exit(&lldev->genlock);
2026 		return;
2027 	}
2028 
2029 	xge_hal_device_terminating(hldev);
2030 	xgell_initiate_stop(lldev);
2031 
2032 	/* reset device */
2033 	(void) xge_hal_device_reset(lldev->devh);
2034 
2035 	mutex_exit(&lldev->genlock);
2036 
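	/* Cancel the periodic device poll armed in xgell_m_start(), if any. */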
2037 	if (lldev->timeout_id != 0) {
2038 		(void) untimeout(lldev->timeout_id);
2039 	}
2040 
2041 	xge_debug_ll(XGE_TRACE, "%s", "returning control to the MAC layer...");
2042 }
2043 
2044 /*
2045  * xgell_onerr_reset
2046  * @lldev: pointer to xgelldev_t structure
2047  *
2048  * This function is called by the HAL event framework to reset the HW.
2049  * It must be called with genlock held.
2050  */
2051 int
2052 xgell_onerr_reset(xgelldev_t *lldev)
2053 {
2054 	int rc = 0;
2055 
2056 	if (!lldev->is_initialized) {
2057 		xge_debug_ll(XGE_ERR, "%s%d: cannot reset",
2058 		    XGELL_IFNAME, lldev->instance);
2059 		return (rc);
2060 	}
2061 
2062 	lldev->in_reset = 1;
2063 	xgell_initiate_stop(lldev);
2064 
2065 	/* reset device */
2066 	(void) xge_hal_device_reset(lldev->devh);
2067 
2068 	rc = xgell_initiate_start(lldev);
2069 	lldev->in_reset = 0;
2070 
2071 	return (rc);
2072 }
2073 
2074 /*
2075  * xgell_m_multicst
2076  * @arg: pointer to the device private structure (lldev)
2077  * @add: B_TRUE to enable, B_FALSE to disable
2078  * @mc_addr: multicast MAC address to add or remove
2079  *
2080  * This function is called by the MAC layer to enable or
2081  * disable device-level reception of specific multicast addresses.
2082  */
2083 static int
2084 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
2085 {
2086 	xge_hal_status_e status;
2087 	xgelldev_t *lldev = (xgelldev_t *)arg;
2088 	xge_hal_device_t *hldev = lldev->devh;
2089 
2090 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
2091 
2092 	mutex_enter(&lldev->genlock);
2093 
2094 	if (!lldev->is_initialized) {
2095 		xge_debug_ll(XGE_ERR, "%s%d: cannot set multicast",
2096 		    XGELL_IFNAME, lldev->instance);
2097 		mutex_exit(&lldev->genlock);
2098 		return (EIO);
2099 	}
2100 
2101 	/* FIXME: missing HAL functionality: enable_one() */
2102 
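	/*
	 * Without per-address filtering in the HAL, this simply toggles
	 * reception of all multicast frames.
	 */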
2103 	status = (add) ?
2104 	    xge_hal_device_mcast_enable(hldev) :
2105 	    xge_hal_device_mcast_disable(hldev);
2106 
2107 	if (status != XGE_HAL_OK) {
2108 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
2109 		    add ? "enable" : "disable", status);
2110 		mutex_exit(&lldev->genlock);
2111 		return (EIO);
2112 	}
2113 
2114 	mutex_exit(&lldev->genlock);
2115 
2116 	return (0);
2117 }
2118 
2119 
2120 /*
2121  * xgell_m_promisc
2122  * @arg: pointer to the device private structure (lldev)
2123  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
2124  *
2125  * This function is called by the MAC layer to enable or
2126  * disable the reception of all packets on the medium.
2127  */
2128 static int
2129 xgell_m_promisc(void *arg, boolean_t on)
2130 {
2131 	xgelldev_t *lldev = (xgelldev_t *)arg;
2132 	xge_hal_device_t *hldev = lldev->devh;
2133 
2134 	mutex_enter(&lldev->genlock);
2135 
2136 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
2137 
2138 	if (!lldev->is_initialized) {
2139 		xge_debug_ll(XGE_ERR, "%s%d: cannot set promiscuous mode",
2140 		    XGELL_IFNAME, lldev->instance);
2141 		mutex_exit(&lldev->genlock);
2142 		return (EIO);
2143 	}
2144 
2145 	if (on) {
2146 		xge_hal_device_promisc_enable(hldev);
2147 	} else {
2148 		xge_hal_device_promisc_disable(hldev);
2149 	}
2150 
2151 	mutex_exit(&lldev->genlock);
2152 
2153 	return (0);
2154 }
2155 
2156 /*
2157  * xgell_m_stat
2158  * @arg: pointer to the device private structure (lldev)
2159  *
2160  * This function is called by the MAC layer to get network statistics
2161  * from the driver.
2162  */
2163 static int
2164 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
2165 {
2166 	xge_hal_stats_hw_info_t *hw_info;
2167 	xgelldev_t *lldev = (xgelldev_t *)arg;
2168 	xge_hal_device_t *hldev = lldev->devh;
2169 
2170 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
2171 
2172 	mutex_enter(&lldev->genlock);
2173 
2174 	if (!lldev->is_initialized) {
2175 		mutex_exit(&lldev->genlock);
2176 		return (EAGAIN);
2177 	}
2178 
2179 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
2180 		mutex_exit(&lldev->genlock);
2181 		return (EAGAIN);
2182 	}
2183 
2184 	switch (stat) {
2185 	case MAC_STAT_IFSPEED:
2186 		*val = 10000000000ull; /* 10G */
2187 		break;
2188 
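	/*
	 * Most hardware counters below are kept as a 32-bit base register
	 * plus a 32-bit overflow register; they are composed into a single
	 * 64-bit value as ((u64)oflow << 32) | base.
	 */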
2189 	case MAC_STAT_MULTIRCV:
2190 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
2191 		    hw_info->rmac_vld_mcst_frms;
2192 		break;
2193 
2194 	case MAC_STAT_BRDCSTRCV:
2195 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
2196 		    hw_info->rmac_vld_bcst_frms;
2197 		break;
2198 
2199 	case MAC_STAT_MULTIXMT:
2200 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
2201 		    hw_info->tmac_mcst_frms;
2202 		break;
2203 
2204 	case MAC_STAT_BRDCSTXMT:
2205 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
2206 		    hw_info->tmac_bcst_frms;
2207 		break;
2208 
2209 	case MAC_STAT_RBYTES:
2210 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
2211 		    hw_info->rmac_ttl_octets;
2212 		break;
2213 
2214 	case MAC_STAT_NORCVBUF:
2215 		*val = hw_info->rmac_drop_frms;
2216 		break;
2217 
2218 	case MAC_STAT_IERRORS:
2219 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
2220 		    hw_info->rmac_discarded_frms;
2221 		break;
2222 
2223 	case MAC_STAT_OBYTES:
2224 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
2225 		    hw_info->tmac_ttl_octets;
2226 		break;
2227 
2228 	case MAC_STAT_NOXMTBUF:
2229 		*val = hw_info->tmac_drop_frms;
2230 		break;
2231 
2232 	case MAC_STAT_OERRORS:
2233 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
2234 		    hw_info->tmac_any_err_frms;
2235 		break;
2236 
2237 	case MAC_STAT_IPACKETS:
2238 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
2239 		    hw_info->rmac_vld_frms;
2240 		break;
2241 
2242 	case MAC_STAT_OPACKETS:
2243 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
2244 		    hw_info->tmac_frms;
2245 		break;
2246 
2247 	case ETHER_STAT_FCS_ERRORS:
2248 		*val = hw_info->rmac_fcs_err_frms;
2249 		break;
2250 
2251 	case ETHER_STAT_TOOLONG_ERRORS:
2252 		*val = hw_info->rmac_long_frms;
2253 		break;
2254 
2255 	case ETHER_STAT_LINK_DUPLEX:
2256 		*val = LINK_DUPLEX_FULL;
2257 		break;
2258 
2259 	default:
2260 		mutex_exit(&lldev->genlock);
2261 		return (ENOTSUP);
2262 	}
2263 
2264 	mutex_exit(&lldev->genlock);
2265 
2266 	return (0);
2267 }
2268 
2269 /*
2270  * Retrieve a value for one of the statistics for a particular rx ring
2271  */
2272 int
2273 xgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2274 {
2275 	xgell_rx_ring_t	*rx_ring = (xgell_rx_ring_t *)rh;
2276 
2277 	switch (stat) {
2278 	case MAC_STAT_RBYTES:
2279 		*val = rx_ring->rx_bytes;
2280 		break;
2281 
2282 	case MAC_STAT_IPACKETS:
2283 		*val = rx_ring->rx_pkts;
2284 		break;
2285 
2286 	default:
2287 		*val = 0;
2288 		return (ENOTSUP);
2289 	}
2290 
2291 	return (0);
2292 }
2293 
2294 /*
2295  * Retrieve a value for one of the statistics for a particular tx ring
2296  */
2297 int
2298 xgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
2299 {
2300 	xgell_tx_ring_t	*tx_ring = (xgell_tx_ring_t *)rh;
2301 
2302 	switch (stat) {
2303 	case MAC_STAT_OBYTES:
2304 		*val = tx_ring->tx_bytes;
2305 		break;
2306 
2307 	case MAC_STAT_OPACKETS:
2308 		*val = tx_ring->tx_pkts;
2309 		break;
2310 
2311 	default:
2312 		*val = 0;
2313 		return (ENOTSUP);
2314 	}
2315 
2316 	return (0);
2317 }
2318 
2319 /*
2320  * xgell_device_alloc - Allocate new LL device
2321  */
2322 int
2323 xgell_device_alloc(xge_hal_device_h devh,
2324     dev_info_t *dev_info, xgelldev_t **lldev_out)
2325 {
2326 	xgelldev_t *lldev;
2327 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2328 	int instance = ddi_get_instance(dev_info);
2329 
2330 	*lldev_out = NULL;
2331 
2332 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
2333 	    XGELL_IFNAME, instance);
2334 
2335 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
2336 
2337 	lldev->devh = hldev;
2338 	lldev->instance = instance;
2339 	lldev->dev_info = dev_info;
2340 
2341 	*lldev_out = lldev;
2342 
2343 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
2344 
2345 	return (DDI_SUCCESS);
2346 }
2347 
2348 /*
2349  * xgell_device_free
2350  */
2351 void
2352 xgell_device_free(xgelldev_t *lldev)
2353 {
2354 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
2355 	    XGELL_IFNAME, lldev->instance);
2356 
2357 	kmem_free(lldev, sizeof (xgelldev_t));
2358 }
2359 
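/*
 * Illustrative life-cycle sketch (not code in this file; the attach and
 * detach paths that actually drive these calls live elsewhere in the
 * driver):
 *
 *	xgelldev_t *lldev;
 *
 *	(void) xgell_device_alloc(devh, dev_info, &lldev);
 *	(void) xgell_device_register(lldev, &config);
 *	...
 *	(void) xgell_device_unregister(lldev);
 *	xgell_device_free(lldev);
 */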
2360 /*
2361  * xgell_ioctl
2362  */
2363 static void
2364 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2365 {
2366 	xgelldev_t *lldev = arg;
2367 	struct iocblk *iocp;
2368 	int err = 0;
2369 	int cmd;
2370 	int need_privilege = 1;
2371 	int ret = 0;
2372 
2373 
2374 	iocp = (struct iocblk *)mp->b_rptr;
2375 	iocp->ioc_error = 0;
2376 	cmd = iocp->ioc_cmd;
2377 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
2378 	switch (cmd) {
2379 	case ND_GET:
2380 		need_privilege = 0;
2381 		/* FALLTHRU */
2382 	case ND_SET:
2383 		break;
2384 	default:
2385 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
2386 		miocnak(wq, mp, 0, EINVAL);
2387 		return;
2388 	}
2389 
2390 	if (need_privilege) {
2391 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2392 		if (err != 0) {
2393 			xge_debug_ll(XGE_ERR,
2394 			    "drv_priv(): rejected cmd 0x%x, err %d",
2395 			    cmd, err);
2396 			miocnak(wq, mp, 0, err);
2397 			return;
2398 		}
2399 	}
2400 
2401 	switch (cmd) {
2402 	case ND_GET:
2403 		/*
2404 		 * If nd_getset() returns B_FALSE, the command was
2405 		 * not valid (e.g. unknown name), so we just tell the
2406 		 * top-level ioctl code to send a NAK (with code EINVAL).
2407 		 *
2408 		 * Otherwise, nd_getset() will have built the reply to
2409 		 * be sent (but not actually sent it), so we tell the
2410 		 * caller to send the prepared reply.
2411 		 */
2412 		ret = nd_getset(wq, lldev->ndp, mp);
2413 		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
2414 		break;
2415 
2416 	case ND_SET:
2417 		ret = nd_getset(wq, lldev->ndp, mp);
2418 		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
2419 		break;
2420 
2421 	default:
2422 		break;
2423 	}
2424 
2425 	if (ret == B_FALSE) {
2426 		xge_debug_ll(XGE_ERR,
2427 		    "nd_getset(): rejected cmd 0x%x, err %d",
2428 		    cmd, err);
2429 		miocnak(wq, mp, 0, EINVAL);
2430 	} else {
2431 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2432 		    M_IOCACK : M_IOCNAK;
2433 		qreply(wq, mp);
2434 	}
2435 }
2436 
2437 
2438 static boolean_t
2439 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2440 {
2441 	xgelldev_t *lldev = arg;
2442 
2443 	xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
2444 
2445 	switch (cap) {
2446 	case MAC_CAPAB_HCKSUM: {
2447 		uint32_t *hcksum_txflags = cap_data;
2448 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2449 		    HCKSUM_IPHDRCKSUM;
2450 		break;
2451 	}
2452 	case MAC_CAPAB_LSO: {
2453 		mac_capab_lso_t *cap_lso = cap_data;
2454 
2455 		if (lldev->config.lso_enable) {
2456 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2457 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
2458 			break;
2459 		} else {
2460 			return (B_FALSE);
2461 		}
2462 	}
2463 	case MAC_CAPAB_RINGS: {
2464 		mac_capab_rings_t *cap_rings = cap_data;
2465 
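		/*
		 * Rx rings are exposed in static groups; Tx rings are
		 * exposed individually, without groups.
		 */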
2466 		switch (cap_rings->mr_type) {
2467 		case MAC_RING_TYPE_RX:
2468 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2469 			cap_rings->mr_rnum = lldev->init_rx_rings;
2470 			cap_rings->mr_gnum = lldev->init_rx_groups;
2471 			cap_rings->mr_rget = xgell_fill_ring;
2472 			cap_rings->mr_gget = xgell_fill_group;
2473 			break;
2474 		case MAC_RING_TYPE_TX:
2475 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2476 			cap_rings->mr_rnum = lldev->init_tx_rings;
2477 			cap_rings->mr_gnum = 0;
2478 			cap_rings->mr_rget = xgell_fill_ring;
2479 			cap_rings->mr_gget = NULL;
2480 			break;
2481 		default:
2482 			break;
2483 		}
2484 		break;
2485 	}
2486 	default:
2487 		return (B_FALSE);
2488 	}
2489 	return (B_TRUE);
2490 }
2491 
2492 static int
2493 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2494 {
2495 	xgelldev_t *lldev = (xgelldev_t *)cp;
2496 	xge_hal_status_e status;
2497 	int count = 0, retsize;
2498 	char *buf;
2499 
2500 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2501 	if (buf == NULL) {
2502 		return (ENOSPC);
2503 	}
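	/*
	 * Append the tmac, rmac, pci, sw_dev and hal statistics sections
	 * one after another into buf; count tracks the running offset.
	 */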
2504 
2505 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2506 	    buf, &retsize);
2507 	if (status != XGE_HAL_OK) {
2508 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2509 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2510 		return (EINVAL);
2511 	}
2512 	count += retsize;
2513 
2514 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2515 	    XGELL_STATS_BUFSIZE - count,
2516 	    buf+count, &retsize);
2517 	if (status != XGE_HAL_OK) {
2518 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2519 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2520 		return (EINVAL);
2521 	}
2522 	count += retsize;
2523 
2524 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2525 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2526 	if (status != XGE_HAL_OK) {
2527 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2528 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2529 		return (EINVAL);
2530 	}
2531 	count += retsize;
2532 
2533 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2534 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2535 	if (status != XGE_HAL_OK) {
2536 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2537 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2538 		return (EINVAL);
2539 	}
2540 	count += retsize;
2541 
2542 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2543 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2544 	if (status != XGE_HAL_OK) {
2545 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2546 		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
2547 		return (EINVAL);
2548 	}
2549 	count += retsize;
2550 
2551 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2552 	(void) mi_mpprintf(mp, "%s", buf);
2553 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2554 
2555 	return (0);
2556 }
2557 
2558 static int
2559 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2560 {
2561 	xgelldev_t *lldev = (xgelldev_t *)cp;
2562 	xge_hal_status_e status;
2563 	int retsize;
2564 	char *buf;
2565 
2566 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2567 	if (buf == NULL) {
2568 		return (ENOSPC);
2569 	}
2570 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2571 	    buf, &retsize);
2572 	if (status != XGE_HAL_OK) {
2573 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2574 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2575 		return (EINVAL);
2576 	}
2577 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2578 	(void) mi_mpprintf(mp, "%s", buf);
2579 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2580 
2581 	return (0);
2582 }
2583 
2584 static int
2585 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2586 {
2587 	xgelldev_t *lldev = (xgelldev_t *)cp;
2588 	xge_hal_status_e status;
2589 	int retsize;
2590 	char *buf;
2591 
2592 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2593 	if (buf == NULL) {
2594 		return (ENOSPC);
2595 	}
2596 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2597 	    buf, &retsize);
2598 	if (status != XGE_HAL_OK) {
2599 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2600 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2601 		return (EINVAL);
2602 	}
2603 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2604 	(void) mi_mpprintf(mp, "%s", buf);
2605 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2606 
2607 	return (0);
2608 }
2609 
2610 static unsigned long bar0_offset = 0x110; /* adapter_control */
2611 
2612 static int
2613 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2614 {
2615 	xgelldev_t *lldev = (xgelldev_t *)cp;
2616 	xge_hal_status_e status;
2617 	int retsize;
2618 	char *buf;
2619 
2620 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2621 	if (buf == NULL) {
2622 		return (ENOSPC);
2623 	}
2624 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2625 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2626 	if (status != XGE_HAL_OK) {
2627 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2628 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2629 		return (EINVAL);
2630 	}
2631 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2632 	(void) mi_mpprintf(mp, "%s", buf);
2633 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2634 
2635 	return (0);
2636 }
2637 
2638 static int
2639 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2640 {
2641 	unsigned long old_offset = bar0_offset;
2642 	char *end;
2643 
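	/* Skip an optional 0x/0X prefix, then parse the offset as hex. */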
2644 	if (value && *value == '0' &&
2645 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2646 		value += 2;
2647 	}
2648 
2649 	bar0_offset = mi_strtol(value, &end, 16);
2650 	if (end == value) {
2651 		bar0_offset = old_offset;
2652 		return (EINVAL);
2653 	}
2654 
2655 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2656 
2657 	return (0);
2658 }
2659 
2660 static int
2661 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2662 {
2663 	char *buf;
2664 
2665 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2666 	if (buf == NULL) {
2667 		return (ENOSPC);
2668 	}
2669 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2670 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2671 
2672 	return (0);
2673 }
2674 
2675 static int
2676 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2677     cred_t *credp)
2678 {
2679 	int level;
2680 	char *end;
2681 
2682 	level = mi_strtol(value, &end, 10);
2683 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2684 		return (EINVAL);
2685 	}
2686 
2687 	xge_hal_driver_debug_level_set(level);
2688 
2689 	return (0);
2690 }
2691 
2692 static int
2693 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2694 {
2695 	char *buf;
2696 
2697 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2698 	if (buf == NULL) {
2699 		return (ENOSPC);
2700 	}
2701 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2702 	    xge_hal_driver_debug_module_mask());
2703 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2704 
2705 	return (0);
2706 }
2707 
2708 static int
2709 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2710 			    cred_t *credp)
2711 {
2712 	u32 mask;
2713 	char *end;
2714 
2715 	if (value && *value == '0' &&
2716 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2717 		value += 2;
2718 	}
2719 
2720 	mask = mi_strtol(value, &end, 16);
2721 	if (end == value) {
2722 		return (EINVAL);
2723 	}
2724 
2725 	xge_hal_driver_debug_module_mask_set(mask);
2726 
2727 	return (0);
2728 }
2729 
2730 static int
2731 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2732 {
2733 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2734 	xge_hal_status_e status;
2735 	int retsize;
2736 	char *buf;
2737 
2738 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2739 	if (buf == NULL) {
2740 		return (ENOSPC);
2741 	}
2742 	status = xge_hal_aux_device_config_read(lldev->devh,
2743 	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2744 	if (status != XGE_HAL_OK) {
2745 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2746 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2747 		    status);
2748 		return (EINVAL);
2749 	}
2750 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2751 	(void) mi_mpprintf(mp, "%s", buf);
2752 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2753 
2754 	return (0);
2755 }
2756 
2757 /*
2758  * xgell_device_register
2759  * @lldev: pointer to the LL device allocated by xgell_device_alloc()
2760  * @config: pointer to this network device configuration
2761  *
2762  * This function will allocate a MAC handle and register the
2763  * network device with the MAC layer.
2764  */
2765 int
2766 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2767 {
2768 	mac_register_t *macp = NULL;
2769 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2770 
2771 	/*
2772 	 * Initialize the NDD interface used for internal debugging.
2773 	 */
2774 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2775 	    (caddr_t)lldev) == B_FALSE)
2776 		goto xgell_ndd_fail;
2777 
2778 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2779 	    (caddr_t)lldev) == B_FALSE)
2780 		goto xgell_ndd_fail;
2781 
2782 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2783 	    (caddr_t)lldev) == B_FALSE)
2784 		goto xgell_ndd_fail;
2785 
2786 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2787 	    (caddr_t)lldev) == B_FALSE)
2788 		goto xgell_ndd_fail;
2789 
2790 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2791 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2792 		goto xgell_ndd_fail;
2793 
2794 	if (nd_load(&lldev->ndp, "debug_module_mask",
2795 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2796 	    (caddr_t)lldev) == B_FALSE)
2797 		goto xgell_ndd_fail;
2798 
2799 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2800 	    (caddr_t)lldev) == B_FALSE)
2801 		goto xgell_ndd_fail;
2802 
2803 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2804 
2805 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
2806 	    DDI_INTR_PRI(hldev->irqh));
2807 
2808 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2809 		goto xgell_register_fail;
2810 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2811 	macp->m_driver = lldev;
2812 	macp->m_dip = lldev->dev_info;
2813 	macp->m_src_addr = hldev->macaddr[0];
2814 	macp->m_callbacks = &xgell_m_callbacks;
2815 	macp->m_min_sdu = 0;
2816 	macp->m_max_sdu = hldev->config.mtu;
2817 	macp->m_margin = VLAN_TAGSZ;
2818 	macp->m_v12n = MAC_VIRT_LEVEL1;
2819 
2820 	/*
2821 	 * MAC Registration.
2822 	 */
2823 	if (mac_register(macp, &lldev->mh) != 0)
2824 		goto xgell_register_fail;
2825 
2826 	/* Always free the macp after register */
2827 	if (macp != NULL)
2828 		mac_free(macp);
2829 
2830 	/* Compute tx_copied_max from the fifo configuration. */
2831 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2832 	    hldev->config.fifo.alignment_size *
2833 	    hldev->config.fifo.max_aligned_frags;
2834 
2835 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2836 	    XGELL_IFNAME, lldev->instance);
2837 
2838 	return (DDI_SUCCESS);
2839 
2840 xgell_ndd_fail:
2841 	nd_free(&lldev->ndp);
2842 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2843 	return (DDI_FAILURE);
2844 
2845 xgell_register_fail:
2846 	if (macp != NULL)
2847 		mac_free(macp);
2848 	nd_free(&lldev->ndp);
2849 	mutex_destroy(&lldev->genlock);
2850 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2851 	return (DDI_FAILURE);
2852 }
2853 
2854 /*
2855  * xgell_device_unregister
2856  * @lldev: pointer to a valid LL device
2857  *
2858  * This function will unregister the network device and release
2859  * its NDD and lock resources.
2860  */
2861 int
2862 xgell_device_unregister(xgelldev_t *lldev)
2863 {
2864 	if (mac_unregister(lldev->mh) != 0) {
2865 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2866 		    XGELL_IFNAME, lldev->instance);
2867 		return (DDI_FAILURE);
2868 	}
2869 
2870 	mutex_destroy(&lldev->genlock);
2871 
2872 	nd_free(&lldev->ndp);
2873 
2874 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2875 	    XGELL_IFNAME, lldev->instance);
2876 
2877 	return (DDI_SUCCESS);
2878 }
2879