/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 NetXen, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/dlpi.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>

#include <sys/gld.h>
#include <netinet/in.h>
#include <inet/ip.h>
#include <inet/tcp.h>

#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/pattr.h>
#include <sys/strsubr.h>
#include <sys/ddi_impldefs.h>
#include <sys/task.h>

#include "unm_nic_hw.h"
#include "unm_nic.h"

#include "nic_phan_reg.h"
#include "unm_nic_ioctl.h"
#include "nic_cmn.h"
#include "unm_version.h"
#include "unm_brdcfg.h"

#if defined(lint)
#undef MBLKL
#define	MBLKL(_mp_)	((uintptr_t)(_mp_)->b_wptr - (uintptr_t)(_mp_)->b_rptr)
#endif /* lint */

#undef UNM_LOOPBACK
#undef SINGLE_DMA_BUF

#define	UNM_ADAPTER_UP_MAGIC	777
#define	VLAN_TAGSZ		0x4

#define	index2rxbuf(_rdp_, _idx_)	((_rdp_)->rx_buf_pool + (_idx_))
#define	rxbuf2index(_rdp_, _bufp_)	((_bufp_) - (_rdp_)->rx_buf_pool)

/*
 * The receive ISR processes at most NX_RX_MAXBUFS incoming packets, then
 * posts as many buffers as packets processed. This loop repeats as required
 * to process all incoming packets delivered in a single interrupt. A higher
 * value of NX_RX_MAXBUFS improves performance by posting rx buffers less
 * frequently, but at the cost of not replenishing quickly enough when the
 * card is running out of rx buffers.
 */
#define	NX_RX_THRESHOLD		32
#define	NX_RX_MAXBUFS		128
#define	NX_MAX_TXCOMPS		256

extern void unm_free_tx_buffers(unm_adapter *adapter);
extern void unm_free_tx_dmahdl(unm_adapter *adapter);
extern void unm_destroy_rx_ring(unm_rcv_desc_ctx_t *rcv_desc);

static void unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
    uint32_t ringid);
static mblk_t *unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc);
static int unm_process_rcv_ring(unm_adapter *, int);
static int unm_process_cmd_ring(struct unm_adapter_s *adapter);

static int unm_nic_do_ioctl(unm_adapter *adapter, queue_t *q, mblk_t *mp);
static void unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q,
    mblk_t *mp);

/* GLDv3 interface functions */
static int ntxn_m_start(void *);
static void ntxn_m_stop(void *);
static int ntxn_m_multicst(void *, boolean_t, const uint8_t *);
static int ntxn_m_promisc(void *, boolean_t);
static int ntxn_m_stat(void *arg, uint_t stat, uint64_t *val);
static mblk_t *ntxn_m_tx(void *, mblk_t *);
static void ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static boolean_t ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data);

/*
 * Allocates a DMA handle and virtual memory and binds them;
 * returns the size of the memory actually bound and the physical address.
 */
int
unm_pci_alloc_consistent(unm_adapter *adapter,
		int size, caddr_t *address, ddi_dma_cookie_t *cookie,
		ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep)
{
	int			err;
	uint32_t		ncookies;
	size_t			ring_len;
	uint_t			dma_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;

	*dma_handle = NULL;

	if (size <= 0)
		return (DDI_ENOMEM);

	err = ddi_dma_alloc_handle(adapter->dip,
	    &adapter->gc_dma_attr_desc,
	    DDI_DMA_DONTWAIT, NULL, dma_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s: %s: ddi_dma_alloc_handle FAILED:"
		    " %d", unm_nic_driver_name, __func__, err);
		return (DDI_ENOMEM);
	}

	err = ddi_dma_mem_alloc(*dma_handle,
	    size, &adapter->gc_attr_desc,
	    dma_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
	    handlep);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s: %s: ddi_dma_mem_alloc failed: "
		    "ret %d, request size: %d",
		    unm_nic_driver_name, __func__, err, size);
		ddi_dma_free_handle(dma_handle);
		return (DDI_ENOMEM);
	}

	if (ring_len < size) {
		cmn_err(CE_WARN, "%s: %s: could not allocate required "
		    "memory: %d\n", unm_nic_driver_name,
		    __func__, size);
		ddi_dma_mem_free(handlep);
		ddi_dma_free_handle(dma_handle);
		return (DDI_FAILURE);
	}

	(void) memset(*address, 0, size);

	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
	    NULL, *address, ring_len,
	    dma_flags,
	    DDI_DMA_DONTWAIT, NULL,
	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
	    (ncookies != 1)) {
		cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
		    unm_nic_driver_name, __func__, err);
		ddi_dma_mem_free(handlep);
		ddi_dma_free_handle(dma_handle);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Unbinds the memory, frees the DMA memory and, at the end, frees the
 * DMA handle.
 */
void
unm_pci_free_consistent(ddi_dma_handle_t *dma_handle,
    ddi_acc_handle_t *acc_handle)
{
	int err;

	err = ddi_dma_unbind_handle(*dma_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Error unbinding memory\n", __func__);
		return;
	}

	ddi_dma_mem_free(acc_handle);
	ddi_dma_free_handle(dma_handle);
}

static uint32_t msi_tgt_status[] = {
    ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
    ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
    ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
    ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static void
unm_nic_disable_int(unm_adapter *adapter)
{
	__uint32_t	temp = 0;

	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
	    &temp, 4);
}

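/*
 * Check whether the interrupt belongs to this function and, if so,
 * acknowledge it. For legacy (INTx) interrupts the vector bit in
 * ISR_INT_VECTOR is tested, the interrupt is claimed through the target
 * status register, and ISR_INT_VECTOR is read back to flush the claim.
 * For MSI, the per-function target status register is cleared instead.
 * Returns 0 if the interrupt was ours, -1 otherwise.
 */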
static inline int
unm_nic_clear_int(unm_adapter *adapter)
{
	uint32_t	mask, temp, our_int, status;

	UNM_READ_LOCK(&adapter->adapter_lock);

	/* check whether it's our interrupt */
	if (!UNM_IS_MSI_FAMILY(adapter)) {

		/* Legacy Interrupt case */
		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &status);

		if (!(status & adapter->legacy_intr.int_vec_bit)) {
			UNM_READ_UNLOCK(&adapter->adapter_lock);
			return (-1);
		}

		if (adapter->ahw.revision_id >= NX_P3_B1) {
			adapter->unm_nic_pci_read_immediate(adapter,
			    ISR_INT_STATE_REG, &temp);
			if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp)) {
				UNM_READ_UNLOCK(&adapter->adapter_lock);
				return (-1);
			}
		} else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			our_int = adapter->unm_nic_pci_read_normalize(adapter,
			    CRB_INT_VECTOR);

			/* FIXME: Assumes pci_func is same as ctx */
			if ((our_int & (0x80 << adapter->portnum)) == 0) {
				if (our_int != 0) {
					/* not our interrupt */
					UNM_READ_UNLOCK(&adapter->adapter_lock);
					return (-1);
				}
			}
			temp = our_int & ~((u32)(0x80 << adapter->portnum));
			adapter->unm_nic_pci_write_normalize(adapter,
			    CRB_INT_VECTOR, temp);
		}

		if (adapter->fw_major < 4)
			unm_nic_disable_int(adapter);

		/* claim interrupt */
		temp = 0xffffffff;
		adapter->unm_nic_pci_write_immediate(adapter,
		    adapter->legacy_intr.tgt_status_reg, &temp);

		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &mask);

		/*
		 * Read again to make sure the legacy interrupt message got
		 * flushed out
		 */
		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &mask);
	} else if (adapter->flags & UNM_NIC_MSI_ENABLED) {
		/* clear interrupt */
		temp = 0xffffffff;
		adapter->unm_nic_pci_write_immediate(adapter,
		    msi_tgt_status[adapter->ahw.pci_func], &temp);
	}

	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (0);
}

static void
unm_nic_enable_int(unm_adapter *adapter)
{
	u32	temp = 1;

	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
	    &temp, 4);

	if (!UNM_IS_MSI_FAMILY(adapter)) {
		u32	mask = 0xfbff;

		adapter->unm_nic_pci_write_immediate(adapter,
		    adapter->legacy_intr.tgt_mask_reg, &mask);
	}
}

static void
unm_free_hw_resources(unm_adapter *adapter)
{
	unm_recv_context_t *recv_ctx;
	unm_rcv_desc_ctx_t *rcv_desc;
	int ctx, ring;

	if (adapter->context_alloced == 1) {
		netxen_destroy_rxtx(adapter);
		adapter->context_alloced = 0;
	}

	if (adapter->ctxDesc != NULL) {
		unm_pci_free_consistent(&adapter->ctxDesc_dma_handle,
		    &adapter->ctxDesc_acc_handle);
		adapter->ctxDesc = NULL;
	}

	if (adapter->ahw.cmdDescHead != NULL) {
		unm_pci_free_consistent(&adapter->ahw.cmd_desc_dma_handle,
		    &adapter->ahw.cmd_desc_acc_handle);
		adapter->ahw.cmdDesc_physAddr = NULL;
		adapter->ahw.cmdDescHead = NULL;
	}

	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];

			if (rcv_desc->desc_head != NULL) {
				unm_pci_free_consistent(
				    &rcv_desc->rx_desc_dma_handle,
				    &rcv_desc->rx_desc_acc_handle);
				rcv_desc->desc_head = NULL;
				rcv_desc->phys_addr = NULL;
			}
		}

		if (recv_ctx->rcvStatusDescHead != NULL) {
			unm_pci_free_consistent(
			    &recv_ctx->status_desc_dma_handle,
			    &recv_ctx->status_desc_acc_handle);
			recv_ctx->rcvStatusDesc_physAddr = NULL;
			recv_ctx->rcvStatusDescHead = NULL;
		}
	}
}

static void
cleanup_adapter(struct unm_adapter_s *adapter)
{
	if (adapter->cmd_buf_arr != NULL)
		kmem_free(adapter->cmd_buf_arr,
		    sizeof (struct unm_cmd_buffer) * adapter->MaxTxDescCount);

	ddi_regs_map_free(&(adapter->regs_handle));
	ddi_regs_map_free(&(adapter->db_handle));
	kmem_free(adapter, sizeof (unm_adapter));
}

void
unm_nic_remove(unm_adapter *adapter)
{
	unm_recv_context_t *recv_ctx;
	unm_rcv_desc_ctx_t	*rcv_desc;
	int ctx, ring;

	mac_link_update(adapter->mach, LINK_STATE_DOWN);
	unm_nic_stop_port(adapter);

	if (adapter->interrupt_crb) {
		UNM_READ_LOCK(&adapter->adapter_lock);
		unm_nic_disable_int(adapter);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}
	(void) untimeout(adapter->watchdog_timer);

	unm_free_hw_resources(adapter);
	unm_free_tx_buffers(adapter);
	unm_free_tx_dmahdl(adapter);

	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			if (rcv_desc->rx_buf_pool != NULL)
				unm_destroy_rx_ring(rcv_desc);
		}
	}

	if (adapter->portnum == 0)
		unm_free_dummy_dma(adapter);

	unm_destroy_intr(adapter);

	ddi_set_driver_private(adapter->dip, NULL);
	cleanup_adapter(adapter);
}

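/*
 * Wait for the firmware command peg to come up, then advertise the
 * driver's settings (per-port interrupt scheme, multifunction MSI mode,
 * multiport mode) and acknowledge the handshake through CRB_CMDPEG_STATE.
 */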
static int
init_firmware(unm_adapter *adapter)
{
	uint32_t state = 0, loops = 0, tempout;

	/* Window 1 call */
	UNM_READ_LOCK(&adapter->adapter_lock);
	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_CMDPEG_STATE);
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	if (state == PHAN_INITIALIZE_ACK)
		return (0);

	while (state != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
		drv_usecwait(100);
		/* Window 1 call */
		UNM_READ_LOCK(&adapter->adapter_lock);
		state = adapter->unm_nic_pci_read_normalize(adapter,
		    CRB_CMDPEG_STATE);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
		loops++;
	}

	if (loops >= 200000) {
		cmn_err(CE_WARN, "%s%d: CmdPeg init incomplete:%x\n",
		    adapter->name, adapter->instance, state);
		return (-EIO);
	}

	/* Window 1 call */
	UNM_READ_LOCK(&adapter->adapter_lock);
	tempout = INTR_SCHEME_PERPORT;
	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_CAPABILITIES_HOST,
	    &tempout, 4);
	tempout = MSI_MODE_MULTIFUNC;
	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_MSI_MODE_HOST,
	    &tempout, 4);
	tempout = MPORT_MULTI_FUNCTION_MODE;
	adapter->unm_nic_hw_write_wx(adapter, CRB_MPORT_MODE, &tempout, 4);
	tempout = PHAN_INITIALIZE_ACK;
	adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &tempout, 4);
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (0);
}

/*
 * Utility to synchronize with the receive peg.
 *  Returns   0 on success
 *         -EIO on error
 */
int
receive_peg_ready(struct unm_adapter_s *adapter)
{
	uint32_t state = 0;
	int loops = 0, err = 0;

	/* Window 1 call */
	UNM_READ_LOCK(&adapter->adapter_lock);
	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_RCVPEG_STATE);
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 20000)) {
		drv_usecwait(100);
		/* Window 1 call */

		UNM_READ_LOCK(&adapter->adapter_lock);
		state = adapter->unm_nic_pci_read_normalize(adapter,
		    CRB_RCVPEG_STATE);
		UNM_READ_UNLOCK(&adapter->adapter_lock);

		loops++;
	}

	if (loops >= 20000) {
		cmn_err(CE_WARN, "Receive Peg initialization incomplete 0x%x\n",
		    state);
		err = -EIO;
	}

	return (err);
}

/*
 * Check whether the firmware has been downloaded and is ready to run, and
 * set up the addresses for the descriptors in the adapter.
 */
static int
unm_nic_hw_resources(unm_adapter *adapter)
{
	hardware_context	*hw = &adapter->ahw;
	void			*addr;
	int			err;
	int			ctx, ring;
	unm_recv_context_t	*recv_ctx;
	unm_rcv_desc_ctx_t	*rcv_desc;
	ddi_dma_cookie_t	cookie;
	int			size;

	if (err = receive_peg_ready(adapter))
		return (err);

	size = (sizeof (RingContext) + sizeof (uint32_t));

	err = unm_pci_alloc_consistent(adapter,
	    size, (caddr_t *)&addr, &cookie,
	    &adapter->ctxDesc_dma_handle,
	    &adapter->ctxDesc_acc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to allocate HW context\n");
		return (err);
	}

	adapter->ctxDesc_physAddr = cookie.dmac_laddress;

	(void) memset(addr, 0, sizeof (RingContext));

	adapter->ctxDesc = (RingContext *) addr;
	adapter->ctxDesc->CtxId = adapter->portnum;
	adapter->ctxDesc->CMD_CONSUMER_OFFSET =
	    adapter->ctxDesc_physAddr + sizeof (RingContext);
	adapter->cmdConsumer =
	    (uint32_t *)(uintptr_t)(((char *)addr) + sizeof (RingContext));

	ASSERT(!((unsigned long)adapter->ctxDesc_physAddr & 0x3f));

	/*
	 * Allocate command descriptor ring.
	 */
	size = (sizeof (cmdDescType0_t) * adapter->MaxTxDescCount);
	err = unm_pci_alloc_consistent(adapter,
	    size, (caddr_t *)&addr, &cookie,
	    &hw->cmd_desc_dma_handle,
	    &hw->cmd_desc_acc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to allocate cmd desc ring\n");
		return (err);
	}

	hw->cmdDesc_physAddr = cookie.dmac_laddress;
	hw->cmdDescHead = (cmdDescType0_t *)addr;

	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
		recv_ctx = &adapter->recv_ctx[ctx];

		size = (sizeof (statusDesc_t) * adapter->MaxRxDescCount);
		err = unm_pci_alloc_consistent(adapter,
		    size, (caddr_t *)&addr,
		    &recv_ctx->status_desc_dma_cookie,
		    &recv_ctx->status_desc_dma_handle,
		    &recv_ctx->status_desc_acc_handle);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed to allocate sts desc ring\n");
			goto free_cmd_desc;
		}

		(void) memset(addr, 0, size);
		recv_ctx->rcvStatusDesc_physAddr =
		    recv_ctx->status_desc_dma_cookie.dmac_laddress;
		recv_ctx->rcvStatusDescHead = (statusDesc_t *)addr;

		/* rds rings */
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];

			size = (sizeof (rcvDesc_t) * adapter->MaxRxDescCount);
			err = unm_pci_alloc_consistent(adapter,
			    size, (caddr_t *)&addr,
			    &rcv_desc->rx_desc_dma_cookie,
			    &rcv_desc->rx_desc_dma_handle,
			    &rcv_desc->rx_desc_acc_handle);
			if (err != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to allocate "
				    "rx desc ring %d\n", ring);
				goto free_status_desc;
			}

			rcv_desc->phys_addr =
			    rcv_desc->rx_desc_dma_cookie.dmac_laddress;
			rcv_desc->desc_head = (rcvDesc_t *)addr;
		}
	}

	if (err = netxen_create_rxtx(adapter))
		goto free_statusrx_desc;
	adapter->context_alloced = 1;

	return (DDI_SUCCESS);

free_statusrx_desc:
free_status_desc:
free_cmd_desc:
	unm_free_hw_resources(adapter);

	return (err);
}

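/*
 * Sync "count" descriptors of "unit_size" bytes each, starting at index
 * "start" in a ring of "range" descriptors. When the span wraps past the
 * end of the ring, the sync is split into two calls: one to the end of
 * the ring and one from the start of the ring.
 */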
void
unm_desc_dma_sync(ddi_dma_handle_t handle, uint_t start, uint_t count,
    uint_t range, uint_t unit_size, uint_t direction)
{
	if ((start + count) < range) {
		(void) ddi_dma_sync(handle, start * unit_size,
		    count * unit_size, direction);
	} else {
		(void) ddi_dma_sync(handle, start * unit_size, 0, direction);
		(void) ddi_dma_sync(handle, 0,
		    (start + count - range) * unit_size, direction);
	}
}

static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET,
    CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2,
    CRB_CMD_PRODUCER_OFFSET_3 };

static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET,
    CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2,
    CRB_CMD_CONSUMER_OFFSET_3 };

void
unm_nic_update_cmd_producer(struct unm_adapter_s *adapter,
    uint32_t crb_producer)
{
	int data = crb_producer;

	if (adapter->crb_addr_cmd_producer) {
		UNM_READ_LOCK(&adapter->adapter_lock);
		adapter->unm_nic_hw_write_wx(adapter,
		    adapter->crb_addr_cmd_producer, &data, 4);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}
}

static void
unm_nic_update_cmd_consumer(struct unm_adapter_s *adapter,
    uint32_t crb_consumer)
{
	int data = crb_consumer;

	if (adapter->crb_addr_cmd_consumer)
		adapter->unm_nic_hw_write_wx(adapter,
		    adapter->crb_addr_cmd_consumer, &data, 4);
}

/*
 * Looks for type of packet and sets opcode accordingly
 * so that checksum offload can be used.
 */
static void
unm_tx_csum(cmdDescType0_t *desc, mblk_t *mp, pktinfo_t *pktinfo)
{
	if (pktinfo->mac_hlen == sizeof (struct ether_vlan_header))
		desc->u1.s1.flags = FLAGS_VLAN_TAGGED;

	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
		uint32_t	start, flags;

		hcksum_retrieve(mp, NULL, NULL, &start, NULL, NULL, NULL,
		    &flags);
		if ((flags & (HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM)) == 0)
			return;

		/*
		 * For TCP/UDP, ask hardware to do both IP header and
		 * full checksum, even if stack has already done one or
		 * the other. Hardware will always get it correct even
		 * if stack has already done it.
		 */
		switch (pktinfo->l4_proto) {
			case IPPROTO_TCP:
				desc->u1.s1.opcode = TX_TCP_PKT;
				break;
			case IPPROTO_UDP:
				desc->u1.s1.opcode = TX_UDP_PKT;
				break;
			default:
				/* Must be here with HCK_IPV4_HDRCKSUM */
				desc->u1.s1.opcode = TX_IP_PKT;
				return;
		}

		desc->u1.s1.ipHdrOffset = pktinfo->mac_hlen;
		desc->u1.s1.tcpHdrOffset = pktinfo->mac_hlen + pktinfo->ip_hlen;
	}
}

/*
 * For IP/UDP/TCP checksum offload, this checks for the MAC+IP header in one
 * contiguous block ending at an 8 byte aligned address, as required by the
 * hardware. pktinfo->total_len is always updated by this function; if
 * pktinfo->etype is left at 0, the caller needs to linearize the mblk and
 * invoke unm_update_pkt_info() to determine the ethertype, IP header length
 * and protocol.
 */
static boolean_t
unm_get_pkt_info(mblk_t *mp, pktinfo_t *pktinfo)
{
	mblk_t		*bp;
	ushort_t	type;

	(void) memset(pktinfo, 0, sizeof (pktinfo_t));

	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		pktinfo->mblk_no++;
		pktinfo->total_len += MBLKL(bp);
	}

	if (MBLKL(mp) < (sizeof (struct ether_header) + sizeof (ipha_t)))
		return (B_FALSE);

	/*
	 * We just need a 2 byte aligned address, since ether_type is a
	 * ushort.
	 */
	if ((uintptr_t)mp->b_rptr & 1)
		return (B_FALSE);

	type = ((struct ether_header *)(uintptr_t)mp->b_rptr)->ether_type;
	if (type == htons(ETHERTYPE_VLAN)) {
		if (MBLKL(mp) < (sizeof (struct ether_vlan_header) +
		    sizeof (ipha_t)))
			return (B_FALSE);
		type = ((struct ether_vlan_header *)
		    (uintptr_t)mp->b_rptr)->ether_type;
		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
	} else {
		pktinfo->mac_hlen = sizeof (struct ether_header);
	}
	pktinfo->etype = type;

	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
		uchar_t *ip_off = mp->b_rptr + pktinfo->mac_hlen;

		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ip_off);
		pktinfo->l4_proto =
		    ((ipha_t *)(uintptr_t)ip_off)->ipha_protocol;

		/* IP header not aligned to quadword boundary? */
		if ((unsigned long)(ip_off + pktinfo->ip_hlen) % 8 != 0)
			return (B_FALSE);
	}

	return (B_TRUE);
}

static void
unm_update_pkt_info(char *ptr, pktinfo_t *pktinfo)
{
	ushort_t	type;

	type = ((struct ether_header *)(uintptr_t)ptr)->ether_type;
	if (type == htons(ETHERTYPE_VLAN)) {
		type = ((struct ether_vlan_header *)(uintptr_t)ptr)->ether_type;
		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
	} else {
		pktinfo->mac_hlen = sizeof (struct ether_header);
	}
	pktinfo->etype = type;

	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
		char *ipp = ptr + pktinfo->mac_hlen;

		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ipp);
		pktinfo->l4_proto = ((ipha_t *)(uintptr_t)ipp)->ipha_protocol;
	}
}

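/*
 * Copy-based transmit path: the mblk chain is copied into a preallocated,
 * DMA-mapped tx buffer so the whole frame is described by a single command
 * descriptor, after which the mblk can be freed immediately.
 */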
static boolean_t
unm_send_copy(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
{
	hardware_context *hw;
	u32				producer = 0;
	cmdDescType0_t			*hwdesc;
	struct unm_cmd_buffer		*pbuf = NULL;
	u32				mblen;
	int				no_of_desc = 1;
	int				MaxTxDescCount;
	mblk_t				*bp;
	char				*txb;

	hw = &adapter->ahw;
	MaxTxDescCount = adapter->MaxTxDescCount;

	UNM_SPIN_LOCK(&adapter->tx_lock);
	membar_enter();

	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
	    MaxTxDescCount) <= 2) {
		adapter->stats.outofcmddesc++;
		adapter->resched_needed = 1;
		membar_exit();
		UNM_SPIN_UNLOCK(&adapter->tx_lock);
		return (B_FALSE);
	}
	adapter->freecmds -= no_of_desc;

	producer = adapter->cmdProducer;

	adapter->cmdProducer = get_index_range(adapter->cmdProducer,
	    MaxTxDescCount, no_of_desc);

	hwdesc = &hw->cmdDescHead[producer];
	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
	pbuf = &adapter->cmd_buf_arr[producer];

	pbuf->msg = NULL;
	pbuf->head = NULL;
	pbuf->tail = NULL;

	txb = pbuf->dma_area.vaddr;

	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		bcopy(bp->b_rptr, txb, mblen);
		txb += mblen;
	}

	/*
	 * Determine metadata if not previously done due to fragmented mblk.
	 */
	if (pktinfo->etype == 0)
		unm_update_pkt_info(pbuf->dma_area.vaddr, pktinfo);

	(void) ddi_dma_sync(pbuf->dma_area.dma_hdl,
	    0, pktinfo->total_len, DDI_DMA_SYNC_FORDEV);

	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
	/* hwdesc->mss = 0; */
	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
	hwdesc->u3.s1.port = adapter->portnum;
	hwdesc->u3.s1.ctx_id = adapter->portnum;

	hwdesc->u6.s1.buffer1Length = pktinfo->total_len;
	hwdesc->u5.AddrBuffer1 = pbuf->dma_area.dma_addr;
	hwdesc->u1.s1.numOfBuffers = 1;
	hwdesc->u1.s1.totalLength = pktinfo->total_len;

	unm_tx_csum(hwdesc, mp, pktinfo);

	unm_desc_dma_sync(hw->cmd_desc_dma_handle,
	    producer,
	    no_of_desc,
	    MaxTxDescCount,
	    sizeof (cmdDescType0_t),
	    DDI_DMA_SYNC_FORDEV);

	hw->cmdProducer = adapter->cmdProducer;
	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);

	adapter->stats.txbytes += pktinfo->total_len;
	adapter->stats.xmitfinished++;
	adapter->stats.txcopyed++;
	UNM_SPIN_UNLOCK(&adapter->tx_lock);

	freemsg(mp);
	return (B_TRUE);
}

/* Should be called with adapter->tx_lock held. */
static void
unm_return_dma_handle(unm_adapter *adapter, unm_dmah_node_t *head,
    unm_dmah_node_t *tail, uint32_t num)
{
	ASSERT(tail != NULL);
	tail->next = adapter->dmahdl_pool;
	adapter->dmahdl_pool = head;
	adapter->freehdls += num;
}

static unm_dmah_node_t *
unm_reserve_dma_handle(unm_adapter *adapter)
{
	unm_dmah_node_t *dmah = NULL;

	dmah = adapter->dmahdl_pool;
	if (dmah != NULL) {
		adapter->dmahdl_pool = dmah->next;
		dmah->next = NULL;
		adapter->freehdls--;
		membar_exit();
	}

	return (dmah);
}

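/*
 * Mapped transmit path: each mblk in the chain is DMA-bound and the
 * resulting cookies are packed into command descriptors, four buffer
 * pointers per descriptor, up to MAX_COOKIES_PER_CMD cookies in all. On
 * any failure the reserved handles are unbound and returned to the pool
 * so the caller can fall back to the copy path.
 */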
static boolean_t
unm_send_mapped(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
{
	hardware_context		*hw;
	u32				producer = 0;
	u32				saved_producer = 0;
	cmdDescType0_t			*hwdesc;
	struct unm_cmd_buffer		*pbuf = NULL;
	int				no_of_desc;
	int				k;
	int				MaxTxDescCount;
	mblk_t				*bp;

	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL, *hdlp;
	ddi_dma_cookie_t cookie[MAX_COOKIES_PER_CMD + 1];
	int ret, i;
	uint32_t hdl_reserved = 0;
	uint32_t mblen;
	uint32_t ncookies, index = 0, total_cookies = 0;

	MaxTxDescCount = adapter->MaxTxDescCount;

	UNM_SPIN_LOCK(&adapter->tx_lock);

	/* bind all the mblks of the packet first */
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		dmah = unm_reserve_dma_handle(adapter);
		if (dmah == NULL) {
			adapter->stats.outoftxdmahdl++;
			goto err_map;
		}

		ret = ddi_dma_addr_bind_handle(dmah->dmahdl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies);

		if (ret != DDI_DMA_MAPPED)
			goto err_map;

		if (tail == NULL) {
			head = tail = dmah;
		} else {
			tail->next = dmah;
			tail = dmah;
		}
		hdl_reserved++;

		total_cookies += ncookies;
		if (total_cookies > MAX_COOKIES_PER_CMD) {
			dmah = NULL;
			goto err_map;
		}

		if (index == 0) {
			size_t	hsize = cookie[0].dmac_size;

			/*
			 * For TCP/UDP packets with checksum offload,
			 * MAC/IP headers need to be contiguous. Otherwise,
			 * there must be at least 16 bytes in the first
			 * descriptor.
			 */
			if ((pktinfo->l4_proto == IPPROTO_TCP) ||
			    (pktinfo->l4_proto == IPPROTO_UDP)) {
				if (hsize < (pktinfo->mac_hlen +
				    pktinfo->ip_hlen)) {
					dmah = NULL;
					goto err_map;
				}
			} else {
				if (hsize < 16) {
					dmah = NULL;
					goto err_map;
				}
			}
		}

		index++;
		ncookies--;
		for (i = 0; i < ncookies; i++, index++)
			ddi_dma_nextcookie(dmah->dmahdl, &cookie[index]);
	}

	dmah = NULL;
	hw = &adapter->ahw;
	no_of_desc = (total_cookies + 3) >> 2;

	membar_enter();
	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
	    MaxTxDescCount) < no_of_desc + 2) {
		/*
		 * If we are going to be trying the copy path, no point
		 * scheduling an upcall when Tx resources are freed.
		 */
		if (pktinfo->total_len > adapter->maxmtu) {
			adapter->stats.outofcmddesc++;
			adapter->resched_needed = 1;
		}
		membar_exit();
		goto err_alloc_desc;
	}
	adapter->freecmds -= no_of_desc;

	/* Copy the descriptors into the hardware */
	producer = adapter->cmdProducer;
	saved_producer = producer;
	hwdesc = &hw->cmdDescHead[producer];
	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
	pbuf = &adapter->cmd_buf_arr[producer];

	pbuf->msg = mp;
	pbuf->head = head;
	pbuf->tail = tail;

	hwdesc->u1.s1.numOfBuffers = total_cookies;
	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
	hwdesc->u3.s1.port = adapter->portnum;
	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
	/* hwdesc->mss = 0; */
	hwdesc->u3.s1.ctx_id = adapter->portnum;
	hwdesc->u1.s1.totalLength = pktinfo->total_len;
	unm_tx_csum(hwdesc, mp, pktinfo);

	for (i = k = 0; i < total_cookies; i++) {
		if (k == 4) {
			/* Move to the next descriptor */
			k = 0;
			producer = get_next_index(producer, MaxTxDescCount);
			hwdesc = &hw->cmdDescHead[producer];
			(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
		}

		switch (k) {
		case 0:
			hwdesc->u6.s1.buffer1Length = cookie[i].dmac_size;
			hwdesc->u5.AddrBuffer1 = cookie[i].dmac_laddress;
			break;
		case 1:
			hwdesc->u6.s1.buffer2Length = cookie[i].dmac_size;
			hwdesc->u2.AddrBuffer2 = cookie[i].dmac_laddress;
			break;
		case 2:
			hwdesc->u6.s1.buffer3Length = cookie[i].dmac_size;
			hwdesc->u4.AddrBuffer3 = cookie[i].dmac_laddress;
			break;
		case 3:
			hwdesc->u6.s1.buffer4Length = cookie[i].dmac_size;
			hwdesc->u7.AddrBuffer4 = cookie[i].dmac_laddress;
			break;
		}
		k++;
	}

	unm_desc_dma_sync(hw->cmd_desc_dma_handle, saved_producer, no_of_desc,
	    MaxTxDescCount, sizeof (cmdDescType0_t), DDI_DMA_SYNC_FORDEV);

	adapter->cmdProducer = get_next_index(producer, MaxTxDescCount);
	hw->cmdProducer = adapter->cmdProducer;
	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);

	adapter->stats.txbytes += pktinfo->total_len;
	adapter->stats.xmitfinished++;
	adapter->stats.txmapped++;
	UNM_SPIN_UNLOCK(&adapter->tx_lock);
	return (B_TRUE);

err_alloc_desc:
err_map:

	hdlp = head;
	while (hdlp != NULL) {
		(void) ddi_dma_unbind_handle(hdlp->dmahdl);
		hdlp = hdlp->next;
	}

	/*
	 * Add the handle that was reserved but failed to bind to the list
	 * to be returned.
	 */
	if (dmah != NULL) {
		if (tail == NULL)
			head = tail = dmah;
		else {
			tail->next = dmah;
			tail = dmah;
		}
		hdl_reserved++;
	}

	if (head != NULL)
		unm_return_dma_handle(adapter, head, tail, hdl_reserved);

	UNM_SPIN_UNLOCK(&adapter->tx_lock);
	return (B_FALSE);
}

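/*
 * Select a transmit strategy: packets at or below tx_bcopy_threshold, or
 * with too many fragments, take the copy path; others are DMA-mapped.
 * If mapping fails, the packet falls back to the copy path unless it
 * exceeds the MTU, in which case it is dropped.
 */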
static boolean_t
unm_nic_xmit_frame(unm_adapter *adapter, mblk_t *mp)
{
	pktinfo_t	pktinfo;
	boolean_t	status = B_FALSE, send_mapped;

	adapter->stats.xmitcalled++;

	send_mapped = unm_get_pkt_info(mp, &pktinfo);

	if (pktinfo.total_len <= adapter->tx_bcopy_threshold ||
	    pktinfo.mblk_no >= MAX_BUFFERS_PER_CMD)
		send_mapped = B_FALSE;

	if (send_mapped == B_TRUE)
		status = unm_send_mapped(adapter, mp, &pktinfo);

	if (status != B_TRUE) {
		if (pktinfo.total_len <= adapter->maxmtu)
			return (unm_send_copy(adapter, mp, &pktinfo));

		/* message too large */
		freemsg(mp);
		adapter->stats.txdropped++;
		status = B_TRUE;
	}

	return (status);
}

static int
unm_nic_check_temp(struct unm_adapter_s *adapter)
{
	uint32_t temp, temp_state, temp_val;
	int rv = 0;

	if ((adapter->ahw.revision_id == NX_P3_A2) ||
	    (adapter->ahw.revision_id == NX_P3_A0))
		return (0);

	temp = adapter->unm_nic_pci_read_normalize(adapter, CRB_TEMP_STATE);

	temp_state = nx_get_temp_state(temp);
	temp_val = nx_get_temp_val(temp);

	if (temp_state == NX_TEMP_PANIC) {
		cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
		    "maximum allowed, device has been shut down\n",
		    unm_nic_driver_name, temp_val);
		rv = 1;
	} else if (temp_state == NX_TEMP_WARN) {
		if (adapter->temp == NX_TEMP_NORMAL) {
			cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
			    "operating range. Immediate action needed.\n",
			    unm_nic_driver_name, temp_val);
		}
	} else {
		if (adapter->temp == NX_TEMP_WARN) {
			cmn_err(CE_WARN, "%s: Device temperature is now %d "
			    "degrees C, back in normal range.\n",
			    unm_nic_driver_name, temp_val);
		}
	}

	adapter->temp = temp_state;
	return (rv);
}

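/*
 * Watchdog routine: on port 0 it checks the device temperature and, if the
 * device has overheated, returns without rescheduling itself. Otherwise it
 * handles any PHY interrupt and schedules itself to run again in two
 * seconds.
 */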
static void
unm_watchdog(unsigned long v)
{
	unm_adapter *adapter = (unm_adapter *)v;

	if ((adapter->portnum == 0) && unm_nic_check_temp(adapter)) {
		/*
		 * The device has overheated, so return without
		 * rescheduling the watchdog.
		 */
		return;
	}

	unm_nic_handle_phy_intr(adapter);

	/*
	 * This function schedules a call for itself.
	 */
	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
	    (void *)adapter, 2 * drv_usectohz(1000000));
}

static void
unm_nic_clear_stats(unm_adapter *adapter)
{
	(void) memset(&adapter->stats, 0, sizeof (adapter->stats));
}

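/*
 * Poll loop run from the interrupt handler: reap tx completions and
 * process received packets, repeating until a pass both drains the
 * command ring and handles fewer than NX_RX_MAXBUFS packets, then
 * re-enable the interrupt.
 */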
static void
unm_nic_poll(unm_adapter *adapter)
{
	int	work_done, tx_complete;

	adapter->stats.polled++;

loop:
	tx_complete = unm_process_cmd_ring(adapter);
	work_done = unm_process_rcv_ring(adapter, NX_RX_MAXBUFS);
	if ((!tx_complete) || (!(work_done < NX_RX_MAXBUFS)))
		goto loop;

	UNM_READ_LOCK(&adapter->adapter_lock);
	unm_nic_enable_int(adapter);
	UNM_READ_UNLOCK(&adapter->adapter_lock);
}

/* ARGSUSED */
uint_t
unm_intr(caddr_t data, caddr_t arg)
{
	unm_adapter	*adapter = (unm_adapter *)(uintptr_t)data;

	if (unm_nic_clear_int(adapter))
		return (DDI_INTR_UNCLAIMED);

	unm_nic_poll(adapter);
	return (DDI_INTR_CLAIMED);
}

/*
 * This is invoked from the receive isr. Due to the single threaded nature
 * of the invocation, pool_lock acquisition is not necessary to protect
 * pool_list.
 */
static void
unm_free_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc, unm_rx_buffer_t *rx_buffer)
{
	/* mutex_enter(rcv_desc->pool_lock); */
	rx_buffer->next = rcv_desc->pool_list;
	rcv_desc->pool_list = rx_buffer;
	rcv_desc->rx_buf_free++;
	/* mutex_exit(rcv_desc->pool_lock); */
}

/*
 * unm_process_rcv() sends the received packet to the protocol stack.
 */
static mblk_t *
unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc)
{
	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
	unm_rx_buffer_t		*rx_buffer;
	mblk_t *mp;
	u32			desc_ctx = desc->u1.s1.type;
	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
	u32			pkt_length = desc->u1.s1.totalLength;
	int			poff = desc->u1.s1.pkt_offset;
	int			index, cksum_flags, docopy;
	int			index_lo = desc->u1.s1.referenceHandle_lo;
	char			*vaddr;

	index = ((desc->u1.s1.referenceHandle_hi << 4) | index_lo);

	rx_buffer = index2rxbuf(rcv_desc, index);

	if (rx_buffer == NULL) {
		cmn_err(CE_WARN, "\r\nNULL rx_buffer idx=%d", index);
		return (NULL);
	}
	vaddr = (char *)rx_buffer->dma_info.vaddr;
	if (vaddr == NULL) {
		cmn_err(CE_WARN, "\r\nNULL vaddr");
		return (NULL);
	}
	rcv_desc->rx_desc_handled++;
	rcv_desc->rx_buf_card--;

	(void) ddi_dma_sync(rx_buffer->dma_info.dma_hdl, 0,
	    pkt_length + poff + (adapter->ahw.cut_through ? 0 :
	    IP_ALIGNMENT_BYTES), DDI_DMA_SYNC_FORCPU);

	/*
	 * Copy packet into a newly allocated message buffer, if pkt_length
	 * is below the copy threshold.
	 */
	docopy = (pkt_length <= adapter->rx_bcopy_threshold) ? 1 : 0;

	/*
	 * If the card is running out of rx buffers, then attempt to allocate
	 * a new mblk so we can feed this rx buffer back to the card (we
	 * _could_ look at what's pending on the free and recycle lists).
	 */
	if (rcv_desc->rx_buf_card < NX_RX_THRESHOLD) {
		docopy = 1;
		adapter->stats.rxbufshort++;
	}

	if (docopy == 1) {
		if ((mp = allocb(pkt_length + IP_ALIGNMENT_BYTES, 0)) == NULL) {
			adapter->stats.allocbfailed++;
			goto freebuf;
		}

		mp->b_rptr += IP_ALIGNMENT_BYTES;
		vaddr += poff;
		bcopy(vaddr, mp->b_rptr, pkt_length);
		adapter->stats.rxcopyed++;
		unm_free_rx_buffer(rcv_desc, rx_buffer);
	} else {
		mp = (mblk_t *)rx_buffer->mp;
		if (mp == NULL) {
			mp = desballoc(rx_buffer->dma_info.vaddr,
			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
			if (mp == NULL) {
				adapter->stats.desballocfailed++;
				goto freebuf;
			}
			rx_buffer->mp = mp;
		}
		mp->b_rptr += poff;
		adapter->stats.rxmapped++;
	}

	mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_length);

	if (desc->u1.s1.status == STATUS_CKSUM_OK) {
		adapter->stats.csummed++;
		cksum_flags =
		    HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM | HCK_FULLCKSUM;
	} else {
		cksum_flags = 0;
	}
	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, cksum_flags, 0);

	adapter->stats.no_rcv++;
	adapter->stats.rxbytes += pkt_length;
	adapter->stats.uphappy++;

	return (mp);

freebuf:
	unm_free_rx_buffer(rcv_desc, rx_buffer);
	return (NULL);
}

/* Process Receive status ring */
static int
unm_process_rcv_ring(unm_adapter *adapter, int max)
{
	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
	statusDesc_t		*desc_head = recv_ctx->rcvStatusDescHead;
	statusDesc_t		*desc = NULL;
	uint32_t		consumer, start;
	int			count = 0, ring;
	mblk_t *mp;

	start = consumer = recv_ctx->statusRxConsumer;

	unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start, max,
	    adapter->MaxRxDescCount, sizeof (statusDesc_t),
	    DDI_DMA_SYNC_FORCPU);

	while (count < max) {
		desc = &desc_head[consumer];
		if (!(desc->u1.s1.owner & STATUS_OWNER_HOST))
			break;

		mp = unm_process_rcv(adapter, desc);
		desc->u1.s1.owner = STATUS_OWNER_PHANTOM;

		consumer = (consumer + 1) % adapter->MaxRxDescCount;
		count++;
		if (mp != NULL)
			mac_rx(adapter->mach, NULL, mp);
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		if (recv_ctx->rcv_desc[ring].rx_desc_handled > 0)
			unm_post_rx_buffers_nodb(adapter, ring);
	}

	if (count) {
		unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start,
		    count, adapter->MaxRxDescCount, sizeof (statusDesc_t),
		    DDI_DMA_SYNC_FORDEV);

		/* update the consumer index in phantom */
		recv_ctx->statusRxConsumer = consumer;

		UNM_READ_LOCK(&adapter->adapter_lock);
		adapter->unm_nic_hw_write_wx(adapter,
		    recv_ctx->host_sds_consumer, &consumer, 4);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}

	return (count);
}

/* Process Command status ring */
static int
unm_process_cmd_ring(struct unm_adapter_s *adapter)
{
	u32			last_consumer;
	u32			consumer;
	int			count = 0;
	struct unm_cmd_buffer	*buffer;
	int			done;
	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL;
	uint32_t	free_hdls = 0;

	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);

	last_consumer = adapter->lastCmdConsumer;
	consumer = *(adapter->cmdConsumer);

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->head != NULL) {
			dmah = buffer->head;
			while (dmah != NULL) {
				(void) ddi_dma_unbind_handle(dmah->dmahdl);
				dmah = dmah->next;
				free_hdls++;
			}

			if (head == NULL) {
				head = buffer->head;
				tail = buffer->tail;
			} else {
				tail->next = buffer->head;
				tail = buffer->tail;
			}

			buffer->head = NULL;
			buffer->tail = NULL;

			if (buffer->msg != NULL) {
				freemsg(buffer->msg);
				buffer->msg = NULL;
			}
		}

		last_consumer = get_next_index(last_consumer,
		    adapter->MaxTxDescCount);
		if (++count > NX_MAX_TXCOMPS)
			break;
	}

	if (count) {
		int	doresched;

		UNM_SPIN_LOCK(&adapter->tx_lock);
		adapter->lastCmdConsumer = last_consumer;
		adapter->freecmds += count;
		membar_exit();

		doresched = adapter->resched_needed;
		if (doresched)
			adapter->resched_needed = 0;

		if (head != NULL)
			unm_return_dma_handle(adapter, head, tail, free_hdls);

		UNM_SPIN_UNLOCK(&adapter->tx_lock);

		if (doresched)
			mac_tx_update(adapter->mach);
	}

	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);

	consumer = *(adapter->cmdConsumer);
	done = (adapter->lastCmdConsumer == consumer);

	return (done);
}

/*
 * This is invoked from the receive isr, and at initialization time when no
 * rx buffers have been posted to the card. Due to the single threaded
 * nature of the invocation, pool_lock acquisition is not necessary to
 * protect pool_list.
 */
static unm_rx_buffer_t *
unm_reserve_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc)
{
	unm_rx_buffer_t *rx_buffer = NULL;

	/* mutex_enter(rcv_desc->pool_lock); */
	if (rcv_desc->rx_buf_free) {
		rx_buffer = rcv_desc->pool_list;
		rcv_desc->pool_list = rx_buffer->next;
		rx_buffer->next = NULL;
		rcv_desc->rx_buf_free--;
	} else {
		mutex_enter(rcv_desc->recycle_lock);

		if (rcv_desc->rx_buf_recycle) {
			rcv_desc->pool_list = rcv_desc->recycle_list;
			rcv_desc->recycle_list = NULL;
			rcv_desc->rx_buf_free += rcv_desc->rx_buf_recycle;
			rcv_desc->rx_buf_recycle = 0;

			rx_buffer = rcv_desc->pool_list;
			rcv_desc->pool_list = rx_buffer->next;
			rx_buffer->next = NULL;
			rcv_desc->rx_buf_free--;
		}

		mutex_exit(rcv_desc->recycle_lock);
	}

	/* mutex_exit(rcv_desc->pool_lock); */
	return (rx_buffer);
}

static void
post_rx_doorbell(struct unm_adapter_s *adapter, uint32_t ringid, int count)
{
#define	UNM_RCV_PEG_DB_ID	2
#define	UNM_RCV_PRODUCER_OFFSET	0
	ctx_msg msg = {0};

	/*
	 * Write a doorbell msg to tell phanmon of change in
	 * receive ring producer
	 */
	msg.PegId = UNM_RCV_PEG_DB_ID;
	msg.privId = 1;
	msg.Count = count;
	msg.CtxId = adapter->portnum;
	msg.Opcode = UNM_RCV_PRODUCER(ringid);
	dbwritel(*((__uint32_t *)&msg),
	    (void *)(DB_NORMALIZE(adapter, UNM_RCV_PRODUCER_OFFSET)));
}

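/*
 * Initial fill of an rx ring: post a buffer in every descriptor, sync the
 * whole ring, and publish the producer index to the card (also ringing
 * the doorbell on pre-4.x firmware).
 */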
static int
unm_post_rx_buffers(struct unm_adapter_s *adapter, uint32_t ringid)
{
	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
	unm_rx_buffer_t		*rx_buffer;
	rcvDesc_t		*pdesc;
	int			count;

	for (count = 0; count < rcv_desc->MaxRxDescCount; count++) {
		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
		if (rx_buffer != NULL) {
			pdesc = &rcv_desc->desc_head[count];
			pdesc->referenceHandle = rxbuf2index(rcv_desc,
			    rx_buffer);
			pdesc->flags = ringid;
			pdesc->bufferLength = rcv_desc->dma_size;
			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
		} else {
			return (DDI_FAILURE);
		}
	}

	rcv_desc->producer = count % rcv_desc->MaxRxDescCount;
	count--;
	unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle,
	    0,		/* start */
	    count,	/* count */
	    count,	/* range */
	    sizeof (rcvDesc_t),	/* unit_size */
	    DDI_DMA_SYNC_FORDEV);	/* direction */

	rcv_desc->rx_buf_card = rcv_desc->MaxRxDescCount;
	UNM_READ_LOCK(&adapter->adapter_lock);
	adapter->unm_nic_hw_write_wx(adapter, rcv_desc->host_rx_producer,
	    &count, 4);
	if (adapter->fw_major < 4)
		post_rx_doorbell(adapter, ringid, count);
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (DDI_SUCCESS);
}

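/*
 * Replenish the rx descriptors handled since the last post, without a
 * doorbell: buffers are pulled from the free/recycle pools, the posted
 * range is synced (allowing for wrap-around), and the new producer index
 * is written to the card.
 */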
static void
unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
    uint32_t ringid)
{
	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
	struct unm_rx_buffer	*rx_buffer;
	rcvDesc_t		*pdesc;
	int			count, producer = rcv_desc->producer;
	int			last_producer = producer;

	for (count = 0; count < rcv_desc->rx_desc_handled; count++) {
		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
		if (rx_buffer != NULL) {
			pdesc = &rcv_desc->desc_head[producer];
			pdesc->referenceHandle = rxbuf2index(rcv_desc,
			    rx_buffer);
			pdesc->flags = ringid;
			pdesc->bufferLength = rcv_desc->dma_size;
			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
		} else {
			adapter->stats.outofrxbuf++;
			break;
		}
		producer = get_next_index(producer, rcv_desc->MaxRxDescCount);
	}

	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		/* Sync rx ring, considering case for wrap around */
		unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle, last_producer,
		    count, rcv_desc->MaxRxDescCount, sizeof (rcvDesc_t),
		    DDI_DMA_SYNC_FORDEV);

		rcv_desc->producer = producer;
		rcv_desc->rx_desc_handled -= count;
		rcv_desc->rx_buf_card += count;

		producer = (producer - 1) % rcv_desc->MaxRxDescCount;
		UNM_READ_LOCK(&adapter->adapter_lock);
		adapter->unm_nic_hw_write_wx(adapter,
		    rcv_desc->host_rx_producer, &producer, 4);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}
}

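/*
 * Statistics: on XGBE boards the NIU hardware counters are read from the
 * card (through CRB window 0 on 128M parts, or via 2M register access
 * below); other boards report the software counters kept in
 * adapter->stats.
 */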
int
unm_nic_fill_statistics_128M(struct unm_adapter_s *adapter,
    struct unm_statistics *unm_stats)
{
	void *addr;

	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
		UNM_WRITE_LOCK(&adapter->adapter_lock);
		unm_nic_pci_change_crbwindow_128M(adapter, 0);

		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT,
		    &(unm_stats->tx_bytes));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT,
		    &(unm_stats->tx_packets));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT,
		    &(unm_stats->rx_bytes));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT,
		    &(unm_stats->rx_packets));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT,
		    &(unm_stats->rx_errors));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT,
		    &(unm_stats->rx_CRC_errors));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
		    &(unm_stats->rx_long_length_error));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
		    &(unm_stats->rx_short_length_error));

		/*
		 * For reading rx_MAC_error bit different procedure
		 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
		 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
		 * unm_stats->rx_MAC_errors = temp & 0xff;
		 */

		unm_nic_pci_change_crbwindow_128M(adapter, 1);
		UNM_WRITE_UNLOCK(&adapter->adapter_lock);
	} else {
		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
		unm_stats->tx_bytes = adapter->stats.txbytes;
		unm_stats->tx_packets = adapter->stats.xmitedframes +
		    adapter->stats.xmitfinished;
		unm_stats->rx_bytes = adapter->stats.rxbytes;
		unm_stats->rx_packets = adapter->stats.no_rcv;
		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
		unm_stats->rx_short_length_error = adapter->stats.uplcong;
		unm_stats->rx_long_length_error = adapter->stats.uphcong;
		unm_stats->rx_CRC_errors = 0;
		unm_stats->rx_MAC_errors = 0;
		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
	}
	return (0);
}

int
unm_nic_fill_statistics_2M(struct unm_adapter_s *adapter,
    struct unm_statistics *unm_stats)
{
	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
		    &(unm_stats->tx_bytes), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
		    &(unm_stats->tx_packets), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
		    &(unm_stats->rx_bytes), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
		    &(unm_stats->rx_packets), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_AGGR_ERROR_CNT, &(unm_stats->rx_errors), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
		    &(unm_stats->rx_CRC_errors), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
		    &(unm_stats->rx_long_length_error), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
		    &(unm_stats->rx_short_length_error), 4);
	} else {
		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
		unm_stats->tx_bytes = adapter->stats.txbytes;
		unm_stats->tx_packets = adapter->stats.xmitedframes +
		    adapter->stats.xmitfinished;
		unm_stats->rx_bytes = adapter->stats.rxbytes;
		unm_stats->rx_packets = adapter->stats.no_rcv;
		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
		unm_stats->rx_short_length_error = adapter->stats.uplcong;
		unm_stats->rx_long_length_error = adapter->stats.uphcong;
		unm_stats->rx_CRC_errors = 0;
		unm_stats->rx_MAC_errors = 0;
		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
	}
	return (0);
}

int
unm_nic_clear_statistics_128M(struct unm_adapter_s *adapter)
{
	void *addr;
	int data = 0;

	UNM_WRITE_LOCK(&adapter->adapter_lock);
	unm_nic_pci_change_crbwindow_128M(adapter, 0);

	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR, &data);

	unm_nic_pci_change_crbwindow_128M(adapter, 1);
	UNM_WRITE_UNLOCK(&adapter->adapter_lock);
	unm_nic_clear_stats(adapter);
	return (0);
}

int
unm_nic_clear_statistics_2M(struct unm_adapter_s *adapter)
{
	int data = 0;

	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_AGGR_ERROR_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
	    &data, 4);
	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
	    &data, 4);
	unm_nic_clear_stats(adapter);
	return (0);
}

1785 /*
1786  * unm_nic_ioctl ()    We provide the tcl/phanmon support
1787  * through these ioctls.
1788  */
1789 static void
1790 unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q, mblk_t *mp)
1791 {
1792 	void *ptr;
1793 
1794 	switch (cmd) {
1795 	case UNM_NIC_CMD:
1796 		(void) unm_nic_do_ioctl(adapter, q, mp);
1797 		break;
1798 
1799 	case UNM_NIC_NAME:
1800 		ptr = (void *) mp->b_cont->b_rptr;
1801 
1802 		/*
1803 		 * Phanmon checks for "UNM-UNM" string
1804 		 * Replace the hardcoded value with appropriate macro
1805 		 */
1806 		DPRINTF(-1, (CE_CONT, "UNM_NIC_NAME ioctl executed %d %d\n",
1807 		    cmd, __LINE__));
1808 		(void) memcpy(ptr, "UNM-UNM\0\0\0", 10);
1809 		miocack(q, mp, 10, 0);
1810 		break;
1811 
1812 	default:
1813 		cmn_err(CE_WARN, "Netxen ioctl cmd %x not supported\n", cmd);
1814 
1815 		miocnak(q, mp, 0, EINVAL);
1816 		break;
1817 	}
1818 }
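
/*
 * Illustrative sketch (not part of the driver): one way a phanmon-style
 * userland tool might drive the UNM_NIC_NAME ioctl through the stream
 * head.  The device node name and the use of I_STR are assumptions;
 * only the command and the 10-byte reply come from the code above.
 *
 *	#include <stropts.h>
 *	#include <fcntl.h>
 *
 *	char name[10];
 *	struct strioctl sio;
 *	int fd = open("/dev/ntxn0", O_RDWR);
 *
 *	sio.ic_cmd = UNM_NIC_NAME;
 *	sio.ic_timout = 0;
 *	sio.ic_len = sizeof (name);
 *	sio.ic_dp = name;
 *	if (fd >= 0 && ioctl(fd, I_STR, &sio) == 0)
 *		(void) printf("%s\n", name);	(expect "UNM-UNM")
 */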
1819 
1820 int
1821 unm_nic_resume(unm_adapter *adapter)
1822 {
1823 
1824 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1825 	    (void *) adapter, 50000);
1826 
1827 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1828 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
1829 	else
1830 		(void) ddi_intr_enable(adapter->intr_handle);
1831 	UNM_READ_LOCK(&adapter->adapter_lock);
1832 	unm_nic_enable_int(adapter);
1833 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1834 
1835 	mac_link_update(adapter->mach, LINK_STATE_UP);
1836 
1837 	return (DDI_SUCCESS);
1838 }
1839 
1840 int
1841 unm_nic_suspend(unm_adapter *adapter)
1842 {
1843 	mac_link_update(adapter->mach, LINK_STATE_DOWN);
1844 
1845 	(void) untimeout(adapter->watchdog_timer);
1846 
1847 	UNM_READ_LOCK(&adapter->adapter_lock);
1848 	unm_nic_disable_int(adapter);
1849 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1850 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1851 		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
1852 	else
1853 		(void) ddi_intr_disable(adapter->intr_handle);
1854 
1855 	return (DDI_SUCCESS);
1856 }
1857 
1858 static int
1859 unm_nic_do_ioctl(unm_adapter *adapter, queue_t *wq, mblk_t *mp)
1860 {
1861 	unm_nic_ioctl_data_t		data;
1862 	struct unm_nic_ioctl_data	*up_data;
1863 	ddi_acc_handle_t		conf_handle;
1864 	int				retval = 0;
1865 	unsigned int			efuse_chip_id;
1866 	char				*ptr1;
1867 	short				*ptr2;
1868 	int				*ptr4;
1869 
1870 	up_data = (struct unm_nic_ioctl_data *)(mp->b_cont->b_rptr);
1871 	(void) memcpy(&data, (void **)(uintptr_t)(mp->b_cont->b_rptr),
1872 	    sizeof (data));
1873 
1874 	/* Shouldn't access beyond legal limits of "char uabc[64]" member */
1875 	if (data.size > sizeof (data.uabc)) {
1876 		/* evil user tried to crash the kernel */
1877 		cmn_err(CE_WARN, "bad size: %d\n", data.size);
1878 		retval = GLD_BADARG;
1879 		goto error_out;
1880 	}
1881 
1882 	switch (data.cmd) {
1883 	case unm_nic_cmd_pci_read:
1884 
1885 		if ((retval = adapter->unm_nic_hw_read_ioctl(adapter,
1886 		    data.off, up_data, data.size))) {
1887 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_read_wx "
1888 			    "returned %d\n", __FUNCTION__, __LINE__, retval));
1889 
1890 			data.rv = retval;
1891 			goto error_out;
1892 		}
1893 
1894 		data.rv = 0;
1895 		break;
1896 
1897 	case unm_nic_cmd_pci_write:
1898 		if ((data.rv = adapter->unm_nic_hw_write_ioctl(adapter,
1899 		    data.off, &(data.uabc), data.size))) {
1900 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_write_wx "
1901 			    "returned %d\n", __FUNCTION__,
1902 			    __LINE__, data.rv));
1903 			retval = data.rv;
1904 			goto error_out;
1905 		}
1906 		data.size = 0;
1907 		break;
1908 
1909 	case unm_nic_cmd_pci_mem_read:
1910 		if ((data.rv = adapter->unm_nic_pci_mem_read(adapter,
1911 		    data.off, up_data, data.size))) {
1912 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_pci_mem_read "
1913 			    "returned %d\n", __FUNCTION__,
1914 			    __LINE__, data.rv));
1915 			retval = data.rv;
1916 			goto error_out;
1917 		}
1918 		data.rv = 0;
1919 		break;
1920 
1921 	case unm_nic_cmd_pci_mem_write:
1922 		if ((data.rv = adapter->unm_nic_pci_mem_write(adapter,
1923 		    data.off, &(data.uabc), data.size))) {
1924 			DPRINTF(-1, (CE_WARN,
1925 			    "%s(%d) unm_nic_cmd_pci_mem_write "
1926 			    "returned %d\n",
1927 			    __FUNCTION__, __LINE__, data.rv));
1928 			retval = data.rv;
1929 			goto error_out;
1930 		}
1931 
1932 		data.size = 0;
1933 		data.rv = 0;
1934 		break;
1935 
1936 	case unm_nic_cmd_pci_config_read:
1937 
1938 		if (adapter->pci_cfg_handle != NULL) {
1939 			conf_handle = adapter->pci_cfg_handle;
1940 
1941 		} else if ((retval = pci_config_setup(adapter->dip,
1942 		    &conf_handle)) != DDI_SUCCESS) {
1943 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1944 			    " error:%d\n", unm_nic_driver_name, retval));
1945 			goto error_out;
1946 
1947 		} else
1948 			adapter->pci_cfg_handle = conf_handle;
1949 
1950 		switch (data.size) {
1951 		case 1:
1952 			ptr1 = (char *)up_data;
1953 			*ptr1 = (char)pci_config_get8(conf_handle, data.off);
1954 			break;
1955 		case 2:
1956 			ptr2 = (short *)up_data;
1957 			*ptr2 = (short)pci_config_get16(conf_handle, data.off);
1958 			break;
1959 		case 4:
1960 			ptr4 = (int *)up_data;
1961 			*ptr4 = (int)pci_config_get32(conf_handle, data.off);
1962 			break;
1963 		}
1964 
1965 		break;
1966 
1967 	case unm_nic_cmd_pci_config_write:
1968 
1969 		if (adapter->pci_cfg_handle != NULL) {
1970 			conf_handle = adapter->pci_cfg_handle;
1971 		} else if ((retval = pci_config_setup(adapter->dip,
1972 		    &conf_handle)) != DDI_SUCCESS) {
1973 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1974 			    " error:%d\n", unm_nic_driver_name, retval));
1975 			goto error_out;
1976 		} else {
1977 			adapter->pci_cfg_handle = conf_handle;
1978 		}
1979 
1980 		switch (data.size) {
1981 		case 1:
1982 			pci_config_put8(conf_handle,
1983 			    data.off, *(char *)&(data.uabc));
1984 			break;
1985 		case 2:
1986 			pci_config_put16(conf_handle,
1987 			    data.off, *(short *)(uintptr_t)&(data.uabc));
1988 			break;
1989 		case 4:
1990 			pci_config_put32(conf_handle,
1991 			    data.off, *(u32 *)(uintptr_t)&(data.uabc));
1992 			break;
1993 		}
1994 		data.size = 0;
1995 		break;
1996 
1997 	case unm_nic_cmd_get_stats:
1998 		data.rv = adapter->unm_nic_fill_statistics(adapter,
1999 		    (struct unm_statistics *)up_data);
2000 		data.size = sizeof (struct unm_statistics);
2001 
2002 		break;
2003 
2004 	case unm_nic_cmd_clear_stats:
2005 		data.rv = adapter->unm_nic_clear_statistics(adapter);
2006 		break;
2007 
2008 	case unm_nic_cmd_get_version:
2009 		(void) memcpy(up_data, UNM_NIC_VERSIONID,
2010 		    sizeof (UNM_NIC_VERSIONID));
2011 		data.size = sizeof (UNM_NIC_VERSIONID);
2012 
2013 		break;
2014 
2015 	case unm_nic_cmd_get_phy_type:
2016 		cmn_err(CE_WARN, "unm_nic_cmd_get_phy_type unimplemented\n");
2017 		break;
2018 
2019 	case unm_nic_cmd_efuse_chip_id:
2020 		efuse_chip_id = adapter->unm_nic_pci_read_normalize(adapter,
2021 		    UNM_EFUSE_CHIP_ID);
2022 		(void) memcpy(up_data, &efuse_chip_id, sizeof (efuse_chip_id));
2023 		data.rv = 0;
2024 		break;
2025 
2026 	default:
2027 		cmn_err(CE_WARN, "%s%d: bad command %d\n", adapter->name,
2028 		    adapter->instance, data.cmd);
2029 		data.rv = GLD_NOTSUPPORTED;
2030 		data.size = 0;
2031 		goto error_out;
2032 	}
2033 
2034 work_done:
2035 	miocack(wq, mp, data.size, data.rv);
2036 	return (DDI_SUCCESS);
2037 
2038 error_out:
2039 	cmn_err(CE_WARN, "%s: ioctl error (cmd %d)\n", __FUNCTION__, data.cmd);
2040 	miocnak(wq, mp, 0, EINVAL);
2041 	return (retval);
2042 }
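
/*
 * Illustrative sketch (not part of the driver): callers place a
 * unm_nic_ioctl_data_t at b_cont->b_rptr of the M_IOCTL message
 * consumed above.  For example, to fetch the statistics block
 * (assuming an I_STR wrapper with ic_len large enough for the reply):
 *
 *	unm_nic_ioctl_data_t d;
 *
 *	(void) memset(&d, 0, sizeof (d));
 *	d.cmd = unm_nic_cmd_get_stats;
 *	d.size = sizeof (struct unm_statistics);
 *
 * On success the driver overwrites the payload with a
 * struct unm_statistics and acks data.size bytes back to the caller.
 */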
2043 
2044 /*
2045  * Local datatype for defining tables of (Offset, Name) pairs
2046  */
2047 typedef struct {
2048 	offset_t	index;
2049 	char		*name;
2050 } unm_ksindex_t;
2051 
2052 static const unm_ksindex_t unm_kstat[] = {
2053 	{ 0,		"freehdls"		},
2054 	{ 1,		"freecmds"		},
2055 	{ 2,		"tx_bcopy_threshold"	},
2056 	{ 3,		"rx_bcopy_threshold"	},
2057 	{ 4,		"xmitcalled"		},
2058 	{ 5,		"xmitedframes"		},
2059 	{ 6,		"xmitfinished"		},
2060 	{ 7,		"txbytes"		},
2061 	{ 8,		"txcopyed"		},
2062 	{ 9,		"txmapped"		},
2063 	{ 10,		"outoftxdmahdl"		},
2064 	{ 11,		"outofcmddesc"		},
2065 	{ 12,		"txdropped"		},
2066 	{ 13,		"polled"		},
2067 	{ 14,		"uphappy"		},
2068 	{ 15,		"updropped"		},
2069 	{ 16,		"csummed"		},
2070 	{ 17,		"no_rcv"		},
2071 	{ 18,		"rxbytes"		},
2072 	{ 19,		"rxcopyed"		},
2073 	{ 20,		"rxmapped"		},
2074 	{ 21,		"desballocfailed"	},
2075 	{ 22,		"outofrxbuf"		},
2076 	{ 23,		"promiscmode"		},
2077 	{ 24,		"rxbufshort"		},
2078 	{ 25,		"allocbfailed"		},
2079 	{ -1,		NULL			}
2080 };
2081 
2082 static int
2083 unm_kstat_update(kstat_t *ksp, int flag)
2084 {
2085 	unm_adapter *adapter;
2086 	kstat_named_t *knp;
2087 
2088 	if (flag != KSTAT_READ)
2089 		return (EACCES);
2090 
2091 	adapter = ksp->ks_private;
2092 	knp = ksp->ks_data;
2093 
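	/*
	 * NOTE: these assignments must remain in the same order as the
	 * entries of the unm_kstat[] table above; each knp++ fills the
	 * next named slot created by unm_setup_named_kstat().
	 */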
2094 	(knp++)->value.ui64 = adapter->freehdls;
2095 	(knp++)->value.ui64 = adapter->freecmds;
2096 	(knp++)->value.ui64 = adapter->tx_bcopy_threshold;
2097 	(knp++)->value.ui64 = adapter->rx_bcopy_threshold;
2098 
2099 	(knp++)->value.ui64 = adapter->stats.xmitcalled;
2100 	(knp++)->value.ui64 = adapter->stats.xmitedframes;
2101 	(knp++)->value.ui64 = adapter->stats.xmitfinished;
2102 	(knp++)->value.ui64 = adapter->stats.txbytes;
2103 	(knp++)->value.ui64 = adapter->stats.txcopyed;
2104 	(knp++)->value.ui64 = adapter->stats.txmapped;
2105 	(knp++)->value.ui64 = adapter->stats.outoftxdmahdl;
2106 	(knp++)->value.ui64 = adapter->stats.outofcmddesc;
2107 	(knp++)->value.ui64 = adapter->stats.txdropped;
2108 	(knp++)->value.ui64 = adapter->stats.polled;
2109 	(knp++)->value.ui64 = adapter->stats.uphappy;
2110 	(knp++)->value.ui64 = adapter->stats.updropped;
2111 	(knp++)->value.ui64 = adapter->stats.csummed;
2112 	(knp++)->value.ui64 = adapter->stats.no_rcv;
2113 	(knp++)->value.ui64 = adapter->stats.rxbytes;
2114 	(knp++)->value.ui64 = adapter->stats.rxcopyed;
2115 	(knp++)->value.ui64 = adapter->stats.rxmapped;
2116 	(knp++)->value.ui64 = adapter->stats.desballocfailed;
2117 	(knp++)->value.ui64 = adapter->stats.outofrxbuf;
2118 	(knp++)->value.ui64 = adapter->stats.promiscmode;
2119 	(knp++)->value.ui64 = adapter->stats.rxbufshort;
2120 	(knp++)->value.ui64 = adapter->stats.allocbfailed;
2121 
2122 	return (0);
2123 }
2124 
2125 static kstat_t *
2126 unm_setup_named_kstat(unm_adapter *adapter, int instance, char *name,
2127 	const unm_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
2128 {
2129 	kstat_t *ksp;
2130 	kstat_named_t *knp;
2131 	char *np;
2132 	int type;
2133 	int count = 0;
2134 
2135 	size /= sizeof (unm_ksindex_t);
2136 	ksp = kstat_create(unm_nic_driver_name, instance, name, "net",
2137 	    KSTAT_TYPE_NAMED, size - 1, KSTAT_FLAG_PERSISTENT);
2138 	if (ksp == NULL)
2139 		return (NULL);
2140 
2141 	ksp->ks_private = adapter;
2142 	ksp->ks_update = update;
2143 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
2144 		count++;
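		/*
		 * The leading character of the table name selects the
		 * kstat data type: '%' -> uint32, '$' -> string,
		 * '&' -> char; anything else defaults to uint64.
		 */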
2145 		switch (*np) {
2146 		default:
2147 			type = KSTAT_DATA_UINT64;
2148 			break;
2149 		case '%':
2150 			np += 1;
2151 			type = KSTAT_DATA_UINT32;
2152 			break;
2153 		case '$':
2154 			np += 1;
2155 			type = KSTAT_DATA_STRING;
2156 			break;
2157 		case '&':
2158 			np += 1;
2159 			type = KSTAT_DATA_CHAR;
2160 			break;
2161 		}
2162 		kstat_named_init(knp, np, type);
2163 	}
2164 	kstat_install(ksp);
2165 
2166 	return (ksp);
2167 }
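
/*
 * Illustrative sketch (not part of the driver): reading these counters
 * from userland with libkstat.  The module name "ntxn" is an assumption
 * (the driver registers under unm_nic_driver_name); instance 0 and the
 * "kstatinfo" name match unm_init_kstats() below.
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "ntxn", 0, "kstatinfo");
 *	kstat_named_t *kn;
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
 *	    (kn = kstat_data_lookup(ksp, "rxbytes")) != NULL)
 *		(void) printf("rxbytes = %llu\n",
 *		    (u_longlong_t)kn->value.ui64);
 *	(void) kstat_close(kc);
 */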
2168 
2169 void
2170 unm_init_kstats(unm_adapter *adapter, int instance)
2171 {
2172 	adapter->kstats[0] = unm_setup_named_kstat(adapter,
2173 	    instance, "kstatinfo", unm_kstat,
2174 	    sizeof (unm_kstat), unm_kstat_update);
2175 }
2176 
2177 void
2178 unm_fini_kstats(unm_adapter *adapter)
2179 {
2180 
2181 	if (adapter->kstats[0] != NULL) {
2182 		kstat_delete(adapter->kstats[0]);
2183 		adapter->kstats[0] = NULL;
2184 	}
2185 }
2186 
2187 static int
2188 unm_nic_set_pauseparam(unm_adapter *adapter, unm_pauseparam_t *pause)
2189 {
2190 	int ret = 0;
2191 
2192 	if (adapter->ahw.board_type == UNM_NIC_GBE) {
2193 		if (unm_niu_gbe_set_rx_flow_ctl(adapter, pause->rx_pause))
2194 			ret = -EIO;
2195 
2196 		if (unm_niu_gbe_set_tx_flow_ctl(adapter, pause->tx_pause))
2197 			ret = -EIO;
2198 
2199 	} else if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2200 		if (unm_niu_xg_set_tx_flow_ctl(adapter, pause->tx_pause))
2201 			ret = -EIO;
2202 	} else
2203 		ret = -EIO;
2204 
2205 	return (ret);
2206 }
2207 
2208 /*
2209  *
2210  * GLD/MAC interfaces
2211  *
2212  */
2213 
2214 static int
2215 ntxn_m_start(void *arg)
2216 {
2217 	unm_adapter	*adapter = arg;
2218 	int		ring;
2219 
2220 	UNM_SPIN_LOCK(&adapter->lock);
2221 	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC) {
2222 		UNM_SPIN_UNLOCK(&adapter->lock);
2223 		return (DDI_SUCCESS);
2224 	}
2225 
2226 	if (init_firmware(adapter) != DDI_SUCCESS) {
2227 		UNM_SPIN_UNLOCK(&adapter->lock);
2228 		cmn_err(CE_WARN, "%s%d: Failed to init firmware\n",
2229 		    adapter->name, adapter->instance);
2230 		return (DDI_FAILURE);
2231 	}
2232 
2233 	unm_nic_clear_stats(adapter);
2234 
2235 	if (unm_nic_hw_resources(adapter) != 0) {
2236 		UNM_SPIN_UNLOCK(&adapter->lock);
2237 		cmn_err(CE_WARN, "%s%d: Error setting hw resources\n",
2238 		    adapter->name, adapter->instance);
2239 		return (DDI_FAILURE);
2240 	}
2241 
2242 	if (adapter->fw_major < 4) {
2243 		adapter->crb_addr_cmd_producer =
2244 		    crb_cmd_producer[adapter->portnum];
2245 		adapter->crb_addr_cmd_consumer =
2246 		    crb_cmd_consumer[adapter->portnum];
2247 		unm_nic_update_cmd_producer(adapter, 0);
2248 		unm_nic_update_cmd_consumer(adapter, 0);
2249 	}
2250 
2251 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
2252 		if (unm_post_rx_buffers(adapter, ring) != DDI_SUCCESS) {
2253 			/* TODO: clean up */
2254 			UNM_SPIN_UNLOCK(&adapter->lock);
2255 			return (DDI_FAILURE);
2256 		}
2257 	}
2258 
2259 	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0) {
2260 		UNM_SPIN_UNLOCK(&adapter->lock);
2261 		cmn_err(CE_WARN, "%s%d: Could not set mac address\n",
2262 		    adapter->name, adapter->instance);
2263 		return (DDI_FAILURE);
2264 	}
2265 
2266 	if (unm_nic_init_port(adapter) != 0) {
2267 		UNM_SPIN_UNLOCK(&adapter->lock);
2268 		cmn_err(CE_WARN, "%s%d: Could not initialize port\n",
2269 		    adapter->name, adapter->instance);
2270 		return (DDI_FAILURE);
2271 	}
2272 
2273 	unm_nic_set_link_parameters(adapter);
2274 
2275 	/*
2276 	 * P2 and P3 should eventually be handled the same way.
2277 	 */
2278 	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
2279 		if (unm_nic_set_promisc_mode(adapter) != 0) {
2280 			UNM_SPIN_UNLOCK(&adapter->lock);
2281 			cmn_err(CE_WARN, "%s%d: Could not set promisc mode\n",
2282 			    adapter->name, adapter->instance);
2283 			return (DDI_FAILURE);
2284 		}
2285 	} else {
2286 		nx_p3_nic_set_multi(adapter);
2287 	}
2288 	adapter->stats.promiscmode = 1;
2289 
2290 	if (unm_nic_set_mtu(adapter, adapter->mtu) != 0) {
2291 		UNM_SPIN_UNLOCK(&adapter->lock);
2292 		cmn_err(CE_WARN, "%s%d: Could not set mtu\n",
2293 		    adapter->name, adapter->instance);
2294 		return (DDI_FAILURE);
2295 	}
2296 
2297 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
2298 	    (void *)adapter, 0);
2299 
2300 	adapter->is_up = UNM_ADAPTER_UP_MAGIC;
2301 
2302 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
2303 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
2304 	else
2305 		(void) ddi_intr_enable(adapter->intr_handle);
2306 	unm_nic_enable_int(adapter);
2307 
2308 	UNM_SPIN_UNLOCK(&adapter->lock);
2309 	return (DDI_SUCCESS);
2310 }
2311 
2312 
2313 /*
2314  * This empty stop routine is kept here for reference, in case
2315  * GLDv3 turns out to require something different to be done here.
2316  * It will be deleted later.
2317  */
2318 /* ARGSUSED */
2319 static void
2320 ntxn_m_stop(void *arg)
2321 {
2322 }
2323 
2324 /*ARGSUSED*/
2325 static int
2326 ntxn_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
2327 {
2328 	/*
2329 	 * When we correctly implement this, invoke nx_p3_nic_set_multi()
2330 	 * or nx_p2_nic_set_multi() here.
2331 	 */
2332 	return (GLD_SUCCESS);
2333 }
2334 
2335 /*ARGSUSED*/
2336 static int
2337 ntxn_m_promisc(void *arg, boolean_t on)
2338 {
2339 #if 0
2340 	int err = 0;
2341 	struct unm_adapter_s *adapter = arg;
2342 
2343 	err = on ? unm_nic_set_promisc_mode(adapter) :
2344 	    unm_nic_unset_promisc_mode(adapter);
2345 
2346 	if (err)
2347 		return (GLD_FAILURE);
2348 #endif
2349 
2350 	return (GLD_SUCCESS);
2351 }
2352 
2353 static int
2354 ntxn_m_stat(void *arg, uint_t stat, uint64_t *val)
2355 {
2356 	struct unm_adapter_s		*adapter = arg;
2357 	struct unm_adapter_stats	*portstat = &adapter->stats;
2358 
2359 	switch (stat) {
2360 	case MAC_STAT_IFSPEED:
2361 		if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2362 			/* 10 Gigs */
2363 			*val = 10000000000ULL;
2364 		} else {
2365 			/* 1 Gig */
2366 			*val = 1000000000;
2367 		}
2368 		break;
2369 
2370 	case MAC_STAT_MULTIRCV:
2371 		*val = 0;
2372 		break;
2373 
2374 	case MAC_STAT_BRDCSTRCV:
2375 	case MAC_STAT_BRDCSTXMT:
2376 		*val = 0;
2377 		break;
2378 
2379 	case MAC_STAT_NORCVBUF:
2380 		*val = portstat->updropped;
2381 		break;
2382 
2383 	case MAC_STAT_NOXMTBUF:
2384 		*val = portstat->txdropped;
2385 		break;
2386 
2387 	case MAC_STAT_RBYTES:
2388 		*val = portstat->rxbytes;
2389 		break;
2390 
2391 	case MAC_STAT_OBYTES:
2392 		*val = portstat->txbytes;
2393 		break;
2394 
2395 	case MAC_STAT_OPACKETS:
2396 		*val = portstat->xmitedframes;
2397 		break;
2398 
2399 	case MAC_STAT_IPACKETS:
2400 		*val = portstat->uphappy;
2401 		break;
2402 
2403 	case MAC_STAT_OERRORS:
2404 		*val = portstat->xmitcalled - portstat->xmitedframes;
2405 		break;
2406 
2407 	case ETHER_STAT_LINK_DUPLEX:
2408 		*val = LINK_DUPLEX_FULL;
2409 		break;
2410 
2411 	default:
2412 		/*
2413 		 * Shouldn't reach here...
2414 		 */
2415 		*val = 0;
2416 		DPRINTF(0, (CE_WARN, "unrecognized stat = %d, value "
2417 		    "returned 0\n", stat));
2418 
2419 	}
2420 
2421 	return (0);
2422 }
2423 
2424 static int
2425 ntxn_m_unicst(void *arg, const uint8_t *mac)
2426 {
2427 	struct unm_adapter_s *adapter = arg;
2428 
2429 	DPRINTF(-1, (CE_CONT, "%s: called\n", __func__));
2430 
2431 	if (unm_nic_macaddr_set(adapter, (uint8_t *)mac))
2432 		return (EAGAIN);
2433 	bcopy(mac, adapter->mac_addr, ETHERADDRL);
2434 
2435 	return (0);
2436 }
2437 
2438 static mblk_t *
2439 ntxn_m_tx(void *arg, mblk_t *mp)
2440 {
2441 	unm_adapter *adapter = arg;
2442 	mblk_t *next;
2443 
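	/*
	 * GLDv3 mc_tx contract: transmit what we can and return the
	 * untransmitted remainder so the framework can resubmit it.
	 * On a send failure the current fragment chain is re-linked
	 * ahead of the remainder before we bail out.
	 */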
2444 	while (mp != NULL) {
2445 		next = mp->b_next;
2446 		mp->b_next = NULL;
2447 
2448 		if (unm_nic_xmit_frame(adapter, mp) != B_TRUE) {
2449 			mp->b_next = next;
2450 			break;
2451 		}
2452 		mp = next;
2453 		adapter->stats.xmitedframes++;
2454 	}
2455 
2456 	return (mp);
2457 }
2458 
2459 static void
2460 ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2461 {
2462 	int		cmd;
2463 	struct iocblk   *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
2464 	struct unm_adapter_s *adapter = (struct unm_adapter_s *)arg;
2465 	enum ioc_reply status = IOC_DONE;
2466 
2467 	iocp->ioc_error = 0;
2468 	cmd = iocp->ioc_cmd;
2469 
2470 	if (cmd == ND_GET || cmd == ND_SET) {
2471 		status = unm_nd_ioctl(adapter, wq, mp, iocp);
2472 		switch (status) {
2473 		default:
2474 		case IOC_INVAL:
2475 			miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2476 			    EINVAL : iocp->ioc_error);
2477 			break;
2478 
2479 		case IOC_DONE:
2480 			break;
2481 
2482 		case IOC_RESTART_ACK:
2483 		case IOC_ACK:
2484 			miocack(wq, mp, 0, 0);
2485 			break;
2486 
2487 		case IOC_RESTART_REPLY:
2488 		case IOC_REPLY:
2489 			mp->b_datap->db_type = iocp->ioc_error == 0 ?
2490 			    M_IOCACK : M_IOCNAK;
2491 			qreply(wq, mp);
2492 			break;
2493 		}
2494 	} else if (cmd <= UNM_NIC_NAME && cmd >= UNM_CMD_START) {
2495 		unm_nic_ioctl(adapter, cmd, wq, mp);
2496 		return;
2497 	} else {
2498 		miocnak(wq, mp, 0, EINVAL);
2499 		return;
2500 	}
2501 }
2502 
2503 /* ARGSUSED */
2504 static boolean_t
2505 ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2506 {
2507 	switch (cap) {
2508 	case MAC_CAPAB_HCKSUM:
2509 		{
2510 			uint32_t *txflags = cap_data;
2511 
2512 			*txflags = (HCKSUM_ENABLE |
2513 			    HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM);
2514 		}
2515 		break;
2516 
2517 	case MAC_CAPAB_POLL:
2518 	case MAC_CAPAB_MULTIADDRESS:
2519 	default:
2520 		return (B_FALSE);
2521 	}
2522 
2523 	return (B_TRUE);
2524 }
2525 
2526 #define	NETXEN_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
2527 
2528 static mac_callbacks_t ntxn_m_callbacks = {
2529 	NETXEN_M_CALLBACK_FLAGS,
2530 	ntxn_m_stat,
2531 	ntxn_m_start,
2532 	ntxn_m_stop,
2533 	ntxn_m_promisc,
2534 	ntxn_m_multicst,
2535 	ntxn_m_unicst,
2536 	ntxn_m_tx,
2537 	NULL,			/* mc_resources */
2538 	ntxn_m_ioctl,
2539 	ntxn_m_getcapab,
2540 	NULL,			/* mc_open */
2541 	NULL,			/* mc_close */
2542 	NULL,			/* mc_setprop */
2543 	NULL			/* mc_getprop */
2544 };
2545 
2546 int
2547 unm_register_mac(unm_adapter *adapter)
2548 {
2549 	int ret;
2550 	mac_register_t *macp;
2551 	unm_pauseparam_t pause;
2552 
2553 	dev_info_t *dip = adapter->dip;
2554 
2555 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2556 		cmn_err(CE_WARN, "mac_alloc failed: memory not available\n");
2557 		return (DDI_FAILURE);
2558 	}
2559 
2560 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2561 	macp->m_driver = adapter;
2562 	macp->m_dip = dip;
2563 	macp->m_instance = adapter->instance;
2564 	macp->m_src_addr = adapter->mac_addr;
2565 	macp->m_callbacks = &ntxn_m_callbacks;
2566 	macp->m_min_sdu = 0;
2567 	macp->m_max_sdu = adapter->mtu;
2568 #ifdef SOLARIS11
2569 	macp->m_margin = VLAN_TAGSZ;
2570 #endif /* SOLARIS11 */
2571 
2572 	ret = mac_register(macp, &adapter->mach);
2573 	mac_free(macp);
2574 	if (ret != 0) {
2575 		cmn_err(CE_WARN, "mac_register failed for port %d\n",
2576 		    adapter->portnum);
2577 		return (DDI_FAILURE);
2578 	}
2579 
2580 	unm_init_kstats(adapter, adapter->instance);
2581 
2582 	/* Register NDD-tweakable parameters */
2583 	if (unm_nd_init(adapter)) {
2584 		cmn_err(CE_WARN, "unm_nd_init() failed");
2585 		return (DDI_FAILURE);
2586 	}
2587 
2588 	pause.rx_pause = adapter->nd_params[PARAM_ADV_PAUSE_CAP].ndp_val;
2589 	pause.tx_pause = adapter->nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val;
2590 
2591 	if (unm_nic_set_pauseparam(adapter, &pause)) {
2592 		cmn_err(CE_WARN, "Bad pause settings: rx %d, tx %d",
2593 		    pause.rx_pause, pause.tx_pause);
2594 	}
2595 	adapter->nd_params[PARAM_PAUSE_CAP].ndp_val = pause.rx_pause;
2596 	adapter->nd_params[PARAM_ASYM_PAUSE_CAP].ndp_val = pause.tx_pause;
2597 
2598 	return (DDI_SUCCESS);
2599 }
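
/*
 * Illustrative sketch (not part of the driver): the pause settings
 * above travel through the ND_GET/ND_SET path in ntxn_m_ioctl(), so
 * they can be examined and tuned with ndd(1M).  The node and parameter
 * names below are assumptions suggested by the PARAM_* indices:
 *
 *	# ndd -get /dev/ntxn0 adv_pause_cap
 *	# ndd -set /dev/ntxn0 adv_pause_cap 1
 */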
2600