xref: /illumos-gate/usr/src/uts/common/io/ntxn/unm_nic_main.c (revision 47842382d52f28aa3173aa6b511781c322ccb6a2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 NetXen, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 #include <sys/types.h>
32 #include <sys/conf.h>
33 #include <sys/debug.h>
34 #include <sys/stropts.h>
35 #include <sys/stream.h>
36 #include <sys/strlog.h>
37 #include <sys/kmem.h>
38 #include <sys/stat.h>
39 #include <sys/kstat.h>
40 #include <sys/vtrace.h>
41 #include <sys/dlpi.h>
42 #include <sys/strsun.h>
43 #include <sys/ethernet.h>
44 #include <sys/modctl.h>
45 #include <sys/errno.h>
46 #include <sys/dditypes.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/sysmacros.h>
50 #include <sys/pci.h>
51 
52 #include <sys/gld.h>
53 #include <netinet/in.h>
54 #include <inet/ip.h>
55 #include <inet/tcp.h>
56 
57 #include <sys/rwlock.h>
58 #include <sys/mutex.h>
59 #include <sys/pattr.h>
60 #include <sys/strsubr.h>
61 #include <sys/ddi_impldefs.h>
62 #include<sys/task.h>
63 
64 #include "unm_nic_hw.h"
65 #include "unm_nic.h"
66 
67 #include "nic_phan_reg.h"
68 #include "unm_nic_ioctl.h"
69 #include "nic_cmn.h"
70 #include "unm_version.h"
71 #include "unm_brdcfg.h"
72 
73 #if defined(lint)
74 #undef MBLKL
75 #define	MBLKL(_mp_)	((uintptr_t)(_mp_)->b_wptr - (uintptr_t)(_mp_)->b_rptr)
76 #endif /* lint */
77 
78 #undef UNM_LOOPBACK
79 #undef SINGLE_DMA_BUF
80 
81 #define	UNM_ADAPTER_UP_MAGIC	777
82 #define	VLAN_TAGSZ		0x4
83 
84 #define	index2rxbuf(_rdp_, _idx_)	((_rdp_)->rx_buf_pool + (_idx_))
85 #define	rxbuf2index(_rdp_, _bufp_)	((_bufp_) - (_rdp_)->rx_buf_pool)
86 
87 /*
88  * Receive ISR processes NX_RX_MAXBUFS incoming packets at most, then posts
89  * as many buffers as packets processed. This loop repeats as required to
90  * process all incoming packets delivered in a single interrupt. Higher
91  * value of NX_RX_MAXBUFS improves performance by posting rx buffers less
92  * frequently, but at the cost of not posting quickly enough when card is
93  * running out of rx buffers.
94  */
95 #define	NX_RX_THRESHOLD		32
96 #define	NX_RX_MAXBUFS		128
97 #define	NX_MAX_TXCOMPS		256
98 
99 extern void unm_free_tx_buffers(unm_adapter *adapter);
100 extern void unm_free_tx_dmahdl(unm_adapter *adapter);
101 extern void unm_destroy_rx_ring(unm_rcv_desc_ctx_t *rcv_desc);
102 
103 static void unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
104     uint32_t ringid);
105 static mblk_t *unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc);
106 static int unm_process_rcv_ring(unm_adapter *, int);
107 static int unm_process_cmd_ring(struct unm_adapter_s *adapter);
108 
109 static int unm_nic_do_ioctl(unm_adapter *adapter, queue_t *q, mblk_t *mp);
110 static void unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q,
111     mblk_t *mp);
112 
113 /* GLDv3 interface functions */
114 static int ntxn_m_start(void *);
115 static void ntxn_m_stop(void *);
116 static int ntxn_m_multicst(void *, boolean_t, const uint8_t *);
117 static int ntxn_m_promisc(void *, boolean_t);
118 static int ntxn_m_stat(void *arg, uint_t stat, uint64_t *val);
119 static mblk_t *ntxn_m_tx(void *, mblk_t *);
120 static void ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
121 static boolean_t ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data);
122 
123 /*
124  * Allocates DMA handle, virtual memory and binds them
125  * returns size of actual memory binded and the physical address.
126  */
127 int
128 unm_pci_alloc_consistent(unm_adapter *adapter,
129 		int size, caddr_t *address, ddi_dma_cookie_t *cookie,
130 		ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep)
131 {
132 	int			err;
133 	uint32_t		ncookies;
134 	size_t			ring_len;
135 	uint_t			dma_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
136 
137 	*dma_handle = NULL;
138 
139 	if (size <= 0)
140 		return (DDI_ENOMEM);
141 
142 	err = ddi_dma_alloc_handle(adapter->dip,
143 	    &adapter->gc_dma_attr_desc,
144 	    DDI_DMA_DONTWAIT, NULL, dma_handle);
145 	if (err != DDI_SUCCESS) {
146 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_alloc_handle FAILED:"
147 		    " %d", unm_nic_driver_name, __func__, err);
148 		return (DDI_ENOMEM);
149 	}
150 
151 	err = ddi_dma_mem_alloc(*dma_handle,
152 	    size, &adapter->gc_attr_desc,
153 	    dma_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
154 	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
155 	    handlep);
156 	if (err != DDI_SUCCESS) {
157 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_mem_alloc failed:"
158 		    "ret %d, request size: %d",
159 		    unm_nic_driver_name, __func__, err, size);
160 		ddi_dma_free_handle(dma_handle);
161 		return (DDI_ENOMEM);
162 	}
163 
164 	if (ring_len < size) {
165 		cmn_err(CE_WARN, "%s: %s: could not allocate required "
166 		    "memory :%d\n", unm_nic_driver_name,
167 		    __func__, err);
168 		ddi_dma_mem_free(handlep);
169 		ddi_dma_free_handle(dma_handle);
170 		return (DDI_FAILURE);
171 	}
172 
173 	(void) memset(*address, 0, size);
174 
175 	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
176 	    NULL, *address, ring_len,
177 	    dma_flags,
178 	    DDI_DMA_DONTWAIT, NULL,
179 	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
180 	    (ncookies != 1)) {
181 		cmn_err(CE_WARN,
182 		    "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
183 		    unm_nic_driver_name, __func__, err);
184 		ddi_dma_mem_free(handlep);
185 		ddi_dma_free_handle(dma_handle);
186 		return (DDI_FAILURE);
187 	}
188 
189 	return (DDI_SUCCESS);
190 }
191 
192 /*
193  * Unbinds the memory, frees the DMA handle and at the end, frees the memory
194  */
195 void
196 unm_pci_free_consistent(ddi_dma_handle_t *dma_handle,
197     ddi_acc_handle_t *acc_handle)
198 {
199 	int err;
200 
201 	err = ddi_dma_unbind_handle(*dma_handle);
202 	if (err != DDI_SUCCESS) {
203 		cmn_err(CE_WARN, "%s: Error unbinding memory\n", __func__);
204 		return;
205 	}
206 
207 	ddi_dma_mem_free(acc_handle);
208 	ddi_dma_free_handle(dma_handle);
209 }
210 
/*
 * Per-PCI-function interrupt target-status registers, indexed by
 * ahw.pci_func; written with all-ones in unm_nic_clear_int() to
 * acknowledge an MSI interrupt for that function.
 */
static uint32_t msi_tgt_status[] = {
    ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
    ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
    ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
    ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
217 
218 static void
219 unm_nic_disable_int(unm_adapter *adapter)
220 {
221 	__uint32_t	temp = 0;
222 
223 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
224 	    &temp, 4);
225 }
226 
/*
 * Determine whether a pending interrupt belongs to this device and, if
 * so, acknowledge it at the source.  Returns 0 when the interrupt was
 * ours (now claimed), -1 when it belongs to another device/function.
 * The register access sequence below is hardware-mandated; do not
 * reorder.
 */
static int
unm_nic_clear_int(unm_adapter *adapter)
{
	uint32_t	mask, temp, our_int, status;

	UNM_READ_LOCK(&adapter->adapter_lock);

	/* check whether it's our interrupt */
	if (!UNM_IS_MSI_FAMILY(adapter)) {

		/* Legacy Interrupt case */
		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &status);

		if (!(status & adapter->legacy_intr.int_vec_bit)) {
			UNM_READ_UNLOCK(&adapter->adapter_lock);
			return (-1);
		}

		if (adapter->ahw.revision_id >= NX_P3_B1) {
			/* P3 B1+ exposes a dedicated INTx state register. */
			adapter->unm_nic_pci_read_immediate(adapter,
			    ISR_INT_STATE_REG, &temp);
			if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp)) {
				UNM_READ_UNLOCK(&adapter->adapter_lock);
				return (-1);
			}
		} else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/* P2 uses a shared CRB interrupt vector instead. */
			our_int = adapter->unm_nic_pci_read_normalize(adapter,
			    CRB_INT_VECTOR);

			/* FIXME: Assumes pci_func is same as ctx */
			if ((our_int & (0x80 << adapter->portnum)) == 0) {
				if (our_int != 0) {
					/* not our interrupt */
					UNM_READ_UNLOCK(&adapter->adapter_lock);
					return (-1);
				}
			}
			/* Clear only our port's bit in the shared vector. */
			temp = our_int & ~((u32)(0x80 << adapter->portnum));
			adapter->unm_nic_pci_write_normalize(adapter,
			    CRB_INT_VECTOR, temp);
		}

		/* Pre-4.x firmware cannot auto-mask; mask it ourselves. */
		if (adapter->fw_major < 4)
			unm_nic_disable_int(adapter);

		/* claim interrupt */
		temp = 0xffffffff;
		adapter->unm_nic_pci_write_immediate(adapter,
		    adapter->legacy_intr.tgt_status_reg, &temp);

		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &mask);

		/*
		 * Read again to make sure the legacy interrupt message got
		 * flushed out
		 */
		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
		    &mask);
	} else if (adapter->flags & UNM_NIC_MSI_ENABLED) {
		/* clear interrupt */
		temp = 0xffffffff;
		adapter->unm_nic_pci_write_immediate(adapter,
		    msi_tgt_status[adapter->ahw.pci_func], &temp);
	}

	UNM_READ_UNLOCK(&adapter->adapter_lock);

	return (0);
}
298 
299 static void
300 unm_nic_enable_int(unm_adapter *adapter)
301 {
302 	u32	temp = 1;
303 
304 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
305 	    &temp, 4);
306 
307 	if (!UNM_IS_MSI_FAMILY(adapter)) {
308 		u32	mask = 0xfbff;
309 
310 		adapter->unm_nic_pci_write_immediate(adapter,
311 		    adapter->legacy_intr.tgt_mask_reg, &mask);
312 	}
313 }
314 
315 static void
316 unm_free_hw_resources(unm_adapter *adapter)
317 {
318 	unm_recv_context_t *recv_ctx;
319 	unm_rcv_desc_ctx_t *rcv_desc;
320 	int ctx, ring;
321 
322 	if (adapter->context_alloced == 1) {
323 		netxen_destroy_rxtx(adapter);
324 		adapter->context_alloced = 0;
325 	}
326 
327 	if (adapter->ctxDesc != NULL) {
328 		unm_pci_free_consistent(&adapter->ctxDesc_dma_handle,
329 		    &adapter->ctxDesc_acc_handle);
330 		adapter->ctxDesc = NULL;
331 	}
332 
333 	if (adapter->ahw.cmdDescHead != NULL) {
334 		unm_pci_free_consistent(&adapter->ahw.cmd_desc_dma_handle,
335 		    &adapter->ahw.cmd_desc_acc_handle);
336 		adapter->ahw.cmdDesc_physAddr = NULL;
337 		adapter->ahw.cmdDescHead = NULL;
338 	}
339 
340 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
341 		recv_ctx = &adapter->recv_ctx[ctx];
342 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
343 			rcv_desc = &recv_ctx->rcv_desc[ring];
344 
345 			if (rcv_desc->desc_head != NULL) {
346 				unm_pci_free_consistent(
347 				    &rcv_desc->rx_desc_dma_handle,
348 				    &rcv_desc->rx_desc_acc_handle);
349 				rcv_desc->desc_head = NULL;
350 				rcv_desc->phys_addr = NULL;
351 			}
352 		}
353 
354 		if (recv_ctx->rcvStatusDescHead != NULL) {
355 			unm_pci_free_consistent(
356 			    &recv_ctx->status_desc_dma_handle,
357 			    &recv_ctx->status_desc_acc_handle);
358 			recv_ctx->rcvStatusDesc_physAddr = NULL;
359 			recv_ctx->rcvStatusDescHead = NULL;
360 		}
361 	}
362 }
363 
364 static void
365 cleanup_adapter(struct unm_adapter_s *adapter)
366 {
367 	if (adapter->cmd_buf_arr != NULL)
368 		kmem_free(adapter->cmd_buf_arr,
369 		    sizeof (struct unm_cmd_buffer) * adapter->MaxTxDescCount);
370 
371 	ddi_regs_map_free(&(adapter->regs_handle));
372 	ddi_regs_map_free(&(adapter->db_handle));
373 	kmem_free(adapter, sizeof (unm_adapter));
374 
375 }
376 
/*
 * Detach-time teardown: report link down, quiesce the port, disable and
 * destroy interrupts, and release every resource acquired at attach.
 * The ordering matters: traffic is stopped and interrupts masked before
 * any DMA memory is freed.
 */
void
unm_nic_remove(unm_adapter *adapter)
{
	unm_recv_context_t *recv_ctx;
	unm_rcv_desc_ctx_t	*rcv_desc;
	int ctx, ring;

	mac_link_update(adapter->mach, LINK_STATE_DOWN);
	unm_nic_stop_port(adapter);

	/* Mask the device interrupt before tearing anything down. */
	if (adapter->interrupt_crb) {
		UNM_READ_LOCK(&adapter->adapter_lock);
		unm_nic_disable_int(adapter);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}
	(void) untimeout(adapter->watchdog_timer);

	unm_free_hw_resources(adapter);
	unm_free_tx_buffers(adapter);
	unm_free_tx_dmahdl(adapter);

	/* Destroy every rx buffer pool that was actually created. */
	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			if (rcv_desc->rx_buf_pool != NULL)
				unm_destroy_rx_ring(rcv_desc);
		}
	}

	/*
	 * NOTE(review): presumably the dummy DMA area is allocated only by
	 * port 0 during attach — confirm against the attach path.
	 */
	if (adapter->portnum == 0)
		unm_free_dummy_dma(adapter);

	unm_destroy_intr(adapter);

	ddi_set_driver_private(adapter->dip, NULL);
	cleanup_adapter(adapter);
}
415 
416 static int
417 init_firmware(unm_adapter *adapter)
418 {
419 	uint32_t state = 0, loops = 0, tempout;
420 
421 	/* Window 1 call */
422 	UNM_READ_LOCK(&adapter->adapter_lock);
423 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_CMDPEG_STATE);
424 	UNM_READ_UNLOCK(&adapter->adapter_lock);
425 
426 	if (state == PHAN_INITIALIZE_ACK)
427 		return (0);
428 
429 	while (state != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
430 		drv_usecwait(100);
431 		/* Window 1 call */
432 		UNM_READ_LOCK(&adapter->adapter_lock);
433 		state = adapter->unm_nic_pci_read_normalize(adapter,
434 		    CRB_CMDPEG_STATE);
435 		UNM_READ_UNLOCK(&adapter->adapter_lock);
436 		loops++;
437 	}
438 
439 	if (loops >= 200000) {
440 		cmn_err(CE_WARN, "%s%d: CmdPeg init incomplete:%x\n",
441 		    adapter->name, adapter->instance, state);
442 		return (-EIO);
443 	}
444 
445 	/* Window 1 call */
446 	UNM_READ_LOCK(&adapter->adapter_lock);
447 	tempout = INTR_SCHEME_PERPORT;
448 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_CAPABILITIES_HOST,
449 	    &tempout, 4);
450 	tempout = MSI_MODE_MULTIFUNC;
451 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_MSI_MODE_HOST,
452 	    &tempout, 4);
453 	tempout = MPORT_MULTI_FUNCTION_MODE;
454 	adapter->unm_nic_hw_write_wx(adapter, CRB_MPORT_MODE, &tempout, 4);
455 	tempout = PHAN_INITIALIZE_ACK;
456 	adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &tempout, 4);
457 	UNM_READ_UNLOCK(&adapter->adapter_lock);
458 
459 	return (0);
460 }
461 
462 /*
463  * Utility to synchronize with receive peg.
464  *  Returns   0 on sucess
465  *         -EIO on error
466  */
467 int
468 receive_peg_ready(struct unm_adapter_s *adapter)
469 {
470 	uint32_t state = 0;
471 	int loops = 0, err = 0;
472 
473 	/* Window 1 call */
474 	UNM_READ_LOCK(&adapter->adapter_lock);
475 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_RCVPEG_STATE);
476 	UNM_READ_UNLOCK(&adapter->adapter_lock);
477 
478 	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 20000)) {
479 		drv_usecwait(100);
480 		/* Window 1 call */
481 
482 		UNM_READ_LOCK(&adapter->adapter_lock);
483 		state = adapter->unm_nic_pci_read_normalize(adapter,
484 		    CRB_RCVPEG_STATE);
485 		UNM_READ_UNLOCK(&adapter->adapter_lock);
486 
487 		loops++;
488 	}
489 
490 	if (loops >= 20000) {
491 		cmn_err(CE_WARN, "Receive Peg initialization incomplete 0x%x\n",
492 		    state);
493 		err = -EIO;
494 	}
495 
496 	return (err);
497 }
498 
499 /*
500  * check if the firmware has been downloaded and ready to run  and
501  * setup the address for the descriptors in the adapter
502  */
503 static int
504 unm_nic_hw_resources(unm_adapter *adapter)
505 {
506 	hardware_context	*hw = &adapter->ahw;
507 	void			*addr;
508 	int			err;
509 	int			ctx, ring;
510 	unm_recv_context_t	*recv_ctx;
511 	unm_rcv_desc_ctx_t	*rcv_desc;
512 	ddi_dma_cookie_t	cookie;
513 	int			size;
514 
515 	if (err = receive_peg_ready(adapter))
516 		return (err);
517 
518 	size = (sizeof (RingContext) + sizeof (uint32_t));
519 
520 	err = unm_pci_alloc_consistent(adapter,
521 	    size, (caddr_t *)&addr, &cookie,
522 	    &adapter->ctxDesc_dma_handle,
523 	    &adapter->ctxDesc_acc_handle);
524 	if (err != DDI_SUCCESS) {
525 		cmn_err(CE_WARN, "Failed to allocate HW context\n");
526 		return (err);
527 	}
528 
529 	adapter->ctxDesc_physAddr = cookie.dmac_laddress;
530 
531 	(void) memset(addr, 0, sizeof (RingContext));
532 
533 	adapter->ctxDesc = (RingContext *) addr;
534 	adapter->ctxDesc->CtxId = adapter->portnum;
535 	adapter->ctxDesc->CMD_CONSUMER_OFFSET =
536 	    adapter->ctxDesc_physAddr + sizeof (RingContext);
537 	adapter->cmdConsumer =
538 	    (uint32_t *)(uintptr_t)(((char *)addr) + sizeof (RingContext));
539 
540 	ASSERT(!((unsigned long)adapter->ctxDesc_physAddr & 0x3f));
541 
542 	/*
543 	 * Allocate command descriptor ring.
544 	 */
545 	size = (sizeof (cmdDescType0_t) * adapter->MaxTxDescCount);
546 	err = unm_pci_alloc_consistent(adapter,
547 	    size, (caddr_t *)&addr, &cookie,
548 	    &hw->cmd_desc_dma_handle,
549 	    &hw->cmd_desc_acc_handle);
550 	if (err != DDI_SUCCESS) {
551 		cmn_err(CE_WARN, "Failed to allocate cmd desc ring\n");
552 		return (err);
553 	}
554 
555 	hw->cmdDesc_physAddr = cookie.dmac_laddress;
556 	hw->cmdDescHead = (cmdDescType0_t *)addr;
557 
558 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
559 		recv_ctx = &adapter->recv_ctx[ctx];
560 
561 		size = (sizeof (statusDesc_t)* adapter->MaxRxDescCount);
562 		err = unm_pci_alloc_consistent(adapter,
563 		    size, (caddr_t *)&addr,
564 		    &recv_ctx->status_desc_dma_cookie,
565 		    &recv_ctx->status_desc_dma_handle,
566 		    &recv_ctx->status_desc_acc_handle);
567 		if (err != DDI_SUCCESS) {
568 			cmn_err(CE_WARN, "Failed to allocate sts desc ring\n");
569 			goto free_cmd_desc;
570 		}
571 
572 		(void) memset(addr, 0, size);
573 		recv_ctx->rcvStatusDesc_physAddr =
574 		    recv_ctx->status_desc_dma_cookie.dmac_laddress;
575 		recv_ctx->rcvStatusDescHead = (statusDesc_t *)addr;
576 
577 		/* rds rings */
578 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
579 			rcv_desc = &recv_ctx->rcv_desc[ring];
580 
581 			size = (sizeof (rcvDesc_t) * adapter->MaxRxDescCount);
582 			err = unm_pci_alloc_consistent(adapter,
583 			    size, (caddr_t *)&addr,
584 			    &rcv_desc->rx_desc_dma_cookie,
585 			    &rcv_desc->rx_desc_dma_handle,
586 			    &rcv_desc->rx_desc_acc_handle);
587 			if (err != DDI_SUCCESS) {
588 				cmn_err(CE_WARN, "Failed to allocate "
589 				    "rx desc ring %d\n", ring);
590 				goto free_status_desc;
591 			}
592 
593 			rcv_desc->phys_addr =
594 			    rcv_desc->rx_desc_dma_cookie.dmac_laddress;
595 			rcv_desc->desc_head = (rcvDesc_t *)addr;
596 		}
597 	}
598 
599 	if (err = netxen_create_rxtx(adapter))
600 		goto free_statusrx_desc;
601 	adapter->context_alloced = 1;
602 
603 	return (DDI_SUCCESS);
604 
605 free_statusrx_desc:
606 free_status_desc:
607 free_cmd_desc:
608 	unm_free_hw_resources(adapter);
609 
610 	return (err);
611 }
612 
613 void unm_desc_dma_sync(ddi_dma_handle_t handle, uint_t start, uint_t count,
614     uint_t range, uint_t unit_size, uint_t direction)
615 {
616 	if ((start + count) < range) {
617 		(void) ddi_dma_sync(handle, start * unit_size,
618 		    count * unit_size, direction);
619 	} else {
620 		(void) ddi_dma_sync(handle, start * unit_size, 0, direction);
621 		(void) ddi_dma_sync(handle, 0,
622 		    (start + count - range) * unit_size, DDI_DMA_SYNC_FORCPU);
623 	}
624 }
625 
/*
 * CRB offsets of the command-ring producer/consumer registers.
 * NOTE(review): presumably indexed by port/function number (0-3) —
 * confirm against the code that selects crb_addr_cmd_producer/consumer.
 */
static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET,
    CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2,
    CRB_CMD_PRODUCER_OFFSET_3 };

static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET,
    CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2,
    CRB_CMD_CONSUMER_OFFSET_3 };
633 
634 void
635 unm_nic_update_cmd_producer(struct unm_adapter_s *adapter,
636     uint32_t crb_producer)
637 {
638 	int data = crb_producer;
639 
640 	if (adapter->crb_addr_cmd_producer) {
641 		UNM_READ_LOCK(&adapter->adapter_lock);
642 		adapter->unm_nic_hw_write_wx(adapter,
643 		    adapter->crb_addr_cmd_producer, &data, 4);
644 		UNM_READ_UNLOCK(&adapter->adapter_lock);
645 	}
646 }
647 
648 static void
649 unm_nic_update_cmd_consumer(struct unm_adapter_s *adapter,
650     uint32_t crb_producer)
651 {
652 	int data = crb_producer;
653 
654 	if (adapter->crb_addr_cmd_consumer)
655 		adapter->unm_nic_hw_write_wx(adapter,
656 		    adapter->crb_addr_cmd_consumer, &data, 4);
657 }
658 
659 /*
660  * Looks for type of packet and sets opcode accordingly
661  * so that checksum offload can be used.
662  */
663 static void
664 unm_tx_csum(cmdDescType0_t *desc, mblk_t *mp, pktinfo_t *pktinfo)
665 {
666 	if (pktinfo->mac_hlen == sizeof (struct ether_vlan_header))
667 		desc->u1.s1.flags = FLAGS_VLAN_TAGGED;
668 
669 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
670 		uint32_t	start, flags;
671 
672 		hcksum_retrieve(mp, NULL, NULL, &start, NULL, NULL, NULL,
673 		    &flags);
674 		if ((flags & (HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM)) == 0)
675 			return;
676 
677 		/*
678 		 * For TCP/UDP, ask hardware to do both IP header and
679 		 * full checksum, even if stack has already done one or
680 		 * the other. Hardware will always get it correct even
681 		 * if stack has already done it.
682 		 */
683 		switch (pktinfo->l4_proto) {
684 			case IPPROTO_TCP:
685 				desc->u1.s1.opcode = TX_TCP_PKT;
686 				break;
687 			case IPPROTO_UDP:
688 				desc->u1.s1.opcode = TX_UDP_PKT;
689 				break;
690 			default:
691 				/* Must be here with HCK_IPV4_HDRCKSUM */
692 				desc->u1.s1.opcode = TX_IP_PKT;
693 				return;
694 		}
695 
696 		desc->u1.s1.ipHdrOffset = pktinfo->mac_hlen;
697 		desc->u1.s1.tcpHdrOffset = pktinfo->mac_hlen + pktinfo->ip_hlen;
698 	}
699 }
700 
701 /*
702  * For IP/UDP/TCP checksum offload, this checks for MAC+IP header in one
703  * contiguous block ending at 8 byte aligned address as required by hardware.
704  * Caller assumes pktinfo->total_len will be updated by this function and
705  * if pktinfo->etype is set to 0, it will need to linearize the mblk and
706  * invoke unm_update_pkt_info() to determine ethertype, IP header len and
707  * protocol.
708  */
709 static boolean_t
710 unm_get_pkt_info(mblk_t *mp, pktinfo_t *pktinfo)
711 {
712 	mblk_t		*bp;
713 	ushort_t	type;
714 
715 	(void) memset(pktinfo, 0, sizeof (pktinfo_t));
716 
717 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
718 		if (MBLKL(bp) == 0)
719 			continue;
720 		pktinfo->mblk_no++;
721 		pktinfo->total_len += MBLKL(bp);
722 	}
723 
724 	if (MBLKL(mp) < (sizeof (struct ether_header) + sizeof (ipha_t)))
725 		return (B_FALSE);
726 
727 	/*
728 	 * We just need non 1 byte aligned address, since ether_type is
729 	 * ushort.
730 	 */
731 	if ((uintptr_t)mp->b_rptr & 1)
732 		return (B_FALSE);
733 
734 	type = ((struct ether_header *)(uintptr_t)mp->b_rptr)->ether_type;
735 	if (type == htons(ETHERTYPE_VLAN)) {
736 		if (MBLKL(mp) < (sizeof (struct ether_vlan_header) +
737 		    sizeof (ipha_t)))
738 			return (B_FALSE);
739 		type = ((struct ether_vlan_header *) \
740 		    (uintptr_t)mp->b_rptr)->ether_type;
741 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
742 	} else {
743 		pktinfo->mac_hlen = sizeof (struct ether_header);
744 	}
745 	pktinfo->etype = type;
746 
747 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
748 		uchar_t *ip_off = mp->b_rptr + pktinfo->mac_hlen;
749 
750 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ip_off);
751 		pktinfo->l4_proto =
752 		    ((ipha_t *)(uintptr_t)ip_off)->ipha_protocol;
753 
754 		/* IP header not aligned to quadward boundary? */
755 		if ((unsigned long)(ip_off + pktinfo->ip_hlen) % 8 != 0)
756 			return (B_FALSE);
757 	}
758 
759 	return (B_TRUE);
760 }
761 
762 static void
763 unm_update_pkt_info(char *ptr, pktinfo_t *pktinfo)
764 {
765 	ushort_t	type;
766 
767 	type = ((struct ether_header *)(uintptr_t)ptr)->ether_type;
768 	if (type == htons(ETHERTYPE_VLAN)) {
769 		type = ((struct ether_vlan_header *)(uintptr_t)ptr)->ether_type;
770 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
771 	} else {
772 		pktinfo->mac_hlen = sizeof (struct ether_header);
773 	}
774 	pktinfo->etype = type;
775 
776 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
777 		char *ipp = ptr + pktinfo->mac_hlen;
778 
779 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ipp);
780 		pktinfo->l4_proto = ((ipha_t *)(uintptr_t)ipp)->ipha_protocol;
781 	}
782 }
783 
/*
 * Copy transmit path: bcopy the whole packet into the pre-allocated DMA
 * buffer of one command buffer and post a single descriptor.  On success
 * the mblk is freed here and B_TRUE is returned; B_FALSE (mp untouched)
 * means no descriptors were available and the caller should retry later.
 */
static boolean_t
unm_send_copy(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
{
	hardware_context *hw;
	u32				producer = 0;
	cmdDescType0_t			*hwdesc;
	struct unm_cmd_buffer		*pbuf = NULL;
	u32				mblen;
	int				no_of_desc = 1;
	int				MaxTxDescCount;
	mblk_t				*bp;
	char				*txb;

	hw = &adapter->ahw;
	MaxTxDescCount = adapter->MaxTxDescCount;

	UNM_SPIN_LOCK(&adapter->tx_lock);
	membar_enter();

	/* Need headroom in the ring; bail out when nearly full. */
	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
	    MaxTxDescCount) <= 2) {
		adapter->stats.outofcmddesc++;
		adapter->resched_needed = 1;
		membar_exit();
		UNM_SPIN_UNLOCK(&adapter->tx_lock);
		return (B_FALSE);
	}
	adapter->freecmds -= no_of_desc;

	producer = adapter->cmdProducer;

	adapter->cmdProducer = get_index_range(adapter->cmdProducer,
	    MaxTxDescCount, no_of_desc);

	hwdesc = &hw->cmdDescHead[producer];
	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
	pbuf = &adapter->cmd_buf_arr[producer];

	/* Copy path: nothing to free at tx-complete besides the buffer. */
	pbuf->msg = NULL;
	pbuf->head = NULL;
	pbuf->tail = NULL;

	txb = pbuf->dma_area.vaddr;

	/* Linearize the packet into the command buffer. */
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		bcopy(bp->b_rptr, txb, mblen);
		txb += mblen;
	}

	/*
	 * Determine metadata if not previously done due to fragmented mblk.
	 */
	if (pktinfo->etype == 0)
		unm_update_pkt_info(pbuf->dma_area.vaddr, pktinfo);

	/* Flush the copied payload to the device before posting. */
	(void) ddi_dma_sync(pbuf->dma_area.dma_hdl,
	    0, pktinfo->total_len, DDI_DMA_SYNC_FORDEV);

	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
	/* hwdesc->mss = 0; */
	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
	hwdesc->u3.s1.port = adapter->portnum;
	hwdesc->u3.s1.ctx_id = adapter->portnum;

	/* Single contiguous buffer describes the whole frame. */
	hwdesc->u6.s1.buffer1Length = pktinfo->total_len;
	hwdesc->u5.AddrBuffer1 = pbuf->dma_area.dma_addr;
	hwdesc->u1.s1.numOfBuffers = 1;
	hwdesc->u1.s1.totalLength = pktinfo->total_len;

	unm_tx_csum(hwdesc, mp, pktinfo);

	unm_desc_dma_sync(hw->cmd_desc_dma_handle,
	    producer,
	    no_of_desc,
	    MaxTxDescCount,
	    sizeof (cmdDescType0_t),
	    DDI_DMA_SYNC_FORDEV);

	/* Tell the card about the new producer index. */
	hw->cmdProducer = adapter->cmdProducer;
	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);

	adapter->stats.txbytes += pktinfo->total_len;
	adapter->stats.xmitfinished++;
	adapter->stats.txcopyed++;
	UNM_SPIN_UNLOCK(&adapter->tx_lock);

	freemsg(mp);
	return (B_TRUE);
}
875 
876 /* Should be called with adapter->tx_lock held. */
877 static void
878 unm_return_dma_handle(unm_adapter *adapter, unm_dmah_node_t *head,
879     unm_dmah_node_t *tail, uint32_t num)
880 {
881 	ASSERT(tail != NULL);
882 	tail->next = adapter->dmahdl_pool;
883 	adapter->dmahdl_pool = head;
884 	adapter->freehdls += num;
885 }
886 
887 static unm_dmah_node_t *
888 unm_reserve_dma_handle(unm_adapter* adapter)
889 {
890 	unm_dmah_node_t *dmah = NULL;
891 
892 	dmah = adapter->dmahdl_pool;
893 	if (dmah != NULL) {
894 		adapter->dmahdl_pool = dmah->next;
895 		dmah->next = NULL;
896 		adapter->freehdls--;
897 		membar_exit();
898 	}
899 
900 	return (dmah);
901 }
902 
/*
 * Zero-copy transmit path: DMA-bind every mblk fragment and hand the
 * resulting cookies to the card, four buffer pointers per command
 * descriptor.  Returns B_TRUE when the packet was queued (mp and the
 * bound handles are then released at tx-complete time); B_FALSE when
 * binding or descriptor reservation failed, in which case everything
 * is unwound and the caller may fall back to the copy path.
 */
static boolean_t
unm_send_mapped(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
{
	hardware_context		*hw;
	u32				producer = 0;
	u32				saved_producer = 0;
	cmdDescType0_t			*hwdesc;
	struct unm_cmd_buffer		*pbuf = NULL;
	int				no_of_desc;
	int				k;
	int				MaxTxDescCount;
	mblk_t				*bp;

	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL, *hdlp;
	ddi_dma_cookie_t cookie[MAX_COOKIES_PER_CMD + 1];
	int ret, i;
	uint32_t hdl_reserved = 0;
	uint32_t mblen;
	uint32_t ncookies, index = 0, total_cookies = 0;

	MaxTxDescCount = adapter->MaxTxDescCount;

	UNM_SPIN_LOCK(&adapter->tx_lock);

	/* bind all the mblks of the packet first */
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		dmah = unm_reserve_dma_handle(adapter);
		if (dmah == NULL) {
			adapter->stats.outoftxdmahdl++;
			goto err_map;
		}

		ret = ddi_dma_addr_bind_handle(dmah->dmahdl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies);

		if (ret != DDI_DMA_MAPPED)
			goto err_map;

		/* Chain bound handles so they can be returned later. */
		if (tail == NULL) {
			head = tail = dmah;
		} else {
			tail->next = dmah;
			tail = dmah;
		}
		hdl_reserved++;

		total_cookies += ncookies;
		if (total_cookies > MAX_COOKIES_PER_CMD) {
			/* Already chained above; don't append it twice. */
			dmah = NULL;
			goto err_map;
		}

		if (index == 0) {
			size_t	hsize = cookie[0].dmac_size;

			/*
			 * For TCP/UDP packets with checksum offload,
			 * MAC/IP headers need to be contiguous. Otherwise,
			 * there must be at least 16 bytes in the first
			 * descriptor.
			 */
			if ((pktinfo->l4_proto == IPPROTO_TCP) ||
			    (pktinfo->l4_proto == IPPROTO_UDP)) {
				if (hsize < (pktinfo->mac_hlen +
				    pktinfo->ip_hlen)) {
					dmah = NULL;
					goto err_map;
				}
			} else {
				if (hsize < 16) {
					dmah = NULL;
					goto err_map;
				}
			}
		}

		index++;
		ncookies--;
		/* Gather the remaining cookies of this binding. */
		for (i = 0; i < ncookies; i++, index++)
			ddi_dma_nextcookie(dmah->dmahdl, &cookie[index]);
	}

	dmah = NULL;
	hw = &adapter->ahw;
	/* Each descriptor carries up to four buffer pointers. */
	no_of_desc = (total_cookies + 3) >> 2;

	membar_enter();
	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
	    MaxTxDescCount) < no_of_desc+2) {
		/*
		 * If we are going to be trying the copy path, no point
		 * scheduling an upcall when Tx resources are freed.
		 */
		if (pktinfo->total_len > adapter->maxmtu) {
			adapter->stats.outofcmddesc++;
			adapter->resched_needed = 1;
		}
		membar_exit();
		goto err_alloc_desc;
	}
	adapter->freecmds -= no_of_desc;

	/* Copy the descriptors into the hardware    */
	producer = adapter->cmdProducer;
	saved_producer = producer;
	hwdesc = &hw->cmdDescHead[producer];
	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
	pbuf = &adapter->cmd_buf_arr[producer];

	/* Stash mp and the handle chain for tx-complete processing. */
	pbuf->msg = mp;
	pbuf->head = head;
	pbuf->tail = tail;

	hwdesc->u1.s1.numOfBuffers = total_cookies;
	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
	hwdesc->u3.s1.port = adapter->portnum;
	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
	/* hwdesc->mss = 0; */
	hwdesc->u3.s1.ctx_id = adapter->portnum;
	hwdesc->u1.s1.totalLength = pktinfo->total_len;
	unm_tx_csum(hwdesc, mp, pktinfo);

	for (i = k = 0; i < total_cookies; i++) {
		if (k == 4) {
			/* Move to the next descriptor */
			k = 0;
			producer = get_next_index(producer, MaxTxDescCount);
			hwdesc = &hw->cmdDescHead[producer];
			(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
		}

		switch (k) {
		case 0:
			hwdesc->u6.s1.buffer1Length = cookie[i].dmac_size;
			hwdesc->u5.AddrBuffer1 = cookie[i].dmac_laddress;
			break;
		case 1:
			hwdesc->u6.s1.buffer2Length = cookie[i].dmac_size;
			hwdesc->u2.AddrBuffer2 = cookie[i].dmac_laddress;
			break;
		case 2:
			hwdesc->u6.s1.buffer3Length = cookie[i].dmac_size;
			hwdesc->u4.AddrBuffer3 = cookie[i].dmac_laddress;
			break;
		case 3:
			hwdesc->u6.s1.buffer4Length = cookie[i].dmac_size;
			hwdesc->u7.AddrBuffer4 = cookie[i].dmac_laddress;
			break;
		}
		k++;
	}

	/* Flush the descriptors to memory before notifying the card. */
	unm_desc_dma_sync(hw->cmd_desc_dma_handle, saved_producer, no_of_desc,
	    MaxTxDescCount, sizeof (cmdDescType0_t), DDI_DMA_SYNC_FORDEV);

	adapter->cmdProducer = get_next_index(producer, MaxTxDescCount);
	hw->cmdProducer = adapter->cmdProducer;
	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);

	adapter->stats.txbytes += pktinfo->total_len;
	adapter->stats.xmitfinished++;
	adapter->stats.txmapped++;
	UNM_SPIN_UNLOCK(&adapter->tx_lock);
	return (B_TRUE);

err_alloc_desc:
err_map:

	/* Unbind everything bound so far. */
	hdlp = head;
	while (hdlp != NULL) {
		(void) ddi_dma_unbind_handle(hdlp->dmahdl);
		hdlp = hdlp->next;
	}

	/*
	 * add the reserved but bind failed one to the list to be returned
	 */
	if (dmah != NULL) {
		if (tail == NULL)
			head = tail = dmah;
		else {
			tail->next = dmah;
			tail = dmah;
		}
		hdl_reserved++;
	}

	if (head != NULL)
		unm_return_dma_handle(adapter, head, tail, hdl_reserved);

	UNM_SPIN_UNLOCK(&adapter->tx_lock);
	return (B_FALSE);
}
1102 
1103 static boolean_t
1104 unm_nic_xmit_frame(unm_adapter *adapter, mblk_t *mp)
1105 {
1106 	pktinfo_t	pktinfo;
1107 	boolean_t	status = B_FALSE, send_mapped;
1108 
1109 	adapter->stats.xmitcalled++;
1110 
1111 	send_mapped = unm_get_pkt_info(mp, &pktinfo);
1112 
1113 	if (pktinfo.total_len <= adapter->tx_bcopy_threshold ||
1114 	    pktinfo.mblk_no >= MAX_BUFFERS_PER_CMD)
1115 		send_mapped = B_FALSE;
1116 
1117 	if (send_mapped == B_TRUE)
1118 		status = unm_send_mapped(adapter, mp, &pktinfo);
1119 
1120 	if (status != B_TRUE) {
1121 		if (pktinfo.total_len <= adapter->maxmtu)
1122 			return (unm_send_copy(adapter, mp, &pktinfo));
1123 
1124 		/* message too large */
1125 		freemsg(mp);
1126 		adapter->stats.txdropped++;
1127 		status = B_TRUE;
1128 	}
1129 
1130 	return (status);
1131 }
1132 
1133 static int
1134 unm_nic_check_temp(struct unm_adapter_s *adapter)
1135 {
1136 	uint32_t temp, temp_state, temp_val;
1137 	int rv = 0;
1138 
1139 	if ((adapter->ahw.revision_id == NX_P3_A2) ||
1140 	    (adapter->ahw.revision_id == NX_P3_A0))
1141 		return (0);
1142 
1143 	temp = adapter->unm_nic_pci_read_normalize(adapter, CRB_TEMP_STATE);
1144 
1145 	temp_state = nx_get_temp_state(temp);
1146 	temp_val = nx_get_temp_val(temp);
1147 
1148 	if (temp_state == NX_TEMP_PANIC) {
1149 		cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1150 		    "maximum allowed, device has been shut down\n",
1151 		    unm_nic_driver_name, temp_val);
1152 		rv = 1;
1153 	} else if (temp_state == NX_TEMP_WARN) {
1154 		if (adapter->temp == NX_TEMP_NORMAL) {
1155 		cmn_err(CE_WARN, "%s: Device temperature %d C exceeds"
1156 		    "operating range. Immediate action needed.\n",
1157 		    unm_nic_driver_name, temp_val);
1158 		}
1159 	} else {
1160 		if (adapter->temp == NX_TEMP_WARN) {
1161 			cmn_err(CE_WARN, "%s: Device temperature is now %d "
1162 			    "degrees C in normal range.\n",
1163 			    unm_nic_driver_name, temp_val);
1164 		}
1165 	}
1166 
1167 	adapter->temp = temp_state;
1168 	return (rv);
1169 }
1170 
1171 static void
1172 unm_watchdog(unsigned long v)
1173 {
1174 	unm_adapter *adapter = (unm_adapter *)v;
1175 
1176 	if ((adapter->portnum == 0) && unm_nic_check_temp(adapter)) {
1177 		/*
1178 		 * We return without turning on the netdev queue as there
1179 		 * was an overheated device
1180 		 */
1181 		return;
1182 	}
1183 
1184 	unm_nic_handle_phy_intr(adapter);
1185 
1186 	/*
1187 	 * This function schedules a call for itself.
1188 	 */
1189 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1190 	    (void *)adapter, 2 * drv_usectohz(1000000));
1191 
1192 }
1193 
/* Reset all driver-maintained (soft) statistics counters to zero. */
static void unm_nic_clear_stats(unm_adapter *adapter)
{
	(void) memset(&adapter->stats, 0, sizeof (adapter->stats));
}
1198 
1199 static void
1200 unm_nic_poll(unm_adapter *adapter)
1201 {
1202 	int	work_done, tx_complete;
1203 
1204 	adapter->stats.polled++;
1205 
1206 loop:
1207 	tx_complete = unm_process_cmd_ring(adapter);
1208 	work_done = unm_process_rcv_ring(adapter, NX_RX_MAXBUFS);
1209 	if ((!tx_complete) || (!(work_done < NX_RX_MAXBUFS)))
1210 		goto loop;
1211 
1212 	UNM_READ_LOCK(&adapter->adapter_lock);
1213 	unm_nic_enable_int(adapter);
1214 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1215 }
1216 
1217 /* ARGSUSED */
1218 uint_t
1219 unm_intr(caddr_t data, caddr_t arg)
1220 {
1221 	unm_adapter	*adapter = (unm_adapter *)(uintptr_t)data;
1222 
1223 	if (unm_nic_clear_int(adapter))
1224 		return (DDI_INTR_UNCLAIMED);
1225 
1226 	unm_nic_poll(adapter);
1227 	return (DDI_INTR_CLAIMED);
1228 }
1229 
1230 /*
1231  * This is invoked from receive isr. Due to the single threaded nature
1232  * of the invocation, pool_lock acquisition is not neccesary to protect
1233  * pool_list.
1234  */
1235 static void
1236 unm_free_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc, unm_rx_buffer_t *rx_buffer)
1237 {
1238 	/* mutex_enter(rcv_desc->pool_lock); */
1239 	rx_buffer->next = rcv_desc->pool_list;
1240 	rcv_desc->pool_list = rx_buffer;
1241 	rcv_desc->rx_buf_free++;
1242 	/* mutex_exit(rcv_desc->pool_lock); */
1243 }
1244 
1245 /*
1246  * unm_process_rcv() send the received packet to the protocol stack.
1247  */
1248 static mblk_t *
1249 unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc)
1250 {
1251 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1252 	unm_rx_buffer_t		*rx_buffer;
1253 	mblk_t *mp;
1254 	u32			desc_ctx = desc->u1.s1.type;
1255 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
1256 	u32			pkt_length = desc->u1.s1.totalLength;
1257 	int			poff = desc->u1.s1.pkt_offset;
1258 	int			index, cksum_flags, docopy;
1259 	int			index_lo = desc->u1.s1.referenceHandle_lo;
1260 	char			*vaddr;
1261 
1262 	index = ((desc->u1.s1.referenceHandle_hi << 4) | index_lo);
1263 
1264 	rx_buffer = index2rxbuf(rcv_desc, index);
1265 
1266 	if (rx_buffer == NULL) {
1267 		cmn_err(CE_WARN, "\r\nNULL rx_buffer idx=%d", index);
1268 		return (NULL);
1269 	}
1270 	vaddr = (char *)rx_buffer->dma_info.vaddr;
1271 	if (vaddr == NULL) {
1272 		cmn_err(CE_WARN, "\r\nNULL vaddr");
1273 		return (NULL);
1274 	}
1275 	rcv_desc->rx_desc_handled++;
1276 	rcv_desc->rx_buf_card--;
1277 
1278 	(void) ddi_dma_sync(rx_buffer->dma_info.dma_hdl, 0,
1279 	    pkt_length + poff + (adapter->ahw.cut_through ? 0 :
1280 	    IP_ALIGNMENT_BYTES), DDI_DMA_SYNC_FORCPU);
1281 
1282 	/*
1283 	 * Copy packet into new allocated message buffer, if pkt_length
1284 	 * is below copy threshold.
1285 	 */
1286 	docopy = (pkt_length <= adapter->rx_bcopy_threshold) ? 1 : 0;
1287 
1288 	/*
1289 	 * If card is running out of rx buffers, then attempt to allocate
1290 	 * new mblk so we can feed this rx buffer back to card (we
1291 	 * _could_ look at what's pending on free and recycle lists).
1292 	 */
1293 	if (rcv_desc->rx_buf_card < NX_RX_THRESHOLD) {
1294 		docopy = 1;
1295 		adapter->stats.rxbufshort++;
1296 	}
1297 
1298 	if (docopy == 1) {
1299 		if ((mp = allocb(pkt_length + IP_ALIGNMENT_BYTES, 0)) == NULL) {
1300 			adapter->stats.allocbfailed++;
1301 			goto freebuf;
1302 		}
1303 
1304 		mp->b_rptr += IP_ALIGNMENT_BYTES;
1305 		vaddr += poff;
1306 		bcopy(vaddr, mp->b_rptr, pkt_length);
1307 		adapter->stats.rxcopyed++;
1308 		unm_free_rx_buffer(rcv_desc, rx_buffer);
1309 	} else {
1310 		mp = (mblk_t *)rx_buffer->mp;
1311 		if (mp == NULL) {
1312 			mp = desballoc(rx_buffer->dma_info.vaddr,
1313 			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
1314 			if (mp == NULL) {
1315 				adapter->stats.desballocfailed++;
1316 				goto freebuf;
1317 			}
1318 			rx_buffer->mp = mp;
1319 		}
1320 		mp->b_rptr += poff;
1321 		adapter->stats.rxmapped++;
1322 	}
1323 
1324 	mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_length);
1325 
1326 	if (desc->u1.s1.status == STATUS_CKSUM_OK) {
1327 		adapter->stats.csummed++;
1328 		cksum_flags =
1329 		    HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM | HCK_FULLCKSUM;
1330 	} else {
1331 		cksum_flags = 0;
1332 	}
1333 	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, cksum_flags, 0);
1334 
1335 	adapter->stats.no_rcv++;
1336 	adapter->stats.rxbytes += pkt_length;
1337 	adapter->stats.uphappy++;
1338 
1339 	return (mp);
1340 
1341 freebuf:
1342 	unm_free_rx_buffer(rcv_desc, rx_buffer);
1343 	return (NULL);
1344 }
1345 
/*
 * Process Receive status ring.
 *
 * Consumes up to "max" host-owned status descriptors, delivering each
 * packet via mac_rx(), then reposts drained rx buffers and pushes the
 * new consumer index to the card.  Returns the number of descriptors
 * processed.
 */
static int
unm_process_rcv_ring(unm_adapter *adapter, int max)
{
	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
	statusDesc_t		*desc_head = recv_ctx->rcvStatusDescHead;
	statusDesc_t		*desc = NULL;
	uint32_t		consumer, start;
	int			count = 0, ring;
	mblk_t *mp;

	start = consumer = recv_ctx->statusRxConsumer;

	/* Make up to "max" status descriptors visible to the CPU. */
	unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start, max,
	    adapter->MaxRxDescCount, sizeof (statusDesc_t),
	    DDI_DMA_SYNC_FORCPU);

	while (count < max) {
		desc = &desc_head[consumer];
		/* Stop at the first descriptor the card still owns. */
		if (!(desc->u1.s1.owner & STATUS_OWNER_HOST))
			break;

		mp = unm_process_rcv(adapter, desc);
		/* Return descriptor ownership to the card (Phantom). */
		desc->u1.s1.owner = STATUS_OWNER_PHANTOM;

		consumer = (consumer + 1) % adapter->MaxRxDescCount;
		count++;
		if (mp != NULL)
			mac_rx(adapter->mach, NULL, mp);
	}

	/* Repost any rx buffers that unm_process_rcv() consumed. */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		if (recv_ctx->rcv_desc[ring].rx_desc_handled > 0)
			unm_post_rx_buffers_nodb(adapter, ring);
	}

	if (count) {
		/* Flush the ownership updates back to the device. */
		unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start,
		    count, adapter->MaxRxDescCount, sizeof (statusDesc_t),
		    DDI_DMA_SYNC_FORDEV);

		/* update the consumer index in phantom */
		recv_ctx->statusRxConsumer = consumer;

		UNM_READ_LOCK(&adapter->adapter_lock);
		adapter->unm_nic_hw_write_wx(adapter,
		    recv_ctx->host_sds_consumer, &consumer, 4);
		UNM_READ_UNLOCK(&adapter->adapter_lock);
	}

	return (count);
}
1398 
/*
 * Process Command status ring.
 *
 * Walks Tx descriptors from the driver's last consumer index up to the
 * card-reported consumer, unbinding DMA handles and freeing the sent
 * mblks.  Returns non-zero when the Tx ring is fully reaped (i.e. the
 * driver has caught up with the card's consumer index).
 */
static int
unm_process_cmd_ring(struct unm_adapter_s *adapter)
{
	u32			last_consumer;
	u32			consumer;
	int			count = 0;
	struct unm_cmd_buffer	*buffer;
	int			done;
	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL;
	uint32_t	free_hdls = 0;

	/* Refresh the card-written consumer index in host memory. */
	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);

	last_consumer = adapter->lastCmdConsumer;
	consumer = *(adapter->cmdConsumer);

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->head != NULL) {
			/* Unbind every DMA handle used by this Tx buffer. */
			dmah = buffer->head;
			while (dmah != NULL) {
				(void) ddi_dma_unbind_handle(dmah->dmahdl);
				dmah = dmah->next;
				free_hdls++;
			}

			/* Chain the handles for a single bulk return below. */
			if (head == NULL) {
				head = buffer->head;
				tail = buffer->tail;
			} else {
				tail->next = buffer->head;
				tail = buffer->tail;
			}

			buffer->head = NULL;
			buffer->tail = NULL;

			if (buffer->msg != NULL) {
				freemsg(buffer->msg);
				buffer->msg = NULL;
			}
		}

		last_consumer = get_next_index(last_consumer,
		    adapter->MaxTxDescCount);
		/* Bound the work done in one pass. */
		if (++count > NX_MAX_TXCOMPS)
			break;
	}

	if (count) {
		int	doresched;

		UNM_SPIN_LOCK(&adapter->tx_lock);
		adapter->lastCmdConsumer = last_consumer;
		adapter->freecmds += count;
		/* Publish the new free count before reading resched_needed. */
		membar_exit();

		doresched = adapter->resched_needed;
		if (doresched)
			adapter->resched_needed = 0;

		if (head != NULL)
			unm_return_dma_handle(adapter, head, tail, free_hdls);

		UNM_SPIN_UNLOCK(&adapter->tx_lock);

		/* Tx descriptors were freed; let MAC retry blocked sends. */
		if (doresched)
			mac_tx_update(adapter->mach);
	}

	/* Re-read the consumer index to decide whether we are done. */
	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);

	consumer = *(adapter->cmdConsumer);
	done = (adapter->lastCmdConsumer == consumer);

	return (done);
}
1479 
1480 /*
1481  * This is invoked from receive isr, and at initialization time when no
1482  * rx buffers have been posted to card. Due to the single threaded nature
1483  * of the invocation, pool_lock acquisition is not neccesary to protect
1484  * pool_list.
1485  */
1486 static unm_rx_buffer_t *
1487 unm_reserve_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc)
1488 {
1489 	unm_rx_buffer_t *rx_buffer = NULL;
1490 
1491 	/* mutex_enter(rcv_desc->pool_lock); */
1492 	if (rcv_desc->rx_buf_free) {
1493 		rx_buffer = rcv_desc->pool_list;
1494 		rcv_desc->pool_list = rx_buffer->next;
1495 		rx_buffer->next = NULL;
1496 		rcv_desc->rx_buf_free--;
1497 	} else {
1498 		mutex_enter(rcv_desc->recycle_lock);
1499 
1500 		if (rcv_desc->rx_buf_recycle) {
1501 			rcv_desc->pool_list = rcv_desc->recycle_list;
1502 			rcv_desc->recycle_list = NULL;
1503 			rcv_desc->rx_buf_free += rcv_desc->rx_buf_recycle;
1504 			rcv_desc->rx_buf_recycle = 0;
1505 
1506 			rx_buffer = rcv_desc->pool_list;
1507 			rcv_desc->pool_list = rx_buffer->next;
1508 			rx_buffer->next = NULL;
1509 			rcv_desc->rx_buf_free--;
1510 		}
1511 
1512 		mutex_exit(rcv_desc->recycle_lock);
1513 	}
1514 
1515 	/* mutex_exit(rcv_desc->pool_lock); */
1516 	return (rx_buffer);
1517 }
1518 
/*
 * Ring the receive-peg doorbell to notify firmware (phanmon) that
 * "count" new rx descriptors were produced on ring "ringid".  Only
 * used with pre-v4 firmware (see unm_post_rx_buffers()).
 */
static void
post_rx_doorbell(struct unm_adapter_s *adapter, uint32_t ringid, int count)
{
#define	UNM_RCV_PEG_DB_ID	2
#define	UNM_RCV_PRODUCER_OFFSET	0
	ctx_msg msg = {0};

	/*
	 * Write a doorbell msg to tell phanmon of change in
	 * receive ring producer
	 */
	msg.PegId = UNM_RCV_PEG_DB_ID;
	msg.privId = 1;
	msg.Count = count;
	msg.CtxId = adapter->portnum;
	msg.Opcode = UNM_RCV_PRODUCER(ringid);
	/* The whole message is a 32-bit bitfield; write it as one word. */
	dbwritel(*((__uint32_t *)&msg),
	    (void *)(DB_NORMALIZE(adapter, UNM_RCV_PRODUCER_OFFSET)));
}
1538 
1539 static int
1540 unm_post_rx_buffers(struct unm_adapter_s *adapter, uint32_t ringid)
1541 {
1542 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1543 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1544 	unm_rx_buffer_t		*rx_buffer;
1545 	rcvDesc_t		*pdesc;
1546 	int			count;
1547 
1548 	for (count = 0; count < rcv_desc->MaxRxDescCount; count++) {
1549 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1550 		if (rx_buffer != NULL) {
1551 			pdesc = &rcv_desc->desc_head[count];
1552 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1553 			    rx_buffer);
1554 			pdesc->flags = ringid;
1555 			pdesc->bufferLength = rcv_desc->dma_size;
1556 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1557 		}
1558 		else
1559 			return (DDI_FAILURE);
1560 	}
1561 
1562 	rcv_desc->producer = count % rcv_desc->MaxRxDescCount;
1563 	count--;
1564 	unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle,
1565 	    0,		/* start */
1566 	    count,	/* count */
1567 	    count,	/* range */
1568 	    sizeof (rcvDesc_t),	/* unit_size */
1569 	    DDI_DMA_SYNC_FORDEV);	/* direction */
1570 
1571 	rcv_desc->rx_buf_card = rcv_desc->MaxRxDescCount;
1572 	UNM_READ_LOCK(&adapter->adapter_lock);
1573 	adapter->unm_nic_hw_write_wx(adapter, rcv_desc->host_rx_producer,
1574 	    &count, 4);
1575 	if (adapter->fw_major < 4)
1576 		post_rx_doorbell(adapter, ringid, count);
1577 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1578 
1579 	return (DDI_SUCCESS);
1580 }
1581 
1582 static void
1583 unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
1584     uint32_t ringid)
1585 {
1586 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1587 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1588 	struct unm_rx_buffer	*rx_buffer;
1589 	rcvDesc_t		*pdesc;
1590 	int 			count, producer = rcv_desc->producer;
1591 	int 			last_producer = producer;
1592 
1593 	for (count = 0; count < rcv_desc->rx_desc_handled; count++) {
1594 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1595 		if (rx_buffer != NULL) {
1596 			pdesc = &rcv_desc->desc_head[producer];
1597 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1598 			    rx_buffer);
1599 			pdesc->flags = ringid;
1600 			pdesc->bufferLength = rcv_desc->dma_size;
1601 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1602 		} else {
1603 			adapter->stats.outofrxbuf++;
1604 			break;
1605 		}
1606 		producer = get_next_index(producer, rcv_desc->MaxRxDescCount);
1607 	}
1608 
1609 	/* if we did allocate buffers, then write the count to Phantom */
1610 	if (count) {
1611 		/* Sync rx ring, considering case for wrap around */
1612 		unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle, last_producer,
1613 		    count, rcv_desc->MaxRxDescCount, sizeof (rcvDesc_t),
1614 		    DDI_DMA_SYNC_FORDEV);
1615 
1616 		rcv_desc->producer = producer;
1617 		rcv_desc->rx_desc_handled -= count;
1618 		rcv_desc->rx_buf_card += count;
1619 
1620 		producer = (producer - 1) % rcv_desc->MaxRxDescCount;
1621 		UNM_READ_LOCK(&adapter->adapter_lock);
1622 		adapter->unm_nic_hw_write_wx(adapter,
1623 		    rcv_desc->host_rx_producer, &producer, 4);
1624 		UNM_READ_UNLOCK(&adapter->adapter_lock);
1625 	}
1626 }
1627 
/*
 * Gather interface statistics for 128M-BAR (P2-style) parts.  XGBE
 * boards read the hardware MAC counters through the CRB window; other
 * boards report the driver's soft counters instead.
 */
int
unm_nic_fill_statistics_128M(struct unm_adapter_s *adapter,
			    struct unm_statistics *unm_stats)
{
	/*
	 * NOTE(review): "addr" looks unused here, but is presumably
	 * referenced inside the UNM_NIC_LOCKED_READ_REG macro
	 * expansion (hence the LINTED annotations) — confirm before
	 * removing.
	 */
	void *addr;
	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
		UNM_WRITE_LOCK(&adapter->adapter_lock);
		/* Move the CRB window to the NIU register bank. */
		unm_nic_pci_change_crbwindow_128M(adapter, 0);

		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT,
		    &(unm_stats->tx_bytes));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT,
		    &(unm_stats->tx_packets));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT,
		    &(unm_stats->rx_bytes));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT,
		    &(unm_stats->rx_packets));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT,
		    &(unm_stats->rx_errors));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT,
		    &(unm_stats->rx_CRC_errors));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
		    &(unm_stats->rx_long_length_error));
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
		    &(unm_stats->rx_short_length_error));

		/*
		 * For reading rx_MAC_error bit different procedure
		 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
		 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
		 * unm_stats->rx_MAC_errors = temp & 0xff;
		 */

		/* Restore the default CRB window. */
		unm_nic_pci_change_crbwindow_128M(adapter, 1);
		UNM_WRITE_UNLOCK(&adapter->adapter_lock);
	} else {
		/* Non-XGBE: report the driver's soft counters. */
		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
		unm_stats->tx_bytes = adapter->stats.txbytes;
		unm_stats->tx_packets = adapter->stats.xmitedframes +
		    adapter->stats.xmitfinished;
		unm_stats->rx_bytes = adapter->stats.rxbytes;
		unm_stats->rx_packets = adapter->stats.no_rcv;
		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
		unm_stats->rx_short_length_error = adapter->stats.uplcong;
		unm_stats->rx_long_length_error = adapter->stats.uphcong;
		unm_stats->rx_CRC_errors = 0;
		unm_stats->rx_MAC_errors = 0;
		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
	}
	return (0);
}
1688 
/*
 * Gather interface statistics for 2M-BAR (P3-style) parts.  XGBE
 * boards read the hardware MAC counters through the windowless 2M
 * register interface; other boards report the driver's soft counters.
 */
int
unm_nic_fill_statistics_2M(struct unm_adapter_s *adapter,
    struct unm_statistics *unm_stats)
{
	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
		    &(unm_stats->tx_bytes), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
		    &(unm_stats->tx_packets), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
		    &(unm_stats->rx_bytes), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
		    &(unm_stats->rx_packets), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_AGGR_ERROR_CNT, &(unm_stats->rx_errors), 4);
		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
		    &(unm_stats->rx_CRC_errors), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
		    &(unm_stats->rx_long_length_error), 4);
		(void) unm_nic_hw_read_wx_2M(adapter,
		    UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
		    &(unm_stats->rx_short_length_error), 4);
	} else {
		/* Non-XGBE: report the driver's soft counters. */
		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
		unm_stats->tx_bytes = adapter->stats.txbytes;
		unm_stats->tx_packets = adapter->stats.xmitedframes +
		    adapter->stats.xmitfinished;
		unm_stats->rx_bytes = adapter->stats.rxbytes;
		unm_stats->rx_packets = adapter->stats.no_rcv;
		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
		unm_stats->rx_short_length_error = adapter->stats.uplcong;
		unm_stats->rx_long_length_error = adapter->stats.uphcong;
		unm_stats->rx_CRC_errors = 0;
		unm_stats->rx_MAC_errors = 0;
		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
	}
	return (0);
}
1729 
/*
 * Zero the XGE MAC hardware counters (through the 128M CRB window)
 * and the driver's soft statistics.
 */
int
unm_nic_clear_statistics_128M(struct unm_adapter_s *adapter)
{
	/*
	 * NOTE(review): "addr" looks unused here, but is presumably
	 * referenced inside the UNM_NIC_LOCKED_WRITE_REG macro
	 * expansion (hence the LINTED annotations) — confirm before
	 * removing.
	 */
	void *addr;
	int data = 0;

	UNM_WRITE_LOCK(&adapter->adapter_lock);
	/* Move the CRB window to the NIU register bank. */
	unm_nic_pci_change_crbwindow_128M(adapter, 0);

	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR, &data);
	/* LINTED: E_FALSE_LOGICAL_EXPR */
	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR, &data);

	/* Restore the default CRB window. */
	unm_nic_pci_change_crbwindow_128M(adapter, 1);
	UNM_WRITE_UNLOCK(&adapter->adapter_lock);
	unm_nic_clear_stats(adapter);
	return (0);
}
1761 
1762 int
1763 unm_nic_clear_statistics_2M(struct unm_adapter_s *adapter)
1764 {
1765 	int data = 0;
1766 
1767 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1768 	    &data, 4);
1769 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1770 	    &data, 4);
1771 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1772 	    &data, 4);
1773 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1774 	    &data, 4);
1775 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_AGGR_ERROR_CNT,
1776 	    &data, 4);
1777 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1778 	    &data, 4);
1779 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1780 	    &data, 4);
1781 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1782 	    &data, 4);
1783 	unm_nic_clear_stats(adapter);
1784 	return (0);
1785 }
1786 
1787 /*
1788  * unm_nic_ioctl ()    We provide the tcl/phanmon support
1789  * through these ioctls.
1790  */
1791 static void
1792 unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q, mblk_t *mp)
1793 {
1794 	void *ptr;
1795 
1796 	switch (cmd) {
1797 	case UNM_NIC_CMD:
1798 		(void) unm_nic_do_ioctl(adapter, q, mp);
1799 		break;
1800 
1801 	case UNM_NIC_NAME:
1802 		ptr = (void *) mp->b_cont->b_rptr;
1803 
1804 		/*
1805 		 * Phanmon checks for "UNM-UNM" string
1806 		 * Replace the hardcoded value with appropriate macro
1807 		 */
1808 		DPRINTF(-1, (CE_CONT, "UNM_NIC_NAME ioctl executed %d %d\n",
1809 		    cmd, __LINE__));
1810 		(void) memcpy(ptr, "UNM-UNM", 10);
1811 		miocack(q, mp, 10, 0);
1812 		break;
1813 
1814 	default:
1815 		cmn_err(CE_WARN, "Netxen ioctl cmd %x not supported\n", cmd);
1816 
1817 		miocnak(q, mp, 0, EINVAL);
1818 		break;
1819 	}
1820 }
1821 
/*
 * Resume the interface after a suspend: restart the watchdog timer,
 * re-enable the (MSI or fixed) interrupt at the DDI and device level,
 * and report the link up to MAC.  Always returns DDI_SUCCESS.
 */
int
unm_nic_resume(unm_adapter *adapter)
{

	/*
	 * NOTE(review): the 50000-tick delay here differs from the
	 * 2 * drv_usectohz(1000000) interval unm_watchdog() uses to
	 * rearm itself — confirm this initial delay is intended.
	 */
	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
	    (void *) adapter, 50000);

	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
	else
		(void) ddi_intr_enable(adapter->intr_handle);
	UNM_READ_LOCK(&adapter->adapter_lock);
	unm_nic_enable_int(adapter);
	UNM_READ_UNLOCK(&adapter->adapter_lock);

	mac_link_update(adapter->mach, LINK_STATE_UP);

	return (DDI_SUCCESS);
}
1841 
/*
 * Suspend the interface: report link down to MAC, stop the watchdog
 * timer, then disable interrupts at the device and DDI level (the
 * reverse of unm_nic_resume()).  Always returns DDI_SUCCESS.
 */
int
unm_nic_suspend(unm_adapter *adapter)
{
	mac_link_update(adapter->mach, LINK_STATE_DOWN);

	(void) untimeout(adapter->watchdog_timer);

	UNM_READ_LOCK(&adapter->adapter_lock);
	unm_nic_disable_int(adapter);
	UNM_READ_UNLOCK(&adapter->adapter_lock);
	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
	else
		(void) ddi_intr_disable(adapter->intr_handle);

	return (DDI_SUCCESS);
}
1859 
1860 static int
1861 unm_nic_do_ioctl(unm_adapter *adapter, queue_t *wq, mblk_t *mp)
1862 {
1863 	unm_nic_ioctl_data_t		data;
1864 	struct unm_nic_ioctl_data	*up_data;
1865 	ddi_acc_handle_t		conf_handle;
1866 	int				retval = 0;
1867 	unsigned int			efuse_chip_id;
1868 	char				*ptr1;
1869 	short				*ptr2;
1870 	int				*ptr4;
1871 
1872 	up_data = (struct unm_nic_ioctl_data *)(mp->b_cont->b_rptr);
1873 	(void) memcpy(&data, (void **)(uintptr_t)(mp->b_cont->b_rptr),
1874 	    sizeof (data));
1875 
1876 	/* Shouldn't access beyond legal limits of  "char u[64];" member */
1877 	if (data.size > sizeof (data.uabc)) {
1878 		/* evil user tried to crash the kernel */
1879 		cmn_err(CE_WARN, "bad size: %d\n", data.size);
1880 		retval = GLD_BADARG;
1881 		goto error_out;
1882 	}
1883 
1884 	switch (data.cmd) {
1885 	case unm_nic_cmd_pci_read:
1886 
1887 		if ((retval = adapter->unm_nic_hw_read_ioctl(adapter,
1888 		    data.off, up_data, data.size))) {
1889 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_read_wx "
1890 		    "returned %d\n", __FUNCTION__, __LINE__, retval));
1891 
1892 			retval = data.rv;
1893 			goto error_out;
1894 		}
1895 
1896 		data.rv = 0;
1897 		break;
1898 
1899 	case unm_nic_cmd_pci_write:
1900 		if ((data.rv = adapter->unm_nic_hw_write_ioctl(adapter,
1901 		    data.off, &(data.uabc), data.size))) {
1902 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_write_wx "
1903 			    "returned %d\n", __FUNCTION__,
1904 			    __LINE__, data.rv));
1905 			retval = data.rv;
1906 			goto error_out;
1907 		}
1908 		data.size = 0;
1909 		break;
1910 
1911 	case unm_nic_cmd_pci_mem_read:
1912 		if ((data.rv = adapter->unm_nic_pci_mem_read(adapter,
1913 		    data.off, up_data, data.size))) {
1914 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_pci_mem_read "
1915 			    "returned %d\n", __FUNCTION__,
1916 			    __LINE__, data.rv));
1917 			retval = data.rv;
1918 			goto error_out;
1919 		}
1920 		data.rv = 0;
1921 		break;
1922 
1923 	case unm_nic_cmd_pci_mem_write:
1924 		if ((data.rv = adapter->unm_nic_pci_mem_write(adapter,
1925 		    data.off, &(data.uabc), data.size))) {
1926 			DPRINTF(-1, (CE_WARN,
1927 			    "%s(%d) unm_nic_cmd_pci_mem_write "
1928 			    "returned %d\n",
1929 			    __FUNCTION__, __LINE__, data.rv));
1930 			retval = data.rv;
1931 			goto error_out;
1932 		}
1933 
1934 		data.size = 0;
1935 		data.rv = 0;
1936 		break;
1937 
1938 	case unm_nic_cmd_pci_config_read:
1939 
1940 		if (adapter->pci_cfg_handle != NULL) {
1941 			conf_handle = adapter->pci_cfg_handle;
1942 
1943 		} else if ((retval = pci_config_setup(adapter->dip,
1944 		    &conf_handle)) != DDI_SUCCESS) {
1945 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1946 			    " error:%d\n", unm_nic_driver_name, retval));
1947 			goto error_out;
1948 
1949 		} else
1950 			adapter->pci_cfg_handle = conf_handle;
1951 
1952 		switch (data.size) {
1953 		case 1:
1954 			ptr1 = (char *)up_data;
1955 			*ptr1 = (char)pci_config_get8(conf_handle, data.off);
1956 			break;
1957 		case 2:
1958 			ptr2 = (short *)up_data;
1959 			*ptr2 = (short)pci_config_get16(conf_handle, data.off);
1960 			break;
1961 		case 4:
1962 			ptr4 = (int *)up_data;
1963 			*ptr4 = (int)pci_config_get32(conf_handle, data.off);
1964 			break;
1965 		}
1966 
1967 		break;
1968 
1969 	case unm_nic_cmd_pci_config_write:
1970 
1971 		if (adapter->pci_cfg_handle != NULL) {
1972 			conf_handle = adapter->pci_cfg_handle;
1973 		} else if ((retval = pci_config_setup(adapter->dip,
1974 		    &conf_handle)) != DDI_SUCCESS) {
1975 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1976 			    " error:%d\n", unm_nic_driver_name, retval));
1977 			goto error_out;
1978 		} else {
1979 			adapter->pci_cfg_handle = conf_handle;
1980 		}
1981 
1982 		switch (data.size) {
1983 		case 1:
1984 			pci_config_put8(conf_handle,
1985 			    data.off, *(char *)&(data.uabc));
1986 			break;
1987 		case 2:
1988 			pci_config_put16(conf_handle,
1989 			    data.off, *(short *)(uintptr_t)&(data.uabc));
1990 			break;
1991 		case 4:
1992 			pci_config_put32(conf_handle,
1993 			    data.off, *(u32 *)(uintptr_t)&(data.uabc));
1994 			break;
1995 		}
1996 		data.size = 0;
1997 		break;
1998 
1999 	case unm_nic_cmd_get_stats:
2000 		data.rv = adapter->unm_nic_fill_statistics(adapter,
2001 		    (struct unm_statistics *)up_data);
2002 		data.size = sizeof (struct unm_statistics);
2003 
2004 		break;
2005 
2006 	case unm_nic_cmd_clear_stats:
2007 		data.rv = adapter->unm_nic_clear_statistics(adapter);
2008 		break;
2009 
2010 	case unm_nic_cmd_get_version:
2011 		(void) memcpy(up_data, UNM_NIC_VERSIONID,
2012 		    sizeof (UNM_NIC_VERSIONID));
2013 		data.size = sizeof (UNM_NIC_VERSIONID);
2014 
2015 		break;
2016 
2017 	case unm_nic_cmd_get_phy_type:
2018 		cmn_err(CE_WARN, "unm_nic_cmd_get_phy_type unimplemented\n");
2019 		break;
2020 
2021 	case unm_nic_cmd_efuse_chip_id:
2022 		efuse_chip_id = adapter->unm_nic_pci_read_normalize(adapter,
2023 		    UNM_EFUSE_CHIP_ID);
2024 		(void) memcpy(up_data, &efuse_chip_id, sizeof (unsigned long));
2025 		data.rv = 0;
2026 		break;
2027 
2028 	default:
2029 		cmn_err(CE_WARN, "%s%d: bad command %d\n", adapter->name,
2030 		    adapter->instance, data.cmd);
2031 		data.rv = GLD_NOTSUPPORTED;
2032 		data.size = 0;
2033 		goto error_out;
2034 	}
2035 
2036 work_done:
2037 	miocack(wq, mp, data.size, data.rv);
2038 	return (DDI_SUCCESS);
2039 
2040 error_out:
2041 	cmn_err(CE_WARN, "%s(%d) ioctl error\n", __FUNCTION__, data.cmd);
2042 	miocnak(wq, mp, 0, EINVAL);
2043 	return (retval);
2044 }
2045 
2046 /*
2047  * Local datatype for defining tables of (Offset, Name) pairs
2048  */
2049 typedef struct {
2050 	offset_t	index;
2051 	char		*name;
2052 } unm_ksindex_t;
2053 
/*
 * Named-kstat table for the per-instance "kstatinfo" group.
 *
 * The first character of each name selects the kstat data type in
 * unm_setup_named_kstat(): '%' = UINT32, '$' = STRING, '&' = CHAR,
 * anything else = UINT64.  None of the names below carry a prefix,
 * so every entry is created as KSTAT_DATA_UINT64.
 *
 * The order of entries here must match the order of the assignments
 * in unm_kstat_update().  The { -1, NULL } entry is a terminator.
 */
static const unm_ksindex_t unm_kstat[] = {
	{ 0,		"freehdls"		},
	{ 1,		"freecmds"		},
	{ 2,		"tx_bcopy_threshold"	},
	{ 3,		"rx_bcopy_threshold"	},
	{ 4,		"xmitcalled"		},
	{ 5,		"xmitedframes"		},
	{ 6,		"xmitfinished"		},
	{ 7,		"txbytes"		},
	{ 8,		"txcopyed"		},
	{ 9,		"txmapped"		},
	{ 10,		"outoftxdmahdl"		},
	{ 11,		"outofcmddesc"		},
	{ 12,		"txdropped"		},
	{ 13,		"polled"		},
	{ 14,		"uphappy"		},
	{ 15,		"updropped"		},
	{ 16,		"csummed"		},
	{ 17,		"no_rcv"		},
	{ 18,		"rxbytes"		},
	{ 19,		"rxcopyed"		},
	{ 20,		"rxmapped"		},
	{ 21,		"desballocfailed"	},
	{ 22,		"outofrxbuf"		},
	{ 23,		"promiscmode"		},
	{ 24,		"rxbufshort"		},
	{ 25,		"allocbfailed"		},
	{ -1,		NULL			}
};
2083 
2084 static int
2085 unm_kstat_update(kstat_t *ksp, int flag)
2086 {
2087 	unm_adapter *adapter;
2088 	kstat_named_t *knp;
2089 
2090 	if (flag != KSTAT_READ)
2091 		return (EACCES);
2092 
2093 	adapter = ksp->ks_private;
2094 	knp = ksp->ks_data;
2095 
2096 	(knp++)->value.ui32 = adapter->freehdls;
2097 	(knp++)->value.ui64 = adapter->freecmds;
2098 	(knp++)->value.ui64 = adapter->tx_bcopy_threshold;
2099 	(knp++)->value.ui64 = adapter->rx_bcopy_threshold;
2100 
2101 	(knp++)->value.ui64 = adapter->stats.xmitcalled;
2102 	(knp++)->value.ui64 = adapter->stats.xmitedframes;
2103 	(knp++)->value.ui64 = adapter->stats.xmitfinished;
2104 	(knp++)->value.ui64 = adapter->stats.txbytes;
2105 	(knp++)->value.ui64 = adapter->stats.txcopyed;
2106 	(knp++)->value.ui64 = adapter->stats.txmapped;
2107 	(knp++)->value.ui64 = adapter->stats.outoftxdmahdl;
2108 	(knp++)->value.ui64 = adapter->stats.outofcmddesc;
2109 	(knp++)->value.ui64 = adapter->stats.txdropped;
2110 	(knp++)->value.ui64 = adapter->stats.polled;
2111 	(knp++)->value.ui64 = adapter->stats.uphappy;
2112 	(knp++)->value.ui64 = adapter->stats.updropped;
2113 	(knp++)->value.ui64 = adapter->stats.csummed;
2114 	(knp++)->value.ui64 = adapter->stats.no_rcv;
2115 	(knp++)->value.ui64 = adapter->stats.rxbytes;
2116 	(knp++)->value.ui64 = adapter->stats.rxcopyed;
2117 	(knp++)->value.ui64 = adapter->stats.rxmapped;
2118 	(knp++)->value.ui64 = adapter->stats.desballocfailed;
2119 	(knp++)->value.ui64 = adapter->stats.outofrxbuf;
2120 	(knp++)->value.ui64 = adapter->stats.promiscmode;
2121 	(knp++)->value.ui64 = adapter->stats.rxbufshort;
2122 	(knp++)->value.ui64 = adapter->stats.allocbfailed;
2123 
2124 	return (0);
2125 }
2126 
2127 static kstat_t *
2128 unm_setup_named_kstat(unm_adapter *adapter, int instance, char *name,
2129 	const unm_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
2130 {
2131 	kstat_t *ksp;
2132 	kstat_named_t *knp;
2133 	char *np;
2134 	int type;
2135 	int count = 0;
2136 
2137 	size /= sizeof (unm_ksindex_t);
2138 	ksp = kstat_create(unm_nic_driver_name, instance, name, "net",
2139 	    KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT);
2140 	if (ksp == NULL)
2141 		return (NULL);
2142 
2143 	ksp->ks_private = adapter;
2144 	ksp->ks_update = update;
2145 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
2146 		count++;
2147 		switch (*np) {
2148 		default:
2149 			type = KSTAT_DATA_UINT64;
2150 			break;
2151 		case '%':
2152 			np += 1;
2153 			type = KSTAT_DATA_UINT32;
2154 			break;
2155 		case '$':
2156 			np += 1;
2157 			type = KSTAT_DATA_STRING;
2158 			break;
2159 		case '&':
2160 			np += 1;
2161 			type = KSTAT_DATA_CHAR;
2162 			break;
2163 		}
2164 		kstat_named_init(knp, np, type);
2165 	}
2166 	kstat_install(ksp);
2167 
2168 	return (ksp);
2169 }
2170 
/*
 * Create the single "kstatinfo" named-kstat group for this instance.
 * Torn down by unm_fini_kstats().
 */
void
unm_init_kstats(unm_adapter* adapter, int instance)
{
	adapter->kstats[0] = unm_setup_named_kstat(adapter,
	    instance, "kstatinfo", unm_kstat,
	    sizeof (unm_kstat), unm_kstat_update);
}
2178 
2179 void
2180 unm_fini_kstats(unm_adapter* adapter)
2181 {
2182 
2183 	if (adapter->kstats[0] != NULL) {
2184 			kstat_delete(adapter->kstats[0]);
2185 			adapter->kstats[0] = NULL;
2186 		}
2187 }
2188 
2189 static int
2190 unm_nic_set_pauseparam(unm_adapter *adapter, unm_pauseparam_t *pause)
2191 {
2192 	int ret = 0;
2193 
2194 	if (adapter->ahw.board_type == UNM_NIC_GBE) {
2195 		if (unm_niu_gbe_set_rx_flow_ctl(adapter, pause->rx_pause))
2196 			ret = -EIO;
2197 
2198 		if (unm_niu_gbe_set_tx_flow_ctl(adapter, pause->tx_pause))
2199 			ret = -EIO;
2200 
2201 	} else if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2202 		if (unm_niu_xg_set_tx_flow_ctl(adapter, pause->tx_pause))
2203 			ret =  -EIO;
2204 	} else
2205 		ret = -EIO;
2206 
2207 	return (ret);
2208 }
2209 
2210 /*
2211  *
2212  * GLD/MAC interfaces
2213  *
2214  */
2215 
/*
 * GLDv3 mc_start entry point: bring the interface up.
 *
 * Under adapter->lock, this loads firmware, allocates hardware
 * resources, posts receive buffers, programs the MAC address, port
 * and MTU, starts the watchdog timer, and finally enables interrupts.
 * Idempotent: returns immediately if the adapter is already up.
 *
 * NOTE(review): the failure paths after unm_nic_hw_resources()
 * succeeds return without undoing the hardware-resource setup or the
 * posted rx buffers (see the "TODO: clean up" below) — confirm
 * whether detach covers this.
 */
static int
ntxn_m_start(void *arg)
{
	unm_adapter	*adapter = arg;
	int		ring;

	UNM_SPIN_LOCK(&adapter->lock);
	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC) {
		/* Already started; nothing to do. */
		UNM_SPIN_UNLOCK(&adapter->lock);
		return (DDI_SUCCESS);
	}

	if (init_firmware(adapter) != DDI_SUCCESS) {
		UNM_SPIN_UNLOCK(&adapter->lock);
		cmn_err(CE_WARN, "%s%d: Failed to init firmware\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	/* Start each "up" with a clean set of counters. */
	unm_nic_clear_stats(adapter);

	if (unm_nic_hw_resources(adapter) != 0) {
		UNM_SPIN_UNLOCK(&adapter->lock);
		cmn_err(CE_WARN, "%s%d: Error setting hw resources\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	if (adapter->fw_major < 4) {
		/*
		 * Pre-4.x firmware uses fixed per-port CRB registers for
		 * the command ring producer/consumer; reset both to 0.
		 */
		adapter->crb_addr_cmd_producer =
		    crb_cmd_producer[adapter->portnum];
		adapter->crb_addr_cmd_consumer =
		    crb_cmd_consumer[adapter->portnum];
		unm_nic_update_cmd_producer(adapter, 0);
		unm_nic_update_cmd_consumer(adapter, 0);
	}

	/* Fill every rx descriptor ring with receive buffers. */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		if (unm_post_rx_buffers(adapter, ring) != DDI_SUCCESS) {
			/* TODO: clean up */
			UNM_SPIN_UNLOCK(&adapter->lock);
			return (DDI_FAILURE);
		}
	}

	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0) {
		UNM_SPIN_UNLOCK(&adapter->lock);
		cmn_err(CE_WARN, "%s%d: Could not set mac address\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	if (unm_nic_init_port(adapter) != 0) {
		UNM_SPIN_UNLOCK(&adapter->lock);
		cmn_err(CE_WARN, "%s%d: Could not initialize port\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	unm_nic_set_link_parameters(adapter);

	/*
	 * P2 and P3 should be handled similarly.
	 */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (unm_nic_set_promisc_mode(adapter) != 0) {
			UNM_SPIN_UNLOCK(&adapter->lock);
			cmn_err(CE_WARN, "%s%d: Could not set promisc mode\n",
			    adapter->name, adapter->instance);
			return (DDI_FAILURE);
		}
	} else {
		nx_p3_nic_set_multi(adapter);
	}
	adapter->stats.promiscmode = 1;

	if (unm_nic_set_mtu(adapter, adapter->mtu) != 0) {
		UNM_SPIN_UNLOCK(&adapter->lock);
		cmn_err(CE_WARN, "%s%d: Could not set mtu\n",
		    adapter->name, adapter->instance);
		return (DDI_FAILURE);
	}

	/* Kick off the watchdog immediately (zero-tick timeout). */
	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
	    (void *)adapter, 0);

	adapter->is_up = UNM_ADAPTER_UP_MAGIC;

	/* Enable interrupts only after the adapter is marked up. */
	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
	else
		(void) ddi_intr_enable(adapter->intr_handle);
	unm_nic_enable_int(adapter);

	UNM_SPIN_UNLOCK(&adapter->lock);
	return (GLD_SUCCESS);
}
2313 
2314 
2315 /*
2316  * This code is kept here for reference so as to
2317  * see if something different is required to be done
2318  * in GLDV3. This will be deleted later.
2319  */
2320 /* ARGSUSED */
2321 static void
2322 ntxn_m_stop(void *arg)
2323 {
2324 }
2325 
/*ARGSUSED*/
/*
 * GLDv3 mc_multicst entry point: add/remove a multicast address.
 * Currently a stub that always reports success.
 */
static int
ntxn_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
{
	/*
	 * When we correctly implement this, invoke nx_p3_nic_set_multi()
	 * or nx_p2_nic_set_multi() here.
	 */
	return (GLD_SUCCESS);
}
2336 
/*ARGSUSED*/
/*
 * GLDv3 mc_setpromisc entry point: toggle promiscuous mode.
 * The real implementation is compiled out (#if 0) — promiscuous mode
 * is instead set unconditionally at start time in ntxn_m_start() —
 * so this always reports success.
 */
static int
ntxn_m_promisc(void *arg, boolean_t on)
{
#if 0
	int err = 0;
	struct unm_adapter_s *adapter = arg;

	err = on ? unm_nic_set_promisc_mode(adapter) :
	    unm_nic_unset_promisc_mode(adapter);

	if (err)
		return (GLD_FAILURE);
#endif

	return (GLD_SUCCESS);
}
2354 
2355 static int
2356 ntxn_m_stat(void *arg, uint_t stat, uint64_t *val)
2357 {
2358 	struct unm_adapter_s		*adapter = arg;
2359 	struct unm_adapter_stats	*portstat = &adapter->stats;
2360 
2361 	switch (stat) {
2362 	case MAC_STAT_IFSPEED:
2363 		if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2364 			/* 10 Gigs */
2365 			*val = 10000000000ULL;
2366 		} else {
2367 			/* 1 Gig */
2368 			*val = 1000000000;
2369 		}
2370 		break;
2371 
2372 	case MAC_STAT_MULTIRCV:
2373 		*val = 0;
2374 		break;
2375 
2376 	case MAC_STAT_BRDCSTRCV:
2377 	case MAC_STAT_BRDCSTXMT:
2378 		*val = 0;
2379 		break;
2380 
2381 	case MAC_STAT_NORCVBUF:
2382 		*val = portstat->updropped;
2383 		break;
2384 
2385 	case MAC_STAT_NOXMTBUF:
2386 		*val = portstat->txdropped;
2387 		break;
2388 
2389 	case MAC_STAT_RBYTES:
2390 		*val = portstat->rxbytes;
2391 		break;
2392 
2393 	case MAC_STAT_OBYTES:
2394 		*val = portstat->txbytes;
2395 		break;
2396 
2397 	case MAC_STAT_OPACKETS:
2398 		*val = portstat->xmitedframes;
2399 		break;
2400 
2401 	case MAC_STAT_IPACKETS:
2402 		*val = portstat->uphappy;
2403 		break;
2404 
2405 	case MAC_STAT_OERRORS:
2406 		*val = portstat->xmitcalled - portstat->xmitedframes;
2407 		break;
2408 
2409 	case ETHER_STAT_LINK_DUPLEX:
2410 		*val = LINK_DUPLEX_FULL;
2411 		break;
2412 
2413 	default:
2414 		/*
2415 		 * Shouldn't reach here...
2416 		 */
2417 		*val = 0;
2418 		DPRINTF(0, (CE_WARN, ": unrecognized parameter = %d, value "
2419 		    "returned 1\n", stat));
2420 
2421 	}
2422 
2423 	return (0);
2424 }
2425 
2426 static int
2427 ntxn_m_unicst(void *arg, const uint8_t *mac)
2428 {
2429 	struct unm_adapter_s *adapter = arg;
2430 
2431 	DPRINTF(-1, (CE_CONT, "%s: called\n", __func__));
2432 
2433 	if (unm_nic_macaddr_set(adapter, (uint8_t *)mac))
2434 		return (EAGAIN);
2435 	bcopy(mac, adapter->mac_addr, ETHERADDRL);
2436 
2437 	return (0);
2438 }
2439 
2440 static mblk_t *
2441 ntxn_m_tx(void *arg, mblk_t *mp)
2442 {
2443 	unm_adapter *adapter = arg;
2444 	mblk_t *next;
2445 
2446 	while (mp != NULL) {
2447 		next = mp->b_next;
2448 		mp->b_next = NULL;
2449 
2450 		if (unm_nic_xmit_frame(adapter, mp) != B_TRUE) {
2451 			mp->b_next = next;
2452 			break;
2453 		}
2454 		mp = next;
2455 		adapter->stats.xmitedframes++;
2456 	}
2457 
2458 	return (mp);
2459 }
2460 
2461 static void
2462 ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2463 {
2464 	int		cmd;
2465 	struct iocblk   *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
2466 	struct unm_adapter_s *adapter = (struct unm_adapter_s *)arg;
2467 	enum ioc_reply status = IOC_DONE;
2468 
2469 	iocp->ioc_error = 0;
2470 	cmd = iocp->ioc_cmd;
2471 
2472 	if (cmd == ND_GET || cmd == ND_SET) {
2473 		status = unm_nd_ioctl(adapter, wq, mp, iocp);
2474 		switch (status) {
2475 		default:
2476 		case IOC_INVAL:
2477 			miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2478 			    EINVAL : iocp->ioc_error);
2479 			break;
2480 
2481 		case IOC_DONE:
2482 			break;
2483 
2484 		case IOC_RESTART_ACK:
2485 		case IOC_ACK:
2486 			miocack(wq, mp, 0, 0);
2487 			break;
2488 
2489 		case IOC_RESTART_REPLY:
2490 		case IOC_REPLY:
2491 			mp->b_datap->db_type = iocp->ioc_error == 0 ?
2492 			    M_IOCACK : M_IOCNAK;
2493 			qreply(wq, mp);
2494 			break;
2495 		}
2496 	} else if (cmd <= UNM_NIC_NAME && cmd >= UNM_CMD_START) {
2497 		unm_nic_ioctl(adapter, cmd, wq, mp);
2498 		return;
2499 	} else {
2500 		miocnak(wq, mp, 0, EINVAL);
2501 		return;
2502 	}
2503 }
2504 
2505 /* ARGSUSED */
2506 static boolean_t
2507 ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2508 {
2509 	switch (cap) {
2510 	case MAC_CAPAB_HCKSUM:
2511 		{
2512 			uint32_t *txflags = cap_data;
2513 
2514 			*txflags = (HCKSUM_ENABLE |
2515 			    HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM);
2516 		}
2517 		break;
2518 	default:
2519 		return (B_FALSE);
2520 	}
2521 
2522 	return (B_TRUE);
2523 }
2524 
/* Only the optional mc_ioctl and mc_getcapab entry points are provided. */
#define	NETXEN_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * GLDv3 callback vector passed to mac_register() in
 * unm_register_mac().
 */
static mac_callbacks_t ntxn_m_callbacks = {
	NETXEN_M_CALLBACK_FLAGS,
	ntxn_m_stat,
	ntxn_m_start,
	ntxn_m_stop,
	ntxn_m_promisc,
	ntxn_m_multicst,
	ntxn_m_unicst,
	ntxn_m_tx,
	ntxn_m_ioctl,
	ntxn_m_getcapab,
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	NULL,			/* mc_setprop */
	NULL			/* mc_getprop */
};
2543 
2544 int
2545 unm_register_mac(unm_adapter *adapter)
2546 {
2547 	int ret;
2548 	mac_register_t *macp;
2549 	unm_pauseparam_t pause;
2550 
2551 	dev_info_t *dip = adapter->dip;
2552 
2553 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2554 		cmn_err(CE_WARN, "Memory not available\n");
2555 		return (DDI_FAILURE);
2556 	}
2557 
2558 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2559 	macp->m_driver = adapter;
2560 	macp->m_dip = dip;
2561 	macp->m_instance = adapter->instance;
2562 	macp->m_src_addr = adapter->mac_addr;
2563 	macp->m_callbacks = &ntxn_m_callbacks;
2564 	macp->m_min_sdu = 0;
2565 	macp->m_max_sdu = adapter->mtu;
2566 #ifdef SOLARIS11
2567 	macp->m_margin = VLAN_TAGSZ;
2568 #endif /* SOLARIS11 */
2569 
2570 	ret = mac_register(macp, &adapter->mach);
2571 	mac_free(macp);
2572 	if (ret != 0) {
2573 		cmn_err(CE_WARN, "mac_register failed for port %d\n",
2574 		    adapter->portnum);
2575 		return (DDI_FAILURE);
2576 	}
2577 
2578 	unm_init_kstats(adapter, adapter->instance);
2579 
2580 	/* Register NDD-tweakable parameters */
2581 	if (unm_nd_init(adapter)) {
2582 		cmn_err(CE_WARN, "unm_nd_init() failed");
2583 		return (DDI_FAILURE);
2584 	}
2585 
2586 	pause.rx_pause = adapter->nd_params[PARAM_ADV_PAUSE_CAP].ndp_val;
2587 	pause.tx_pause = adapter->nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val;
2588 
2589 	if (unm_nic_set_pauseparam(adapter, &pause)) {
2590 		cmn_err(CE_WARN, "\nBad Pause settings RX %d, Tx %d",
2591 		    pause.rx_pause, pause.tx_pause);
2592 	}
2593 	adapter->nd_params[PARAM_PAUSE_CAP].ndp_val = pause.rx_pause;
2594 	adapter->nd_params[PARAM_ASYM_PAUSE_CAP].ndp_val = pause.tx_pause;
2595 
2596 	return (DDI_SUCCESS);
2597 }
2598