xref: /illumos-gate/usr/src/uts/common/io/ntxn/unm_nic_main.c (revision b31b5de1357c915fe7dab4d9646d9d84f9fe69bc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 NetXen, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
27  * Use is subject to license terms.
28  */
29 #include <sys/types.h>
30 #include <sys/conf.h>
31 #include <sys/debug.h>
32 #include <sys/stropts.h>
33 #include <sys/stream.h>
34 #include <sys/strlog.h>
35 #include <sys/kmem.h>
36 #include <sys/stat.h>
37 #include <sys/kstat.h>
38 #include <sys/vtrace.h>
39 #include <sys/dlpi.h>
40 #include <sys/strsun.h>
41 #include <sys/ethernet.h>
42 #include <sys/modctl.h>
43 #include <sys/errno.h>
44 #include <sys/dditypes.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sysmacros.h>
48 #include <sys/pci.h>
49 
50 #include <sys/gld.h>
51 #include <netinet/in.h>
52 #include <inet/ip.h>
53 #include <inet/tcp.h>
54 
55 #include <sys/rwlock.h>
56 #include <sys/mutex.h>
57 #include <sys/pattr.h>
58 #include <sys/strsubr.h>
59 #include <sys/ddi_impldefs.h>
60 #include <sys/task.h>
61 
62 #include "unm_nic_hw.h"
63 #include "unm_nic.h"
64 
65 #include "nic_phan_reg.h"
66 #include "unm_nic_ioctl.h"
67 #include "nic_cmn.h"
68 #include "unm_version.h"
69 #include "unm_brdcfg.h"
70 
71 #if defined(lint)
72 #undef MBLKL
73 #define	MBLKL(_mp_)	((uintptr_t)(_mp_)->b_wptr - (uintptr_t)(_mp_)->b_rptr)
74 #endif /* lint */
75 
76 #undef UNM_LOOPBACK
77 #undef SINGLE_DMA_BUF
78 
79 #define	UNM_ADAPTER_UP_MAGIC	777
80 #define	VLAN_TAGSZ		0x4
81 
82 #define	index2rxbuf(_rdp_, _idx_)	((_rdp_)->rx_buf_pool + (_idx_))
83 #define	rxbuf2index(_rdp_, _bufp_)	((_bufp_) - (_rdp_)->rx_buf_pool)
84 
85 /*
86  * The receive ISR processes at most NX_RX_MAXBUFS incoming packets, then
87  * posts as many buffers as packets were processed. This loop repeats as
88  * required to process all incoming packets delivered in a single interrupt.
89  * A higher value of NX_RX_MAXBUFS improves performance by posting rx buffers
90  * less frequently, but at the cost of not posting quickly enough when the
91  * card is running out of rx buffers.
92  */
93 #define	NX_RX_THRESHOLD		32
94 #define	NX_RX_MAXBUFS		128
95 #define	NX_MAX_TXCOMPS		256
96 
97 extern int create_rxtx_rings(unm_adapter *adapter);
98 extern void destroy_rxtx_rings(unm_adapter *adapter);
99 
100 static void unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
101     uint32_t ringid);
102 static mblk_t *unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc);
103 static int unm_process_rcv_ring(unm_adapter *, int);
104 static int unm_process_cmd_ring(struct unm_adapter_s *adapter);
105 
106 static int unm_nic_do_ioctl(unm_adapter *adapter, queue_t *q, mblk_t *mp);
107 static void unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q,
108     mblk_t *mp);
109 
110 /* GLDv3 interface functions */
111 static int ntxn_m_start(void *);
112 static void ntxn_m_stop(void *);
113 static int ntxn_m_multicst(void *, boolean_t, const uint8_t *);
114 static int ntxn_m_promisc(void *, boolean_t);
115 static int ntxn_m_stat(void *arg, uint_t stat, uint64_t *val);
116 static mblk_t *ntxn_m_tx(void *, mblk_t *);
117 static void ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
118 static boolean_t ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data);
119 
120 /*
121  * Allocates a DMA handle and virtual memory, and binds them.
122  * Returns the size of the memory actually bound and the physical address.
123  */
124 int
125 unm_pci_alloc_consistent(unm_adapter *adapter,
126 		int size, caddr_t *address, ddi_dma_cookie_t *cookie,
127 		ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep)
128 {
129 	int			err;
130 	uint32_t		ncookies;
131 	size_t			ring_len;
132 	uint_t			dma_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
133 
134 	*dma_handle = NULL;
135 
136 	if (size <= 0)
137 		return (DDI_ENOMEM);
138 
139 	err = ddi_dma_alloc_handle(adapter->dip,
140 	    &adapter->gc_dma_attr_desc,
141 	    DDI_DMA_DONTWAIT, NULL, dma_handle);
142 	if (err != DDI_SUCCESS) {
143 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_alloc_handle FAILED:"
144 		    " %d", unm_nic_driver_name, __func__, err);
145 		return (DDI_ENOMEM);
146 	}
147 
148 	err = ddi_dma_mem_alloc(*dma_handle,
149 	    size, &adapter->gc_attr_desc,
150 	    dma_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
151 	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
152 	    handlep);
153 	if (err != DDI_SUCCESS) {
154 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_mem_alloc failed: "
155 		    "ret %d, request size: %d",
156 		    unm_nic_driver_name, __func__, err, size);
157 		ddi_dma_free_handle(dma_handle);
158 		return (DDI_ENOMEM);
159 	}
160 
161 	if (ring_len < size) {
162 		cmn_err(CE_WARN, "%s: %s: could not allocate required "
163 		    "memory: %d\n", unm_nic_driver_name,
164 		    __func__, size);
165 		ddi_dma_mem_free(handlep);
166 		ddi_dma_free_handle(dma_handle);
167 		return (DDI_FAILURE);
168 	}
169 
170 	(void) memset(*address, 0, size);
171 
172 	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
173 	    NULL, *address, ring_len,
174 	    dma_flags,
175 	    DDI_DMA_DONTWAIT, NULL,
176 	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
177 	    (ncookies != 1)) {
178 		cmn_err(CE_WARN,
179 		    "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
180 		    unm_nic_driver_name, __func__, err);
181 		ddi_dma_mem_free(handlep);
182 		ddi_dma_free_handle(dma_handle);
183 		return (DDI_FAILURE);
184 	}
185 
186 	return (DDI_SUCCESS);
187 }
188 
189 /*
190  * Unbinds the memory, frees it, and then frees the DMA handle.
191  */
192 void
193 unm_pci_free_consistent(ddi_dma_handle_t *dma_handle,
194     ddi_acc_handle_t *acc_handle)
195 {
196 	int err;
197 
198 	err = ddi_dma_unbind_handle(*dma_handle);
199 	if (err != DDI_SUCCESS) {
200 		cmn_err(CE_WARN, "%s: Error unbinding memory\n", __func__);
201 		return;
202 	}
203 
204 	ddi_dma_mem_free(acc_handle);
205 	ddi_dma_free_handle(dma_handle);
206 }
207 
208 static uint32_t msi_tgt_status[] = {
209     ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
210     ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
211     ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
212     ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
213 };
214 
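/*
 * Mask interrupts from the card by writing 0 to the per-port
 * interrupt mask CRB.
 */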
215 static void
216 unm_nic_disable_int(unm_adapter *adapter)
217 {
218 	__uint32_t	temp = 0;
219 
220 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
221 	    &temp, 4);
222 }
223 
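/*
 * Check whether the pending interrupt is ours and, if so, claim it.
 * Legacy interrupts are acknowledged through the target status register
 * and the read is repeated to flush the interrupt message; MSI just
 * clears its target status register. Returns 0 if the interrupt was
 * ours, -1 otherwise.
 */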
224 static inline int
225 unm_nic_clear_int(unm_adapter *adapter)
226 {
227 	uint32_t	mask, temp, our_int, status;
228 
229 	UNM_READ_LOCK(&adapter->adapter_lock);
230 
231 	/* check whether it's our interrupt */
232 	if (!UNM_IS_MSI_FAMILY(adapter)) {
233 
234 		/* Legacy Interrupt case */
235 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
236 		    &status);
237 
238 		if (!(status & adapter->legacy_intr.int_vec_bit)) {
239 			UNM_READ_UNLOCK(&adapter->adapter_lock);
240 			return (-1);
241 		}
242 
243 		if (adapter->ahw.revision_id >= NX_P3_B1) {
244 			adapter->unm_nic_pci_read_immediate(adapter,
245 			    ISR_INT_STATE_REG, &temp);
246 			if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp)) {
247 				UNM_READ_UNLOCK(&adapter->adapter_lock);
248 				return (-1);
249 			}
250 		} else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
251 			our_int = adapter->unm_nic_pci_read_normalize(adapter,
252 			    CRB_INT_VECTOR);
253 
254 			/* FIXME: Assumes pci_func is same as ctx */
255 			if ((our_int & (0x80 << adapter->portnum)) == 0) {
256 				if (our_int != 0) {
257 					/* not our interrupt */
258 					UNM_READ_UNLOCK(&adapter->adapter_lock);
259 					return (-1);
260 				}
261 			}
262 			temp = our_int & ~((u32)(0x80 << adapter->portnum));
263 			adapter->unm_nic_pci_write_normalize(adapter,
264 			    CRB_INT_VECTOR, temp);
265 		}
266 
267 		if (adapter->fw_major < 4)
268 			unm_nic_disable_int(adapter);
269 
270 		/* claim interrupt */
271 		temp = 0xffffffff;
272 		adapter->unm_nic_pci_write_immediate(adapter,
273 		    adapter->legacy_intr.tgt_status_reg, &temp);
274 
275 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
276 		    &mask);
277 
278 		/*
279 		 * Read again to make sure the legacy interrupt message got
280 		 * flushed out
281 		 */
282 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
283 		    &mask);
284 	} else if (adapter->flags & UNM_NIC_MSI_ENABLED) {
285 		/* clear interrupt */
286 		temp = 0xffffffff;
287 		adapter->unm_nic_pci_write_immediate(adapter,
288 		    msi_tgt_status[adapter->ahw.pci_func], &temp);
289 	}
290 
291 	UNM_READ_UNLOCK(&adapter->adapter_lock);
292 
293 	return (0);
294 }
295 
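/*
 * Re-arm interrupts by writing 1 to the per-port interrupt CRB and,
 * for legacy interrupts, opening the target mask register.
 */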
296 static void
297 unm_nic_enable_int(unm_adapter *adapter)
298 {
299 	u32	temp = 1;
300 
301 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
302 	    &temp, 4);
303 
304 	if (!UNM_IS_MSI_FAMILY(adapter)) {
305 		u32	mask = 0xfbff;
306 
307 		adapter->unm_nic_pci_write_immediate(adapter,
308 		    adapter->legacy_intr.tgt_mask_reg, &mask);
309 	}
310 }
311 
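/*
 * Release everything set up by unm_nic_hw_resources(): the rx/tx
 * context, the ring context descriptor, the command descriptor ring
 * and the receive/status descriptor rings.
 */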
312 static void
313 unm_free_hw_resources(unm_adapter *adapter)
314 {
315 	unm_recv_context_t *recv_ctx;
316 	unm_rcv_desc_ctx_t *rcv_desc;
317 	int ctx, ring;
318 
319 	if (adapter->context_alloced == 1) {
320 		netxen_destroy_rxtx(adapter);
321 		adapter->context_alloced = 0;
322 	}
323 
324 	if (adapter->ctxDesc != NULL) {
325 		unm_pci_free_consistent(&adapter->ctxDesc_dma_handle,
326 		    &adapter->ctxDesc_acc_handle);
327 		adapter->ctxDesc = NULL;
328 	}
329 
330 	if (adapter->ahw.cmdDescHead != NULL) {
331 		unm_pci_free_consistent(&adapter->ahw.cmd_desc_dma_handle,
332 		    &adapter->ahw.cmd_desc_acc_handle);
333 		adapter->ahw.cmdDesc_physAddr = NULL;
334 		adapter->ahw.cmdDescHead = NULL;
335 	}
336 
337 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
338 		recv_ctx = &adapter->recv_ctx[ctx];
339 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
340 			rcv_desc = &recv_ctx->rcv_desc[ring];
341 
342 			if (rcv_desc->desc_head != NULL) {
343 				unm_pci_free_consistent(
344 				    &rcv_desc->rx_desc_dma_handle,
345 				    &rcv_desc->rx_desc_acc_handle);
346 				rcv_desc->desc_head = NULL;
347 				rcv_desc->phys_addr = NULL;
348 			}
349 		}
350 
351 		if (recv_ctx->rcvStatusDescHead != NULL) {
352 			unm_pci_free_consistent(
353 			    &recv_ctx->status_desc_dma_handle,
354 			    &recv_ctx->status_desc_acc_handle);
355 			recv_ctx->rcvStatusDesc_physAddr = NULL;
356 			recv_ctx->rcvStatusDescHead = NULL;
357 		}
358 	}
359 }
360 
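/* Unmap the register and doorbell spaces and free the adapter structure. */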
361 static void
362 cleanup_adapter(struct unm_adapter_s *adapter)
363 {
364 	ddi_regs_map_free(&(adapter->regs_handle));
365 	ddi_regs_map_free(&(adapter->db_handle));
366 	kmem_free(adapter, sizeof (unm_adapter));
367 }
368 
369 void
370 unm_nic_remove(unm_adapter *adapter)
371 {
372 	mac_link_update(adapter->mach, LINK_STATE_DOWN);
373 	unm_nic_stop_port(adapter);
374 
375 	if (adapter->interrupt_crb) {
376 		UNM_READ_LOCK(&adapter->adapter_lock);
377 		unm_nic_disable_int(adapter);
378 		UNM_READ_UNLOCK(&adapter->adapter_lock);
379 	}
380 	(void) untimeout(adapter->watchdog_timer);
381 
382 	unm_free_hw_resources(adapter);
383 
384 	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC)
385 		destroy_rxtx_rings(adapter);
386 
387 	if (adapter->portnum == 0)
388 		unm_free_dummy_dma(adapter);
389 
390 	unm_destroy_intr(adapter);
391 
392 	ddi_set_driver_private(adapter->dip, NULL);
393 	cleanup_adapter(adapter);
394 }
395 
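/*
 * Wait for the command peg to finish initializing, then advertise
 * the host's interrupt scheme, MSI mode and multi-port mode, and
 * complete the handshake by writing PHAN_INITIALIZE_ACK.
 */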
396 static int
397 init_firmware(unm_adapter *adapter)
398 {
399 	uint32_t state = 0, loops = 0, tempout;
400 
401 	/* Window 1 call */
402 	UNM_READ_LOCK(&adapter->adapter_lock);
403 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_CMDPEG_STATE);
404 	UNM_READ_UNLOCK(&adapter->adapter_lock);
405 
406 	if (state == PHAN_INITIALIZE_ACK)
407 		return (0);
408 
409 	while (state != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
410 		drv_usecwait(100);
411 		/* Window 1 call */
412 		UNM_READ_LOCK(&adapter->adapter_lock);
413 		state = adapter->unm_nic_pci_read_normalize(adapter,
414 		    CRB_CMDPEG_STATE);
415 		UNM_READ_UNLOCK(&adapter->adapter_lock);
416 		loops++;
417 	}
418 
419 	if (loops >= 200000) {
420 		cmn_err(CE_WARN, "%s%d: CmdPeg init incomplete: %x\n",
421 		    adapter->name, adapter->instance, state);
422 		return (-EIO);
423 	}
424 
425 	/* Window 1 call */
426 	UNM_READ_LOCK(&adapter->adapter_lock);
427 	tempout = INTR_SCHEME_PERPORT;
428 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_CAPABILITIES_HOST,
429 	    &tempout, 4);
430 	tempout = MSI_MODE_MULTIFUNC;
431 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_MSI_MODE_HOST,
432 	    &tempout, 4);
433 	tempout = MPORT_MULTI_FUNCTION_MODE;
434 	adapter->unm_nic_hw_write_wx(adapter, CRB_MPORT_MODE, &tempout, 4);
435 	tempout = PHAN_INITIALIZE_ACK;
436 	adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &tempout, 4);
437 	UNM_READ_UNLOCK(&adapter->adapter_lock);
438 
439 	return (0);
440 }
441 
442 /*
443  * Utility to synchronize with the receive peg.
444  *  Returns    0 on success,
445  *          -EIO on error.
446  */
447 int
448 receive_peg_ready(struct unm_adapter_s *adapter)
449 {
450 	uint32_t state = 0;
451 	int loops = 0, err = 0;
452 
453 	/* Window 1 call */
454 	UNM_READ_LOCK(&adapter->adapter_lock);
455 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_RCVPEG_STATE);
456 	UNM_READ_UNLOCK(&adapter->adapter_lock);
457 
458 	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 20000)) {
459 		drv_usecwait(100);
460 		/* Window 1 call */
461 
462 		UNM_READ_LOCK(&adapter->adapter_lock);
463 		state = adapter->unm_nic_pci_read_normalize(adapter,
464 		    CRB_RCVPEG_STATE);
465 		UNM_READ_UNLOCK(&adapter->adapter_lock);
466 
467 		loops++;
468 	}
469 
470 	if (loops >= 20000) {
471 		cmn_err(CE_WARN, "Receive Peg initialization incomplete 0x%x\n",
472 		    state);
473 		err = -EIO;
474 	}
475 
476 	return (err);
477 }
478 
479 /*
480  * Check that the firmware has been downloaded and is ready to run, and
481  * set up the addresses of the descriptors in the adapter.
482  */
483 static int
484 unm_nic_hw_resources(unm_adapter *adapter)
485 {
486 	hardware_context	*hw = &adapter->ahw;
487 	void			*addr;
488 	int			err;
489 	int			ctx, ring;
490 	unm_recv_context_t	*recv_ctx;
491 	unm_rcv_desc_ctx_t	*rcv_desc;
492 	ddi_dma_cookie_t	cookie;
493 	int			size;
494 
495 	if (err = receive_peg_ready(adapter))
496 		return (err);
497 
498 	size = (sizeof (RingContext) + sizeof (uint32_t));
499 
500 	err = unm_pci_alloc_consistent(adapter,
501 	    size, (caddr_t *)&addr, &cookie,
502 	    &adapter->ctxDesc_dma_handle,
503 	    &adapter->ctxDesc_acc_handle);
504 	if (err != DDI_SUCCESS) {
505 		cmn_err(CE_WARN, "Failed to allocate HW context\n");
506 		return (err);
507 	}
508 
509 	adapter->ctxDesc_physAddr = cookie.dmac_laddress;
510 
511 	(void) memset(addr, 0, sizeof (RingContext));
512 
513 	adapter->ctxDesc = (RingContext *) addr;
514 	adapter->ctxDesc->CtxId = adapter->portnum;
515 	adapter->ctxDesc->CMD_CONSUMER_OFFSET =
516 	    adapter->ctxDesc_physAddr + sizeof (RingContext);
517 	adapter->cmdConsumer =
518 	    (uint32_t *)(uintptr_t)(((char *)addr) + sizeof (RingContext));
519 
520 	ASSERT(!((unsigned long)adapter->ctxDesc_physAddr & 0x3f));
521 
522 	/*
523 	 * Allocate command descriptor ring.
524 	 */
525 	size = (sizeof (cmdDescType0_t) * adapter->MaxTxDescCount);
526 	err = unm_pci_alloc_consistent(adapter,
527 	    size, (caddr_t *)&addr, &cookie,
528 	    &hw->cmd_desc_dma_handle,
529 	    &hw->cmd_desc_acc_handle);
530 	if (err != DDI_SUCCESS) {
531 		cmn_err(CE_WARN, "Failed to allocate cmd desc ring\n");
532 		return (err);
533 	}
534 
535 	hw->cmdDesc_physAddr = cookie.dmac_laddress;
536 	hw->cmdDescHead = (cmdDescType0_t *)addr;
537 
538 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
539 		recv_ctx = &adapter->recv_ctx[ctx];
540 
541 		size = (sizeof (statusDesc_t) * adapter->MaxRxDescCount);
542 		err = unm_pci_alloc_consistent(adapter,
543 		    size, (caddr_t *)&addr,
544 		    &recv_ctx->status_desc_dma_cookie,
545 		    &recv_ctx->status_desc_dma_handle,
546 		    &recv_ctx->status_desc_acc_handle);
547 		if (err != DDI_SUCCESS) {
548 			cmn_err(CE_WARN, "Failed to allocate sts desc ring\n");
549 			goto free_cmd_desc;
550 		}
551 
552 		(void) memset(addr, 0, size);
553 		recv_ctx->rcvStatusDesc_physAddr =
554 		    recv_ctx->status_desc_dma_cookie.dmac_laddress;
555 		recv_ctx->rcvStatusDescHead = (statusDesc_t *)addr;
556 
557 		/* rds rings */
558 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
559 			rcv_desc = &recv_ctx->rcv_desc[ring];
560 
561 			size = (sizeof (rcvDesc_t) * adapter->MaxRxDescCount);
562 			err = unm_pci_alloc_consistent(adapter,
563 			    size, (caddr_t *)&addr,
564 			    &rcv_desc->rx_desc_dma_cookie,
565 			    &rcv_desc->rx_desc_dma_handle,
566 			    &rcv_desc->rx_desc_acc_handle);
567 			if (err != DDI_SUCCESS) {
568 				cmn_err(CE_WARN, "Failed to allocate "
569 				    "rx desc ring %d\n", ring);
570 				goto free_status_desc;
571 			}
572 
573 			rcv_desc->phys_addr =
574 			    rcv_desc->rx_desc_dma_cookie.dmac_laddress;
575 			rcv_desc->desc_head = (rcvDesc_t *)addr;
576 		}
577 	}
578 
579 	if (err = netxen_create_rxtx(adapter))
580 		goto free_statusrx_desc;
581 	adapter->context_alloced = 1;
582 
583 	return (DDI_SUCCESS);
584 
585 free_statusrx_desc:
586 free_status_desc:
587 free_cmd_desc:
588 	unm_free_hw_resources(adapter);
589 
590 	return (err);
591 }
592 
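/*
 * Sync a range of ring descriptors for DMA, splitting the operation
 * in two when the range wraps around the end of the ring.
 */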
593 void unm_desc_dma_sync(ddi_dma_handle_t handle, uint_t start, uint_t count,
594     uint_t range, uint_t unit_size, uint_t direction)
595 {
596 	if ((start + count) < range) {
597 		(void) ddi_dma_sync(handle, start * unit_size,
598 		    count * unit_size, direction);
599 	} else {
600 		(void) ddi_dma_sync(handle, start * unit_size, 0, direction);
601 		(void) ddi_dma_sync(handle, 0,
602 		    (start + count - range) * unit_size, direction);
603 	}
604 }
605 
606 static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET,
607     CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2,
608     CRB_CMD_PRODUCER_OFFSET_3 };
609 
610 static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET,
611     CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2,
612     CRB_CMD_CONSUMER_OFFSET_3 };
613 
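/* Inform the firmware of the new command ring producer index. */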
614 void
615 unm_nic_update_cmd_producer(struct unm_adapter_s *adapter,
616     uint32_t crb_producer)
617 {
618 	int data = crb_producer;
619 
620 	if (adapter->crb_addr_cmd_producer) {
621 		UNM_READ_LOCK(&adapter->adapter_lock);
622 		adapter->unm_nic_hw_write_wx(adapter,
623 		    adapter->crb_addr_cmd_producer, &data, 4);
624 		UNM_READ_UNLOCK(&adapter->adapter_lock);
625 	}
626 }
627 
628 static void
629 unm_nic_update_cmd_consumer(struct unm_adapter_s *adapter,
630     uint32_t crb_producer)
631 {
632 	int data = crb_producer;
633 
634 	if (adapter->crb_addr_cmd_consumer)
635 		adapter->unm_nic_hw_write_wx(adapter,
636 		    adapter->crb_addr_cmd_consumer, &data, 4);
637 }
638 
639 /*
640  * Determines the packet type and sets the descriptor opcode accordingly
641  * so that checksum offload can be used.
642  */
643 static void
644 unm_tx_csum(cmdDescType0_t *desc, mblk_t *mp, pktinfo_t *pktinfo)
645 {
646 	if (pktinfo->mac_hlen == sizeof (struct ether_vlan_header))
647 		desc->u1.s1.flags = FLAGS_VLAN_TAGGED;
648 
649 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
650 		uint32_t	start, flags;
651 
652 		hcksum_retrieve(mp, NULL, NULL, &start, NULL, NULL, NULL,
653 		    &flags);
654 		if ((flags & (HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM)) == 0)
655 			return;
656 
657 		/*
658 		 * For TCP/UDP, ask hardware to do both IP header and
659 		 * full checksum, even if stack has already done one or
660 		 * the other. Hardware will always get it correct even
661 		 * if stack has already done it.
662 		 */
663 		switch (pktinfo->l4_proto) {
664 			case IPPROTO_TCP:
665 				desc->u1.s1.opcode = TX_TCP_PKT;
666 				break;
667 			case IPPROTO_UDP:
668 				desc->u1.s1.opcode = TX_UDP_PKT;
669 				break;
670 			default:
671 				/* Must be here with HCK_IPV4_HDRCKSUM */
672 				desc->u1.s1.opcode = TX_IP_PKT;
673 				return;
674 		}
675 
676 		desc->u1.s1.ipHdrOffset = pktinfo->mac_hlen;
677 		desc->u1.s1.tcpHdrOffset = pktinfo->mac_hlen + pktinfo->ip_hlen;
678 	}
679 }
680 
681 /*
682  * For IP/UDP/TCP checksum offload, this checks that the MAC and IP headers
683  * lie in one contiguous block ending at an 8-byte aligned address, as
684  * required by hardware. The caller may assume pktinfo->total_len is updated
685  * by this function; if pktinfo->etype is left 0, the caller will need to
686  * linearize the mblk and invoke unm_update_pkt_info() to determine the
687  * ethertype, IP header length and protocol.
688  */
689 static boolean_t
690 unm_get_pkt_info(mblk_t *mp, pktinfo_t *pktinfo)
691 {
692 	mblk_t		*bp;
693 	ushort_t	type;
694 
695 	(void) memset(pktinfo, 0, sizeof (pktinfo_t));
696 
697 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
698 		if (MBLKL(bp) == 0)
699 			continue;
700 		pktinfo->mblk_no++;
701 		pktinfo->total_len += MBLKL(bp);
702 	}
703 
704 	if (MBLKL(mp) < (sizeof (struct ether_header) + sizeof (ipha_t)))
705 		return (B_FALSE);
706 
707 	/*
708 	 * We only need 2-byte alignment here, since ether_type is a
709 	 * ushort.
710 	 */
711 	if ((uintptr_t)mp->b_rptr & 1)
712 		return (B_FALSE);
713 
714 	type = ((struct ether_header *)(uintptr_t)mp->b_rptr)->ether_type;
715 	if (type == htons(ETHERTYPE_VLAN)) {
716 		if (MBLKL(mp) < (sizeof (struct ether_vlan_header) +
717 		    sizeof (ipha_t)))
718 			return (B_FALSE);
719 		type = ((struct ether_vlan_header *) \
720 		    (uintptr_t)mp->b_rptr)->ether_type;
721 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
722 	} else {
723 		pktinfo->mac_hlen = sizeof (struct ether_header);
724 	}
725 	pktinfo->etype = type;
726 
727 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
728 		uchar_t *ip_off = mp->b_rptr + pktinfo->mac_hlen;
729 
730 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ip_off);
731 		pktinfo->l4_proto =
732 		    ((ipha_t *)(uintptr_t)ip_off)->ipha_protocol;
733 
734 		/* IP header not ending on a quadword (8-byte) boundary? */
735 		if ((unsigned long)(ip_off + pktinfo->ip_hlen) % 8 != 0)
736 			return (B_FALSE);
737 	}
738 
739 	return (B_TRUE);
740 }
741 
742 static void
743 unm_update_pkt_info(char *ptr, pktinfo_t *pktinfo)
744 {
745 	ushort_t	type;
746 
747 	type = ((struct ether_header *)(uintptr_t)ptr)->ether_type;
748 	if (type == htons(ETHERTYPE_VLAN)) {
749 		type = ((struct ether_vlan_header *)(uintptr_t)ptr)->ether_type;
750 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
751 	} else {
752 		pktinfo->mac_hlen = sizeof (struct ether_header);
753 	}
754 	pktinfo->etype = type;
755 
756 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
757 		char *ipp = ptr + pktinfo->mac_hlen;
758 
759 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ipp);
760 		pktinfo->l4_proto = ((ipha_t *)(uintptr_t)ipp)->ipha_protocol;
761 	}
762 }
763 
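/*
 * Copy-based transmit path: bcopy the entire packet into a pre-mapped
 * DMA buffer and describe it with a single command descriptor. Used
 * for small packets and as a fallback when the mapped path fails.
 */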
764 static boolean_t
765 unm_send_copy(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
766 {
767 	hardware_context *hw;
768 	u32				producer = 0;
769 	cmdDescType0_t			*hwdesc;
770 	struct unm_cmd_buffer		*pbuf = NULL;
771 	u32				mblen;
772 	int				no_of_desc = 1;
773 	int				MaxTxDescCount;
774 	mblk_t				*bp;
775 	char				*txb;
776 
777 	hw = &adapter->ahw;
778 	MaxTxDescCount = adapter->MaxTxDescCount;
779 
780 	UNM_SPIN_LOCK(&adapter->tx_lock);
781 	membar_enter();
782 
783 	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
784 	    MaxTxDescCount) <= 2) {
785 		adapter->stats.outofcmddesc++;
786 		adapter->resched_needed = 1;
787 		membar_exit();
788 		UNM_SPIN_UNLOCK(&adapter->tx_lock);
789 		return (B_FALSE);
790 	}
791 	adapter->freecmds -= no_of_desc;
792 
793 	producer = adapter->cmdProducer;
794 
795 	adapter->cmdProducer = get_index_range(adapter->cmdProducer,
796 	    MaxTxDescCount, no_of_desc);
797 
798 	hwdesc = &hw->cmdDescHead[producer];
799 	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
800 	pbuf = &adapter->cmd_buf_arr[producer];
801 
802 	pbuf->msg = NULL;
803 	pbuf->head = NULL;
804 	pbuf->tail = NULL;
805 
806 	txb = pbuf->dma_area.vaddr;
807 
808 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
809 		if ((mblen = MBLKL(bp)) == 0)
810 			continue;
811 		bcopy(bp->b_rptr, txb, mblen);
812 		txb += mblen;
813 	}
814 
815 	/*
816 	 * Determine metadata if not previously done due to fragmented mblk.
817 	 */
818 	if (pktinfo->etype == 0)
819 		unm_update_pkt_info(pbuf->dma_area.vaddr, pktinfo);
820 
821 	(void) ddi_dma_sync(pbuf->dma_area.dma_hdl,
822 	    0, pktinfo->total_len, DDI_DMA_SYNC_FORDEV);
823 
824 	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
825 	/* hwdesc->mss = 0; */
826 	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
827 	hwdesc->u3.s1.port = adapter->portnum;
828 	hwdesc->u3.s1.ctx_id = adapter->portnum;
829 
830 	hwdesc->u6.s1.buffer1Length = pktinfo->total_len;
831 	hwdesc->u5.AddrBuffer1 = pbuf->dma_area.dma_addr;
832 	hwdesc->u1.s1.numOfBuffers = 1;
833 	hwdesc->u1.s1.totalLength = pktinfo->total_len;
834 
835 	unm_tx_csum(hwdesc, mp, pktinfo);
836 
837 	unm_desc_dma_sync(hw->cmd_desc_dma_handle,
838 	    producer,
839 	    no_of_desc,
840 	    MaxTxDescCount,
841 	    sizeof (cmdDescType0_t),
842 	    DDI_DMA_SYNC_FORDEV);
843 
844 	hw->cmdProducer = adapter->cmdProducer;
845 	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
846 
847 	adapter->stats.txbytes += pktinfo->total_len;
848 	adapter->stats.xmitfinished++;
849 	adapter->stats.txcopyed++;
850 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
851 
852 	freemsg(mp);
853 	return (B_TRUE);
854 }
855 
856 /* Should be called with adapter->tx_lock held. */
857 static void
858 unm_return_dma_handle(unm_adapter *adapter, unm_dmah_node_t *head,
859     unm_dmah_node_t *tail, uint32_t num)
860 {
861 	ASSERT(tail != NULL);
862 	tail->next = adapter->dmahdl_pool;
863 	adapter->dmahdl_pool = head;
864 	adapter->freehdls += num;
865 }
866 
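/*
 * Take a pre-allocated tx DMA handle off the free list, if one is
 * available.
 */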
867 static unm_dmah_node_t *
868 unm_reserve_dma_handle(unm_adapter* adapter)
869 {
870 	unm_dmah_node_t *dmah = NULL;
871 
872 	dmah = adapter->dmahdl_pool;
873 	if (dmah != NULL) {
874 		adapter->dmahdl_pool = dmah->next;
875 		dmah->next = NULL;
876 		adapter->freehdls--;
877 		membar_exit();
878 	}
879 
880 	return (dmah);
881 }
882 
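/*
 * Zero-copy transmit path: bind each mblk of the packet to a DMA
 * handle and scatter the resulting cookies across command descriptors,
 * four buffer addresses per descriptor.
 */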
883 static boolean_t
884 unm_send_mapped(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
885 {
886 	hardware_context		*hw;
887 	u32				producer = 0;
888 	u32				saved_producer = 0;
889 	cmdDescType0_t			*hwdesc;
890 	struct unm_cmd_buffer		*pbuf = NULL;
891 	int				no_of_desc;
892 	int				k;
893 	int				MaxTxDescCount;
894 	mblk_t				*bp;
895 
896 	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL, *hdlp;
897 	ddi_dma_cookie_t cookie[MAX_COOKIES_PER_CMD + 1];
898 	int ret, i;
899 	uint32_t hdl_reserved = 0;
900 	uint32_t mblen;
901 	uint32_t ncookies, index = 0, total_cookies = 0;
902 
903 	MaxTxDescCount = adapter->MaxTxDescCount;
904 
905 	UNM_SPIN_LOCK(&adapter->tx_lock);
906 
907 	/* bind all the mblks of the packet first */
908 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
909 		mblen = MBLKL(bp);
910 		if (mblen == 0)
911 			continue;
912 
913 		dmah = unm_reserve_dma_handle(adapter);
914 		if (dmah == NULL) {
915 			adapter->stats.outoftxdmahdl++;
916 			goto err_map;
917 		}
918 
919 		ret = ddi_dma_addr_bind_handle(dmah->dmahdl,
920 		    NULL, (caddr_t)bp->b_rptr, mblen,
921 		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
922 		    DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies);
923 
924 		if (ret != DDI_DMA_MAPPED)
925 			goto err_map;
926 
927 		if (tail == NULL) {
928 			head = tail = dmah;
929 		} else {
930 			tail->next = dmah;
931 			tail = dmah;
932 		}
933 		hdl_reserved++;
934 
935 		total_cookies += ncookies;
936 		if (total_cookies > MAX_COOKIES_PER_CMD) {
937 			dmah = NULL;
938 			goto err_map;
939 		}
940 
941 		if (index == 0) {
942 			size_t	hsize = cookie[0].dmac_size;
943 
944 			/*
945 			 * For TCP/UDP packets with checksum offload,
946 			 * MAC/IP headers need to be contiguous. Otherwise,
947 			 * there must be at least 16 bytes in the first
948 			 * descriptor.
949 			 */
950 			if ((pktinfo->l4_proto == IPPROTO_TCP) ||
951 			    (pktinfo->l4_proto == IPPROTO_UDP)) {
952 				if (hsize < (pktinfo->mac_hlen +
953 				    pktinfo->ip_hlen)) {
954 					dmah = NULL;
955 					goto err_map;
956 				}
957 			} else {
958 				if (hsize < 16) {
959 					dmah = NULL;
960 					goto err_map;
961 				}
962 			}
963 		}
964 
965 		index++;
966 		ncookies--;
967 		for (i = 0; i < ncookies; i++, index++)
968 			ddi_dma_nextcookie(dmah->dmahdl, &cookie[index]);
969 	}
970 
971 	dmah = NULL;
972 	hw = &adapter->ahw;
973 	no_of_desc = (total_cookies + 3) >> 2;
974 
975 	membar_enter();
976 	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
977 	    MaxTxDescCount) < no_of_desc+2) {
978 		/*
979 		 * If we are going to be trying the copy path, no point
980 		 * scheduling an upcall when Tx resources are freed.
981 		 */
982 		if (pktinfo->total_len > adapter->maxmtu) {
983 			adapter->stats.outofcmddesc++;
984 			adapter->resched_needed = 1;
985 		}
986 		membar_exit();
987 		goto err_alloc_desc;
988 	}
989 	adapter->freecmds -= no_of_desc;
990 
991 	/* Copy the descriptors into the hardware    */
992 	producer = adapter->cmdProducer;
993 	saved_producer = producer;
994 	hwdesc = &hw->cmdDescHead[producer];
995 	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
996 	pbuf = &adapter->cmd_buf_arr[producer];
997 
998 	pbuf->msg = mp;
999 	pbuf->head = head;
1000 	pbuf->tail = tail;
1001 
1002 	hwdesc->u1.s1.numOfBuffers = total_cookies;
1003 	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
1004 	hwdesc->u3.s1.port = adapter->portnum;
1005 	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
1006 	/* hwdesc->mss = 0; */
1007 	hwdesc->u3.s1.ctx_id = adapter->portnum;
1008 	hwdesc->u1.s1.totalLength = pktinfo->total_len;
1009 	unm_tx_csum(hwdesc, mp, pktinfo);
1010 
1011 	for (i = k = 0; i < total_cookies; i++) {
1012 		if (k == 4) {
1013 			/* Move to the next descriptor */
1014 			k = 0;
1015 			producer = get_next_index(producer, MaxTxDescCount);
1016 			hwdesc = &hw->cmdDescHead[producer];
1017 			(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
1018 		}
1019 
1020 		switch (k) {
1021 		case 0:
1022 			hwdesc->u6.s1.buffer1Length = cookie[i].dmac_size;
1023 			hwdesc->u5.AddrBuffer1 = cookie[i].dmac_laddress;
1024 			break;
1025 		case 1:
1026 			hwdesc->u6.s1.buffer2Length = cookie[i].dmac_size;
1027 			hwdesc->u2.AddrBuffer2 = cookie[i].dmac_laddress;
1028 			break;
1029 		case 2:
1030 			hwdesc->u6.s1.buffer3Length = cookie[i].dmac_size;
1031 			hwdesc->u4.AddrBuffer3 = cookie[i].dmac_laddress;
1032 			break;
1033 		case 3:
1034 			hwdesc->u6.s1.buffer4Length = cookie[i].dmac_size;
1035 			hwdesc->u7.AddrBuffer4 = cookie[i].dmac_laddress;
1036 			break;
1037 		}
1038 		k++;
1039 	}
1040 
1041 	unm_desc_dma_sync(hw->cmd_desc_dma_handle, saved_producer, no_of_desc,
1042 	    MaxTxDescCount, sizeof (cmdDescType0_t), DDI_DMA_SYNC_FORDEV);
1043 
1044 	adapter->cmdProducer = get_next_index(producer, MaxTxDescCount);
1045 	hw->cmdProducer = adapter->cmdProducer;
1046 	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
1047 
1048 	adapter->stats.txbytes += pktinfo->total_len;
1049 	adapter->stats.xmitfinished++;
1050 	adapter->stats.txmapped++;
1051 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
1052 	return (B_TRUE);
1053 
1054 err_alloc_desc:
1055 err_map:
1056 
1057 	hdlp = head;
1058 	while (hdlp != NULL) {
1059 		(void) ddi_dma_unbind_handle(hdlp->dmahdl);
1060 		hdlp = hdlp->next;
1061 	}
1062 
1063 	/*
1064 	 * add the reserved but bind failed one to the list to be returned
1065 	 */
1066 	if (dmah != NULL) {
1067 		if (tail == NULL)
1068 			head = tail = dmah;
1069 		else {
1070 			tail->next = dmah;
1071 			tail = dmah;
1072 		}
1073 		hdl_reserved++;
1074 	}
1075 
1076 	if (head != NULL)
1077 		unm_return_dma_handle(adapter, head, tail, hdl_reserved);
1078 
1079 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
1080 	return (B_FALSE);
1081 }
1082 
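/*
 * Transmit entry point: choose between the mapped and copy paths
 * based on packet length and fragment count, falling back to the
 * copy path if mapping fails.
 */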
1083 static boolean_t
1084 unm_nic_xmit_frame(unm_adapter *adapter, mblk_t *mp)
1085 {
1086 	pktinfo_t	pktinfo;
1087 	boolean_t	status = B_FALSE, send_mapped;
1088 
1089 	adapter->stats.xmitcalled++;
1090 
1091 	send_mapped = unm_get_pkt_info(mp, &pktinfo);
1092 
1093 	if (pktinfo.total_len <= adapter->tx_bcopy_threshold ||
1094 	    pktinfo.mblk_no >= MAX_COOKIES_PER_CMD)
1095 		send_mapped = B_FALSE;
1096 
1097 	if (send_mapped == B_TRUE)
1098 		status = unm_send_mapped(adapter, mp, &pktinfo);
1099 
1100 	if (status != B_TRUE) {
1101 		if (pktinfo.total_len <= adapter->maxmtu)
1102 			return (unm_send_copy(adapter, mp, &pktinfo));
1103 
1104 		/* message too large */
1105 		freemsg(mp);
1106 		adapter->stats.txdropped++;
1107 		status = B_TRUE;
1108 	}
1109 
1110 	return (status);
1111 }
1112 
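/*
 * Check the device temperature state; warn when it exceeds the
 * operating range and return nonzero when it has reached the panic
 * threshold (device shut down).
 */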
1113 static int
1114 unm_nic_check_temp(struct unm_adapter_s *adapter)
1115 {
1116 	uint32_t temp, temp_state, temp_val;
1117 	int rv = 0;
1118 
1119 	if ((adapter->ahw.revision_id == NX_P3_A2) ||
1120 	    (adapter->ahw.revision_id == NX_P3_A0))
1121 		return (0);
1122 
1123 	temp = adapter->unm_nic_pci_read_normalize(adapter, CRB_TEMP_STATE);
1124 
1125 	temp_state = nx_get_temp_state(temp);
1126 	temp_val = nx_get_temp_val(temp);
1127 
1128 	if (temp_state == NX_TEMP_PANIC) {
1129 		cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1130 		    "maximum allowed, device has been shut down\n",
1131 		    unm_nic_driver_name, temp_val);
1132 		rv = 1;
1133 	} else if (temp_state == NX_TEMP_WARN) {
1134 		if (adapter->temp == NX_TEMP_NORMAL) {
1135 			cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1136 			    "operating range. Immediate action needed.\n",
1137 			    unm_nic_driver_name, temp_val);
1138 		}
1139 	} else {
1140 		if (adapter->temp == NX_TEMP_WARN) {
1141 			cmn_err(CE_WARN, "%s: Device temperature is now %d "
1142 			    "degrees C in normal range.\n",
1143 			    unm_nic_driver_name, temp_val);
1144 		}
1145 	}
1146 
1147 	adapter->temp = temp_state;
1148 	return (rv);
1149 }
1150 
1151 static void
1152 unm_watchdog(unsigned long v)
1153 {
1154 	unm_adapter *adapter = (unm_adapter *)v;
1155 
1156 	if ((adapter->portnum == 0) && unm_nic_check_temp(adapter)) {
1157 		/*
1158 		 * Return without rescheduling the watchdog, as the
1159 		 * device has overheated.
1160 		 */
1161 		return;
1162 	}
1163 
1164 	unm_nic_handle_phy_intr(adapter);
1165 
1166 	/*
1167 	 * This function schedules a call for itself.
1168 	 */
1169 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1170 	    (void *)adapter, 2 * drv_usectohz(1000000));
1171 
1172 }
1173 
1174 static void unm_nic_clear_stats(unm_adapter *adapter)
1175 {
1176 	(void) memset(&adapter->stats, 0, sizeof (adapter->stats));
1177 }
1178 
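/*
 * Reap tx completions and rx packets until both rings are drained,
 * then re-enable interrupts.
 */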
1179 static void
1180 unm_nic_poll(unm_adapter *adapter)
1181 {
1182 	int	work_done, tx_complete;
1183 
1184 	adapter->stats.polled++;
1185 
1186 loop:
1187 	tx_complete = unm_process_cmd_ring(adapter);
1188 	work_done = unm_process_rcv_ring(adapter, NX_RX_MAXBUFS);
1189 	if ((!tx_complete) || (!(work_done < NX_RX_MAXBUFS)))
1190 		goto loop;
1191 
1192 	UNM_READ_LOCK(&adapter->adapter_lock);
1193 	unm_nic_enable_int(adapter);
1194 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1195 }
1196 
1197 /* ARGSUSED */
1198 uint_t
1199 unm_intr(caddr_t data, caddr_t arg)
1200 {
1201 	unm_adapter	*adapter = (unm_adapter *)(uintptr_t)data;
1202 
1203 	if (unm_nic_clear_int(adapter))
1204 		return (DDI_INTR_UNCLAIMED);
1205 
1206 	unm_nic_poll(adapter);
1207 	return (DDI_INTR_CLAIMED);
1208 }
1209 
1210 /*
1211  * This is invoked from the receive ISR. Due to the single-threaded nature
1212  * of the invocation, pool_lock acquisition is not necessary to protect
1213  * pool_list.
1214  */
1215 static void
1216 unm_free_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc, unm_rx_buffer_t *rx_buffer)
1217 {
1218 	/* mutex_enter(rcv_desc->pool_lock); */
1219 	rx_buffer->next = rcv_desc->pool_list;
1220 	rcv_desc->pool_list = rx_buffer;
1221 	rcv_desc->rx_buf_free++;
1222 	/* mutex_exit(rcv_desc->pool_lock); */
1223 }
1224 
1225 /*
1226  * unm_process_rcv() sends the received packet to the protocol stack.
1227  */
1228 static mblk_t *
1229 unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc)
1230 {
1231 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1232 	unm_rx_buffer_t		*rx_buffer;
1233 	mblk_t *mp;
1234 	u32			desc_ctx = desc->u1.s1.type;
1235 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
1236 	u32			pkt_length = desc->u1.s1.totalLength;
1237 	int			poff = desc->u1.s1.pkt_offset;
1238 	int			index, cksum_flags, docopy;
1239 	int			index_lo = desc->u1.s1.referenceHandle_lo;
1240 	char			*vaddr;
1241 
1242 	index = ((desc->u1.s1.referenceHandle_hi << 4) | index_lo);
1243 
1244 	rx_buffer = index2rxbuf(rcv_desc, index);
1245 
1246 	if (rx_buffer == NULL) {
1247 		cmn_err(CE_WARN, "\r\nNULL rx_buffer idx=%d", index);
1248 		return (NULL);
1249 	}
1250 	vaddr = (char *)rx_buffer->dma_info.vaddr;
1251 	if (vaddr == NULL) {
1252 		cmn_err(CE_WARN, "\r\nNULL vaddr");
1253 		return (NULL);
1254 	}
1255 	rcv_desc->rx_desc_handled++;
1256 	rcv_desc->rx_buf_card--;
1257 
1258 	(void) ddi_dma_sync(rx_buffer->dma_info.dma_hdl, 0,
1259 	    pkt_length + poff + (adapter->ahw.cut_through ? 0 :
1260 	    IP_ALIGNMENT_BYTES), DDI_DMA_SYNC_FORCPU);
1261 
1262 	/*
1263 	 * Copy packet into new allocated message buffer, if pkt_length
1264 	 * is below copy threshold.
1265 	 */
1266 	docopy = (pkt_length <= adapter->rx_bcopy_threshold) ? 1 : 0;
1267 
1268 	/*
1269 	 * If card is running out of rx buffers, then attempt to allocate
1270 	 * new mblk so we can feed this rx buffer back to card (we
1271 	 * _could_ look at what's pending on free and recycle lists).
1272 	 */
1273 	if (rcv_desc->rx_buf_card < NX_RX_THRESHOLD) {
1274 		docopy = 1;
1275 		adapter->stats.rxbufshort++;
1276 	}
1277 
1278 	if (docopy == 1) {
1279 		if ((mp = allocb(pkt_length + IP_ALIGNMENT_BYTES, 0)) == NULL) {
1280 			adapter->stats.allocbfailed++;
1281 			goto freebuf;
1282 		}
1283 
1284 		mp->b_rptr += IP_ALIGNMENT_BYTES;
1285 		vaddr += poff;
1286 		bcopy(vaddr, mp->b_rptr, pkt_length);
1287 		adapter->stats.rxcopyed++;
1288 		unm_free_rx_buffer(rcv_desc, rx_buffer);
1289 	} else {
1290 		mp = (mblk_t *)rx_buffer->mp;
1291 		if (mp == NULL) {
1292 			mp = desballoc(rx_buffer->dma_info.vaddr,
1293 			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
1294 			if (mp == NULL) {
1295 				adapter->stats.desballocfailed++;
1296 				goto freebuf;
1297 			}
1298 			rx_buffer->mp = mp;
1299 		}
1300 		mp->b_rptr += poff;
1301 		adapter->stats.rxmapped++;
1302 	}
1303 
1304 	mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_length);
1305 
1306 	if (desc->u1.s1.status == STATUS_CKSUM_OK) {
1307 		adapter->stats.csummed++;
1308 		cksum_flags =
1309 		    HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM | HCK_FULLCKSUM;
1310 	} else {
1311 		cksum_flags = 0;
1312 	}
1313 	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, cksum_flags, 0);
1314 
1315 	adapter->stats.no_rcv++;
1316 	adapter->stats.rxbytes += pkt_length;
1317 	adapter->stats.uphappy++;
1318 
1319 	return (mp);
1320 
1321 freebuf:
1322 	unm_free_rx_buffer(rcv_desc, rx_buffer);
1323 	return (NULL);
1324 }
1325 
1326 /* Process Receive status ring */
1327 static int
1328 unm_process_rcv_ring(unm_adapter *adapter, int max)
1329 {
1330 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1331 	statusDesc_t		*desc_head = recv_ctx->rcvStatusDescHead;
1332 	statusDesc_t		*desc = NULL;
1333 	uint32_t		consumer, start;
1334 	int			count = 0, ring;
1335 	mblk_t *mp;
1336 
1337 	start = consumer = recv_ctx->statusRxConsumer;
1338 
1339 	unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start, max,
1340 	    adapter->MaxRxDescCount, sizeof (statusDesc_t),
1341 	    DDI_DMA_SYNC_FORCPU);
1342 
1343 	while (count < max) {
1344 		desc = &desc_head[consumer];
1345 		if (!(desc->u1.s1.owner & STATUS_OWNER_HOST))
1346 			break;
1347 
1348 		mp = unm_process_rcv(adapter, desc);
1349 		desc->u1.s1.owner = STATUS_OWNER_PHANTOM;
1350 
1351 		consumer = (consumer + 1) % adapter->MaxRxDescCount;
1352 		count++;
1353 		if (mp != NULL)
1354 			mac_rx(adapter->mach, NULL, mp);
1355 	}
1356 
1357 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1358 		if (recv_ctx->rcv_desc[ring].rx_desc_handled > 0)
1359 			unm_post_rx_buffers_nodb(adapter, ring);
1360 	}
1361 
1362 	if (count) {
1363 		unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start,
1364 		    count, adapter->MaxRxDescCount, sizeof (statusDesc_t),
1365 		    DDI_DMA_SYNC_FORDEV);
1366 
1367 		/* update the consumer index in phantom */
1368 		recv_ctx->statusRxConsumer = consumer;
1369 
1370 		UNM_READ_LOCK(&adapter->adapter_lock);
1371 		adapter->unm_nic_hw_write_wx(adapter,
1372 		    recv_ctx->host_sds_consumer, &consumer, 4);
1373 		UNM_READ_UNLOCK(&adapter->adapter_lock);
1374 	}
1375 
1376 	return (count);
1377 }
1378 
1379 /* Process Command status ring */
1380 static int
1381 unm_process_cmd_ring(struct unm_adapter_s *adapter)
1382 {
1383 	u32			last_consumer;
1384 	u32			consumer;
1385 	int			count = 0;
1386 	struct unm_cmd_buffer	*buffer;
1387 	int			done;
1388 	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL;
1389 	uint32_t	free_hdls = 0;
1390 
1391 	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1392 	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1393 
1394 	last_consumer = adapter->lastCmdConsumer;
1395 	consumer = *(adapter->cmdConsumer);
1396 
1397 	while (last_consumer != consumer) {
1398 		buffer = &adapter->cmd_buf_arr[last_consumer];
1399 		if (buffer->head != NULL) {
1400 			dmah = buffer->head;
1401 			while (dmah != NULL) {
1402 				(void) ddi_dma_unbind_handle(dmah->dmahdl);
1403 				dmah = dmah->next;
1404 				free_hdls++;
1405 			}
1406 
1407 			if (head == NULL) {
1408 				head = buffer->head;
1409 				tail = buffer->tail;
1410 			} else {
1411 				tail->next = buffer->head;
1412 				tail = buffer->tail;
1413 			}
1414 
1415 			buffer->head = NULL;
1416 			buffer->tail = NULL;
1417 
1418 			if (buffer->msg != NULL) {
1419 				freemsg(buffer->msg);
1420 				buffer->msg = NULL;
1421 			}
1422 		}
1423 
1424 		last_consumer = get_next_index(last_consumer,
1425 		    adapter->MaxTxDescCount);
1426 		if (++count > NX_MAX_TXCOMPS)
1427 			break;
1428 	}
1429 
1430 	if (count) {
1431 		int	doresched;
1432 
1433 		UNM_SPIN_LOCK(&adapter->tx_lock);
1434 		adapter->lastCmdConsumer = last_consumer;
1435 		adapter->freecmds += count;
1436 		membar_exit();
1437 
1438 		doresched = adapter->resched_needed;
1439 		if (doresched)
1440 			adapter->resched_needed = 0;
1441 
1442 		if (head != NULL)
1443 			unm_return_dma_handle(adapter, head, tail, free_hdls);
1444 
1445 		UNM_SPIN_UNLOCK(&adapter->tx_lock);
1446 
1447 		if (doresched)
1448 			mac_tx_update(adapter->mach);
1449 	}
1450 
1451 	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1452 	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1453 
1454 	consumer = *(adapter->cmdConsumer);
1455 	done = (adapter->lastCmdConsumer == consumer);
1456 
1457 	return (done);
1458 }
1459 
1460 /*
1461  * This is invoked from the receive ISR, and at initialization time when no
1462  * rx buffers have been posted to the card. Due to the single-threaded nature
1463  * of the invocation, pool_lock acquisition is not necessary to protect
1464  * pool_list.
1465  */
1466 static unm_rx_buffer_t *
1467 unm_reserve_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc)
1468 {
1469 	unm_rx_buffer_t *rx_buffer = NULL;
1470 
1471 	/* mutex_enter(rcv_desc->pool_lock); */
1472 	if (rcv_desc->rx_buf_free) {
1473 		rx_buffer = rcv_desc->pool_list;
1474 		rcv_desc->pool_list = rx_buffer->next;
1475 		rx_buffer->next = NULL;
1476 		rcv_desc->rx_buf_free--;
1477 	} else {
1478 		mutex_enter(rcv_desc->recycle_lock);
1479 
1480 		if (rcv_desc->rx_buf_recycle) {
1481 			rcv_desc->pool_list = rcv_desc->recycle_list;
1482 			rcv_desc->recycle_list = NULL;
1483 			rcv_desc->rx_buf_free += rcv_desc->rx_buf_recycle;
1484 			rcv_desc->rx_buf_recycle = 0;
1485 
1486 			rx_buffer = rcv_desc->pool_list;
1487 			rcv_desc->pool_list = rx_buffer->next;
1488 			rx_buffer->next = NULL;
1489 			rcv_desc->rx_buf_free--;
1490 		}
1491 
1492 		mutex_exit(rcv_desc->recycle_lock);
1493 	}
1494 
1495 	/* mutex_exit(rcv_desc->pool_lock); */
1496 	return (rx_buffer);
1497 }
1498 
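/*
 * Ring the receive peg doorbell to advertise newly posted rx
 * descriptors to the firmware.
 */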
1499 static void
1500 post_rx_doorbell(struct unm_adapter_s *adapter, uint32_t ringid, int count)
1501 {
1502 #define	UNM_RCV_PEG_DB_ID	2
1503 #define	UNM_RCV_PRODUCER_OFFSET	0
1504 	ctx_msg msg = {0};
1505 
1506 	/*
1507 	 * Write a doorbell msg to tell phanmon of change in
1508 	 * receive ring producer
1509 	 */
1510 	msg.PegId = UNM_RCV_PEG_DB_ID;
1511 	msg.privId = 1;
1512 	msg.Count = count;
1513 	msg.CtxId = adapter->portnum;
1514 	msg.Opcode = UNM_RCV_PRODUCER(ringid);
1515 	dbwritel(*((__uint32_t *)&msg),
1516 	    (void *)(DB_NORMALIZE(adapter, UNM_RCV_PRODUCER_OFFSET)));
1517 }
1518 
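/*
 * Fill the entire rx descriptor ring from the buffer pool at
 * initialization time and publish the producer index to the card.
 */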
1519 static int
1520 unm_post_rx_buffers(struct unm_adapter_s *adapter, uint32_t ringid)
1521 {
1522 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1523 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1524 	unm_rx_buffer_t		*rx_buffer;
1525 	rcvDesc_t		*pdesc;
1526 	int			count;
1527 
1528 	for (count = 0; count < rcv_desc->MaxRxDescCount; count++) {
1529 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1530 		if (rx_buffer != NULL) {
1531 			pdesc = &rcv_desc->desc_head[count];
1532 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1533 			    rx_buffer);
1534 			pdesc->flags = ringid;
1535 			pdesc->bufferLength = rcv_desc->dma_size;
1536 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1537 		} else {
1538 			return (DDI_FAILURE);
1539 		}
1540 	}
1541 
1542 	rcv_desc->producer = count % rcv_desc->MaxRxDescCount;
1543 	count--;
1544 	unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle,
1545 	    0,		/* start */
1546 	    count,	/* count */
1547 	    count,	/* range */
1548 	    sizeof (rcvDesc_t),	/* unit_size */
1549 	    DDI_DMA_SYNC_FORDEV);	/* direction */
1550 
1551 	rcv_desc->rx_buf_card = rcv_desc->MaxRxDescCount;
1552 	UNM_READ_LOCK(&adapter->adapter_lock);
1553 	adapter->unm_nic_hw_write_wx(adapter, rcv_desc->host_rx_producer,
1554 	    &count, 4);
1555 	if (adapter->fw_major < 4)
1556 		post_rx_doorbell(adapter, ringid, count);
1557 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1558 
1559 	return (DDI_SUCCESS);
1560 }
1561 
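/*
 * Repost handled rx buffers from the receive ISR. Unlike
 * unm_post_rx_buffers(), this only updates the producer index and
 * never rings the doorbell.
 */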
1562 static void
1563 unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
1564     uint32_t ringid)
1565 {
1566 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1567 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1568 	struct unm_rx_buffer	*rx_buffer;
1569 	rcvDesc_t		*pdesc;
1570 	int 			count, producer = rcv_desc->producer;
1571 	int 			last_producer = producer;
1572 
1573 	for (count = 0; count < rcv_desc->rx_desc_handled; count++) {
1574 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1575 		if (rx_buffer != NULL) {
1576 			pdesc = &rcv_desc->desc_head[producer];
1577 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1578 			    rx_buffer);
1579 			pdesc->flags = ringid;
1580 			pdesc->bufferLength = rcv_desc->dma_size;
1581 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1582 		} else {
1583 			adapter->stats.outofrxbuf++;
1584 			break;
1585 		}
1586 		producer = get_next_index(producer, rcv_desc->MaxRxDescCount);
1587 	}
1588 
1589 	/* if we did allocate buffers, then write the count to Phantom */
1590 	if (count) {
1591 		/* Sync rx ring, considering case for wrap around */
1592 		unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle, last_producer,
1593 		    count, rcv_desc->MaxRxDescCount, sizeof (rcvDesc_t),
1594 		    DDI_DMA_SYNC_FORDEV);
1595 
1596 		rcv_desc->producer = producer;
1597 		rcv_desc->rx_desc_handled -= count;
1598 		rcv_desc->rx_buf_card += count;
1599 
1600 		producer = (producer - 1) % rcv_desc->MaxRxDescCount;
1601 		UNM_READ_LOCK(&adapter->adapter_lock);
1602 		adapter->unm_nic_hw_write_wx(adapter,
1603 		    rcv_desc->host_rx_producer, &producer, 4);
1604 		UNM_READ_UNLOCK(&adapter->adapter_lock);
1605 	}
1606 }
1607 
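/*
 * Fill in interface statistics: on XGBE boards read the NIU hardware
 * counters, otherwise report the software-maintained counters.
 */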
1608 int
1609 unm_nic_fill_statistics_128M(struct unm_adapter_s *adapter,
1610 			    struct unm_statistics *unm_stats)
1611 {
1612 	void *addr;
1613 	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1614 		UNM_WRITE_LOCK(&adapter->adapter_lock);
1615 		unm_nic_pci_change_crbwindow_128M(adapter, 0);
1616 
1617 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1618 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT,
1619 		    &(unm_stats->tx_bytes));
1620 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1621 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT,
1622 		    &(unm_stats->tx_packets));
1623 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1624 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT,
1625 		    &(unm_stats->rx_bytes));
1626 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1627 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT,
1628 		    &(unm_stats->rx_packets));
1629 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1630 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT,
1631 		    &(unm_stats->rx_errors));
1632 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1633 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT,
1634 		    &(unm_stats->rx_CRC_errors));
1635 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1636 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1637 		    &(unm_stats->rx_long_length_error));
1638 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1639 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1640 		    &(unm_stats->rx_short_length_error));
1641 
1642 		/*
1643 		 * For reading rx_MAC_error bit different procedure
1644 		 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
1645 		 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
1646 		 * unm_stats->rx_MAC_errors = temp & 0xff;
1647 		 */
1648 
1649 		unm_nic_pci_change_crbwindow_128M(adapter, 1);
1650 		UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1651 	} else {
1652 		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1653 		unm_stats->tx_bytes = adapter->stats.txbytes;
1654 		unm_stats->tx_packets = adapter->stats.xmitedframes +
1655 		    adapter->stats.xmitfinished;
1656 		unm_stats->rx_bytes = adapter->stats.rxbytes;
1657 		unm_stats->rx_packets = adapter->stats.no_rcv;
1658 		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1659 		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1660 		unm_stats->rx_short_length_error = adapter->stats.uplcong;
1661 		unm_stats->rx_long_length_error = adapter->stats.uphcong;
1662 		unm_stats->rx_CRC_errors = 0;
1663 		unm_stats->rx_MAC_errors = 0;
1664 		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1665 	}
1666 	return (0);
1667 }
1668 
1669 int
1670 unm_nic_fill_statistics_2M(struct unm_adapter_s *adapter,
1671     struct unm_statistics *unm_stats)
1672 {
1673 	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1674 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1675 		    &(unm_stats->tx_bytes), 4);
1676 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1677 		    &(unm_stats->tx_packets), 4);
1678 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1679 		    &(unm_stats->rx_bytes), 4);
1680 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1681 		    &(unm_stats->rx_packets), 4);
1682 		(void) unm_nic_hw_read_wx_2M(adapter,
1683 		    UNM_NIU_XGE_AGGR_ERROR_CNT, &(unm_stats->rx_errors), 4);
1684 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1685 		    &(unm_stats->rx_CRC_errors), 4);
1686 		(void) unm_nic_hw_read_wx_2M(adapter,
1687 		    UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1688 		    &(unm_stats->rx_long_length_error), 4);
1689 		(void) unm_nic_hw_read_wx_2M(adapter,
1690 		    UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1691 		    &(unm_stats->rx_short_length_error), 4);
1692 	} else {
1693 		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1694 		unm_stats->tx_bytes = adapter->stats.txbytes;
1695 		unm_stats->tx_packets = adapter->stats.xmitedframes +
1696 		    adapter->stats.xmitfinished;
1697 		unm_stats->rx_bytes = adapter->stats.rxbytes;
1698 		unm_stats->rx_packets = adapter->stats.no_rcv;
1699 		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1700 		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1701 		unm_stats->rx_short_length_error = adapter->stats.uplcong;
1702 		unm_stats->rx_long_length_error = adapter->stats.uphcong;
1703 		unm_stats->rx_CRC_errors = 0;
1704 		unm_stats->rx_MAC_errors = 0;
1705 		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1706 	}
1707 	return (0);
1708 }
1709 
1710 int
1711 unm_nic_clear_statistics_128M(struct unm_adapter_s *adapter)
1712 {
1713 	void *addr;
1714 	int data = 0;
1715 
1716 	UNM_WRITE_LOCK(&adapter->adapter_lock);
1717 	unm_nic_pci_change_crbwindow_128M(adapter, 0);
1718 
1719 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1720 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT, &data);
1721 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1722 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT, &data);
1723 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1724 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT, &data);
1725 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1726 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT, &data);
1727 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1728 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT, &data);
1729 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1730 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT, &data);
1731 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1732 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR, &data);
1733 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1734 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR, &data);
1735 
1736 	unm_nic_pci_change_crbwindow_128M(adapter, 1);
1737 	UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1738 	unm_nic_clear_stats(adapter);
1739 	return (0);
1740 }
1741 
1742 int
1743 unm_nic_clear_statistics_2M(struct unm_adapter_s *adapter)
1744 {
1745 	int data = 0;
1746 
1747 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1748 	    &data, 4);
1749 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1750 	    &data, 4);
1751 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1752 	    &data, 4);
1753 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1754 	    &data, 4);
1755 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_AGGR_ERROR_CNT,
1756 	    &data, 4);
1757 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1758 	    &data, 4);
1759 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1760 	    &data, 4);
1761 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1762 	    &data, 4);
1763 	unm_nic_clear_stats(adapter);
1764 	return (0);
1765 }
1766 
1767 /*
1768  * unm_nic_ioctl ()    We provide the tcl/phanmon support
1769  * through these ioctls.
1770  */
1771 static void
1772 unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q, mblk_t *mp)
1773 {
1774 	void *ptr;
1775 
1776 	switch (cmd) {
1777 	case UNM_NIC_CMD:
1778 		(void) unm_nic_do_ioctl(adapter, q, mp);
1779 		break;
1780 
1781 	case UNM_NIC_NAME:
1782 		ptr = (void *) mp->b_cont->b_rptr;
1783 
1784 		/*
1785 		 * Phanmon checks for "UNM-UNM" string
1786 		 * Replace the hardcoded value with appropriate macro
1787 		 */
1788 		DPRINTF(-1, (CE_CONT, "UNM_NIC_NAME ioctl executed %d %d\n",
1789 		    cmd, __LINE__));
1790 		(void) memcpy(ptr, "UNM-UNM\0\0", 10);
1791 		miocack(q, mp, 10, 0);
1792 		break;
1793 
1794 	default:
1795 		cmn_err(CE_WARN, "Netxen ioctl cmd %x not supported\n", cmd);
1796 
1797 		miocnak(q, mp, 0, EINVAL);
1798 		break;
1799 	}
1800 }
1801 
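/*
 * Restart the watchdog, re-enable interrupts and report the link as
 * up after a resume.
 */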
1802 int
1803 unm_nic_resume(unm_adapter *adapter)
1804 {
1805 
1806 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1807 	    (void *) adapter, 50000);
1808 
1809 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1810 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
1811 	else
1812 		(void) ddi_intr_enable(adapter->intr_handle);
1813 	UNM_READ_LOCK(&adapter->adapter_lock);
1814 	unm_nic_enable_int(adapter);
1815 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1816 
1817 	mac_link_update(adapter->mach, LINK_STATE_UP);
1818 
1819 	return (DDI_SUCCESS);
1820 }
1821 
1822 int
1823 unm_nic_suspend(unm_adapter *adapter)
1824 {
1825 	mac_link_update(adapter->mach, LINK_STATE_DOWN);
1826 
1827 	(void) untimeout(adapter->watchdog_timer);
1828 
1829 	UNM_READ_LOCK(&adapter->adapter_lock);
1830 	unm_nic_disable_int(adapter);
1831 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1832 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1833 		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
1834 	else
1835 		(void) ddi_intr_disable(adapter->intr_handle);
1836 
1837 	return (DDI_SUCCESS);
1838 }
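/*
 * unm_nic_resume() and unm_nic_suspend() are mirror images: suspend
 * reports link-down and cancels the watchdog before masking interrupts,
 * while resume re-arms the watchdog and interrupts before reporting
 * link-up.
 */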
1839 
1840 static int
1841 unm_nic_do_ioctl(unm_adapter *adapter, queue_t *wq, mblk_t *mp)
1842 {
1843 	unm_nic_ioctl_data_t		data;
1844 	struct unm_nic_ioctl_data	*up_data;
1845 	ddi_acc_handle_t		conf_handle;
1846 	int				retval = 0;
1847 	uint64_t			efuse_chip_id = 0;
1848 	char				*ptr1;
1849 	short				*ptr2;
1850 	int				*ptr4;
1851 
1852 	up_data = (struct unm_nic_ioctl_data *)(mp->b_cont->b_rptr);
1853 	(void) memcpy(&data, (void *)(uintptr_t)(mp->b_cont->b_rptr),
1854 	    sizeof (data));
1855 
1856 	/* Don't access beyond the legal limits of the "char uabc[64]" member */
1857 	if (data.size > sizeof (data.uabc)) {
1858 		/* the caller passed an unsafe size; reject the request */
1859 		cmn_err(CE_WARN, "bad size: %d\n", data.size);
1860 		retval = GLD_BADARG;
1861 		goto error_out;
1862 	}
1863 
1864 	switch (data.cmd) {
1865 	case unm_nic_cmd_pci_read:
1866 
1867 		if ((data.rv = adapter->unm_nic_hw_read_ioctl(adapter,
1868 		    data.off, up_data, data.size))) {
1869 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_read_ioctl "
1870 			    "returned %d\n", __FUNCTION__, __LINE__, data.rv));
1871 
1872 			retval = data.rv;
1873 			goto error_out;
1874 		}
1875 
1876 		data.rv = 0;
1877 		break;
1878 
1879 	case unm_nic_cmd_pci_write:
1880 		if ((data.rv = adapter->unm_nic_hw_write_ioctl(adapter,
1881 		    data.off, &(data.uabc), data.size))) {
1882 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_write_ioctl "
1883 			    "returned %d\n", __FUNCTION__,
1884 			    __LINE__, data.rv));
1885 			retval = data.rv;
1886 			goto error_out;
1887 		}
1888 		data.size = 0;
1889 		break;
1890 
1891 	case unm_nic_cmd_pci_mem_read:
1892 		if ((data.rv = adapter->unm_nic_pci_mem_read(adapter,
1893 		    data.off, up_data, data.size))) {
1894 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_pci_mem_read "
1895 			    "returned %d\n", __FUNCTION__,
1896 			    __LINE__, data.rv));
1897 			retval = data.rv;
1898 			goto error_out;
1899 		}
1900 		data.rv = 0;
1901 		break;
1902 
1903 	case unm_nic_cmd_pci_mem_write:
1904 		if ((data.rv = adapter->unm_nic_pci_mem_write(adapter,
1905 		    data.off, &(data.uabc), data.size))) {
1906 			DPRINTF(-1, (CE_WARN,
1907 			    "%s(%d) unm_nic_cmd_pci_mem_write "
1908 			    "returned %d\n",
1909 			    __FUNCTION__, __LINE__, data.rv));
1910 			retval = data.rv;
1911 			goto error_out;
1912 		}
1913 
1914 		data.size = 0;
1915 		data.rv = 0;
1916 		break;
1917 
1918 	case unm_nic_cmd_pci_config_read:
1919 
1920 		if (adapter->pci_cfg_handle != NULL) {
1921 			conf_handle = adapter->pci_cfg_handle;
1922 
1923 		} else if ((retval = pci_config_setup(adapter->dip,
1924 		    &conf_handle)) != DDI_SUCCESS) {
1925 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1926 			    " error:%d\n", unm_nic_driver_name, retval));
1927 			goto error_out;
1928 
1929 		} else
1930 			adapter->pci_cfg_handle = conf_handle;
1931 
1932 		switch (data.size) {
1933 		case 1:
1934 			ptr1 = (char *)up_data;
1935 			*ptr1 = (char)pci_config_get8(conf_handle, data.off);
1936 			break;
1937 		case 2:
1938 			ptr2 = (short *)up_data;
1939 			*ptr2 = (short)pci_config_get16(conf_handle, data.off);
1940 			break;
1941 		case 4:
1942 			ptr4 = (int *)up_data;
1943 			*ptr4 = (int)pci_config_get32(conf_handle, data.off);
1944 			break;
1945 		}
1946 
1947 		break;
1948 
1949 	case unm_nic_cmd_pci_config_write:
1950 
1951 		if (adapter->pci_cfg_handle != NULL) {
1952 			conf_handle = adapter->pci_cfg_handle;
1953 		} else if ((retval = pci_config_setup(adapter->dip,
1954 		    &conf_handle)) != DDI_SUCCESS) {
1955 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1956 			    " error:%d\n", unm_nic_driver_name, retval));
1957 			goto error_out;
1958 		} else {
1959 			adapter->pci_cfg_handle = conf_handle;
1960 		}
1961 
1962 		switch (data.size) {
1963 		case 1:
1964 			pci_config_put8(conf_handle,
1965 			    data.off, *(char *)&(data.uabc));
1966 			break;
1967 		case 2:
1968 			pci_config_put16(conf_handle,
1969 			    data.off, *(short *)(uintptr_t)&(data.uabc));
1970 			break;
1971 		case 4:
1972 			pci_config_put32(conf_handle,
1973 			    data.off, *(u32 *)(uintptr_t)&(data.uabc));
1974 			break;
1975 		}
1976 		data.size = 0;
1977 		break;
1978 
1979 	case unm_nic_cmd_get_stats:
1980 		data.rv = adapter->unm_nic_fill_statistics(adapter,
1981 		    (struct unm_statistics *)up_data);
1982 		data.size = sizeof (struct unm_statistics);
1983 
1984 		break;
1985 
1986 	case unm_nic_cmd_clear_stats:
1987 		data.rv = adapter->unm_nic_clear_statistics(adapter);
1988 		break;
1989 
1990 	case unm_nic_cmd_get_version:
1991 		(void) memcpy(up_data, UNM_NIC_VERSIONID,
1992 		    sizeof (UNM_NIC_VERSIONID));
1993 		data.size = sizeof (UNM_NIC_VERSIONID);
1994 
1995 		break;
1996 
1997 	case unm_nic_cmd_get_phy_type:
1998 		cmn_err(CE_WARN, "unm_nic_cmd_get_phy_type unimplemented\n");
1999 		break;
2000 
2001 	case unm_nic_cmd_efuse_chip_id:
2002 		efuse_chip_id = adapter->unm_nic_pci_read_normalize(adapter,
2003 		    UNM_EFUSE_CHIP_ID_HIGH);
2004 		efuse_chip_id <<= 32;
2005 		efuse_chip_id |= adapter->unm_nic_pci_read_normalize(adapter,
2006 		    UNM_EFUSE_CHIP_ID_LOW);
2007 		(void) memcpy(up_data, &efuse_chip_id, sizeof (uint64_t));
2008 		data.rv = 0;
2009 		break;
2010 
2011 	default:
2012 		cmn_err(CE_WARN, "%s%d: bad command %d\n", adapter->name,
2013 		    adapter->instance, data.cmd);
2014 		data.rv = GLD_NOTSUPPORTED;
2015 		data.size = 0;
2016 		goto error_out;
2017 	}
2018 
2019 work_done:
2020 	miocack(wq, mp, data.size, data.rv);
2021 	return (DDI_SUCCESS);
2022 
2023 error_out:
2024 	cmn_err(CE_WARN, "%s(%d) ioctl error\n", __FUNCTION__, data.cmd);
2025 	miocnak(wq, mp, 0, EINVAL);
2026 	return (retval);
2027 }
2028 
2029 /*
2030  * Local datatype for defining tables of (Offset, Name) pairs
2031  */
2032 typedef struct {
2033 	offset_t	index;
2034 	char		*name;
2035 } unm_ksindex_t;
2036 
2037 static const unm_ksindex_t unm_kstat[] = {
2038 	{ 0,		"freehdls"		},
2039 	{ 1,		"freecmds"		},
2040 	{ 2,		"tx_bcopy_threshold"	},
2041 	{ 3,		"rx_bcopy_threshold"	},
2042 	{ 4,		"xmitcalled"		},
2043 	{ 5,		"xmitedframes"		},
2044 	{ 6,		"xmitfinished"		},
2045 	{ 7,		"txbytes"		},
2046 	{ 8,		"txcopyed"		},
2047 	{ 9,		"txmapped"		},
2048 	{ 10,		"outoftxdmahdl"		},
2049 	{ 11,		"outofcmddesc"		},
2050 	{ 12,		"txdropped"		},
2051 	{ 13,		"polled"		},
2052 	{ 14,		"uphappy"		},
2053 	{ 15,		"updropped"		},
2054 	{ 16,		"csummed"		},
2055 	{ 17,		"no_rcv"		},
2056 	{ 18,		"rxbytes"		},
2057 	{ 19,		"rxcopyed"		},
2058 	{ 20,		"rxmapped"		},
2059 	{ 21,		"desballocfailed"	},
2060 	{ 22,		"outofrxbuf"		},
2061 	{ 23,		"promiscmode"		},
2062 	{ 24,		"rxbufshort"		},
2063 	{ 25,		"allocbfailed"		},
2064 	{ -1,		NULL			}
2065 };
2066 
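/*
 * The assignments in unm_kstat_update() below must remain in the same
 * order as the names in unm_kstat[] above: unm_setup_named_kstat()
 * initializes the kstat entries strictly in table order.
 */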
2067 static int
2068 unm_kstat_update(kstat_t *ksp, int flag)
2069 {
2070 	unm_adapter *adapter;
2071 	kstat_named_t *knp;
2072 
2073 	if (flag != KSTAT_READ)
2074 		return (EACCES);
2075 
2076 	adapter = ksp->ks_private;
2077 	knp = ksp->ks_data;
2078 
2079 	(knp++)->value.ui64 = adapter->freehdls;
2080 	(knp++)->value.ui64 = adapter->freecmds;
2081 	(knp++)->value.ui64 = adapter->tx_bcopy_threshold;
2082 	(knp++)->value.ui64 = adapter->rx_bcopy_threshold;
2083 
2084 	(knp++)->value.ui64 = adapter->stats.xmitcalled;
2085 	(knp++)->value.ui64 = adapter->stats.xmitedframes;
2086 	(knp++)->value.ui64 = adapter->stats.xmitfinished;
2087 	(knp++)->value.ui64 = adapter->stats.txbytes;
2088 	(knp++)->value.ui64 = adapter->stats.txcopyed;
2089 	(knp++)->value.ui64 = adapter->stats.txmapped;
2090 	(knp++)->value.ui64 = adapter->stats.outoftxdmahdl;
2091 	(knp++)->value.ui64 = adapter->stats.outofcmddesc;
2092 	(knp++)->value.ui64 = adapter->stats.txdropped;
2093 	(knp++)->value.ui64 = adapter->stats.polled;
2094 	(knp++)->value.ui64 = adapter->stats.uphappy;
2095 	(knp++)->value.ui64 = adapter->stats.updropped;
2096 	(knp++)->value.ui64 = adapter->stats.csummed;
2097 	(knp++)->value.ui64 = adapter->stats.no_rcv;
2098 	(knp++)->value.ui64 = adapter->stats.rxbytes;
2099 	(knp++)->value.ui64 = adapter->stats.rxcopyed;
2100 	(knp++)->value.ui64 = adapter->stats.rxmapped;
2101 	(knp++)->value.ui64 = adapter->stats.desballocfailed;
2102 	(knp++)->value.ui64 = adapter->stats.outofrxbuf;
2103 	(knp++)->value.ui64 = adapter->stats.promiscmode;
2104 	(knp++)->value.ui64 = adapter->stats.rxbufshort;
2105 	(knp++)->value.ui64 = adapter->stats.allocbfailed;
2106 
2107 	return (0);
2108 }
2109 
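/*
 * Create and install a named kstat from an (offset, name) table.  The
 * first character of a name may encode the entry's data type: '%' for
 * uint32, '$' for string, '&' for char; anything else defaults to
 * uint64.  A type-prefix character is stripped before the entry is
 * initialized.
 */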
2110 static kstat_t *
2111 unm_setup_named_kstat(unm_adapter *adapter, int instance, char *name,
2112 	const unm_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
2113 {
2114 	kstat_t *ksp;
2115 	kstat_named_t *knp;
2116 	char *np;
2117 	int type;
2118 	int count = 0;
2119 
2120 	size /= sizeof (unm_ksindex_t);
2121 	ksp = kstat_create(unm_nic_driver_name, instance, name, "net",
2122 	    KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT);
2123 	if (ksp == NULL)
2124 		return (NULL);
2125 
2126 	ksp->ks_private = adapter;
2127 	ksp->ks_update = update;
2128 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
2129 		count++;
2130 		switch (*np) {
2131 		default:
2132 			type = KSTAT_DATA_UINT64;
2133 			break;
2134 		case '%':
2135 			np += 1;
2136 			type = KSTAT_DATA_UINT32;
2137 			break;
2138 		case '$':
2139 			np += 1;
2140 			type = KSTAT_DATA_STRING;
2141 			break;
2142 		case '&':
2143 			np += 1;
2144 			type = KSTAT_DATA_CHAR;
2145 			break;
2146 		}
2147 		kstat_named_init(knp, np, type);
2148 	}
2149 	kstat_install(ksp);
2150 
2151 	return (ksp);
2152 }
2153 
2154 void
2155 unm_init_kstats(unm_adapter *adapter, int instance)
2156 {
2157 	adapter->kstats[0] = unm_setup_named_kstat(adapter,
2158 	    instance, "kstatinfo", unm_kstat,
2159 	    sizeof (unm_kstat), unm_kstat_update);
2160 }
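/*
 * Reader's sketch (not part of the driver): the statistics installed
 * above are visible through libkstat.  Module name and instance are
 * assumptions matching unm_nic_driver_name and the attach instance.
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "ntxn", 0, "kstatinfo");
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "rxbytes");
 *		if (kn != NULL)
 *			(void) printf("rxbytes %llu\n",
 *			    (u_longlong_t)kn->value.ui64);
 *	}
 *	(void) kstat_close(kc);
 *
 * or, from the shell:  kstat -m ntxn -i 0 -n kstatinfo
 */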
2161 
2162 void
2163 unm_fini_kstats(unm_adapter *adapter)
2164 {
2165 
2166 	if (adapter->kstats[0] != NULL) {
2167 		kstat_delete(adapter->kstats[0]);
2168 		adapter->kstats[0] = NULL;
2169 	}
2170 }
2171 
2172 static int
2173 unm_nic_set_pauseparam(unm_adapter *adapter, unm_pauseparam_t *pause)
2174 {
2175 	int ret = 0;
2176 
2177 	if (adapter->ahw.board_type == UNM_NIC_GBE) {
2178 		if (unm_niu_gbe_set_rx_flow_ctl(adapter, pause->rx_pause))
2179 			ret = -EIO;
2180 
2181 		if (unm_niu_gbe_set_tx_flow_ctl(adapter, pause->tx_pause))
2182 			ret = -EIO;
2183 
2184 	} else if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2185 		if (unm_niu_xg_set_tx_flow_ctl(adapter, pause->tx_pause))
2186 			ret = -EIO;
2187 	} else
2188 		ret = -EIO;
2189 
2190 	return (ret);
2191 }
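/*
 * Tuning sketch (not part of the driver): the pause parameters are
 * surfaced through the NDD variables registered in unm_register_mac().
 * The node and parameter names below are assumptions for illustration:
 *
 *	# ndd -set /dev/ntxn0 adv_pause_cap 1
 *	# ndd -get /dev/ntxn0 adv_pause_cap
 */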
2192 
2193 /*
2194  * GLD/MAC interfaces
2195  */
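/*
 * ntxn_m_start() brings the port up in stages: rings, firmware,
 * hardware resources, rx buffers, MAC address, port/link setup and,
 * finally, interrupts.  Each failure path unwinds only what has been
 * set up so far via the stop_and_free / free_hw_res / dest_rings
 * labels at the bottom.
 */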
2196 static int
2197 ntxn_m_start(void *arg)
2198 {
2199 	unm_adapter	*adapter = arg;
2200 	int		ring;
2201 
2202 	UNM_SPIN_LOCK(&adapter->lock);
2203 	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC) {
2204 		UNM_SPIN_UNLOCK(&adapter->lock);
2205 		return (DDI_SUCCESS);
2206 	}
2207 
2208 	if (create_rxtx_rings(adapter) != DDI_SUCCESS) {
2209 		UNM_SPIN_UNLOCK(&adapter->lock);
2210 		return (DDI_FAILURE);
2211 	}
2212 
2213 	if (init_firmware(adapter) != DDI_SUCCESS) {
2214 		UNM_SPIN_UNLOCK(&adapter->lock);
2215 		cmn_err(CE_WARN, "%s%d: Failed to init firmware\n",
2216 		    adapter->name, adapter->instance);
2217 		goto dest_rings;
2218 	}
2219 
2220 	unm_nic_clear_stats(adapter);
2221 
2222 	if (unm_nic_hw_resources(adapter) != 0) {
2223 		UNM_SPIN_UNLOCK(&adapter->lock);
2224 		cmn_err(CE_WARN, "%s%d: Error setting hw resources\n",
2225 		    adapter->name, adapter->instance);
2226 		goto dest_rings;
2227 	}
2228 
2229 	if (adapter->fw_major < 4) {
2230 		adapter->crb_addr_cmd_producer =
2231 		    crb_cmd_producer[adapter->portnum];
2232 		adapter->crb_addr_cmd_consumer =
2233 		    crb_cmd_consumer[adapter->portnum];
2234 		unm_nic_update_cmd_producer(adapter, 0);
2235 		unm_nic_update_cmd_consumer(adapter, 0);
2236 	}
2237 
2238 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
2239 		if (unm_post_rx_buffers(adapter, ring) != DDI_SUCCESS) {
2240 			UNM_SPIN_UNLOCK(&adapter->lock);
2241 			goto free_hw_res;
2242 		}
2243 	}
2244 
2245 	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0) {
2246 		UNM_SPIN_UNLOCK(&adapter->lock);
2247 		cmn_err(CE_WARN, "%s%d: Could not set mac address\n",
2248 		    adapter->name, adapter->instance);
2249 		goto free_hw_res;
2250 	}
2251 
2252 	if (unm_nic_init_port(adapter) != 0) {
2253 		UNM_SPIN_UNLOCK(&adapter->lock);
2254 		cmn_err(CE_WARN, "%s%d: Could not initialize port\n",
2255 		    adapter->name, adapter->instance);
2256 		goto free_hw_res;
2257 	}
2258 
2259 	unm_nic_set_link_parameters(adapter);
2260 
2261 	/*
2262 	 * TODO: P2 and P3 should eventually be handled the same way.
2263 	 */
2264 	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
2265 		if (unm_nic_set_promisc_mode(adapter) != 0) {
2266 			UNM_SPIN_UNLOCK(&adapter->lock);
2267 			cmn_err(CE_WARN, "%s%d: Could not set promisc mode\n",
2268 			    adapter->name, adapter->instance);
2269 			goto stop_and_free;
2270 		}
2271 	} else {
2272 		nx_p3_nic_set_multi(adapter);
2273 	}
2274 	adapter->stats.promiscmode = 1;
2275 
2276 	if (unm_nic_set_mtu(adapter, adapter->mtu) != 0) {
2277 		UNM_SPIN_UNLOCK(&adapter->lock);
2278 		cmn_err(CE_WARN, "%s%d: Could not set mtu\n",
2279 		    adapter->name, adapter->instance);
2280 		goto stop_and_free;
2281 	}
2282 
2283 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
2284 	    (void *)adapter, 0);
2285 
2286 	adapter->is_up = UNM_ADAPTER_UP_MAGIC;
2287 
2288 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
2289 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
2290 	else
2291 		(void) ddi_intr_enable(adapter->intr_handle);
2292 	unm_nic_enable_int(adapter);
2293 
2294 	UNM_SPIN_UNLOCK(&adapter->lock);
2295 	return (GLD_SUCCESS);
2296 
2297 stop_and_free:
2298 	unm_nic_stop_port(adapter);
2299 free_hw_res:
2300 	unm_free_hw_resources(adapter);
2301 dest_rings:
2302 	destroy_rxtx_rings(adapter);
2303 	return (DDI_FAILURE);
2304 }
2305 
2306 
2307 /*
2308  * This empty stop routine is kept here for reference until we know
2309  * whether GLDv3 requires anything different to be done here.  It
2310  * will be deleted later.
2311  */
2312 /* ARGSUSED */
2313 static void
2314 ntxn_m_stop(void *arg)
2315 {
2316 }
2317 
2318 /*ARGSUSED*/
2319 static int
2320 ntxn_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
2321 {
2322 	/*
2323 	 * When we correctly implement this, invoke nx_p3_nic_set_multi()
2324 	 * or nx_p2_nic_set_multi() here.
2325 	 */
2326 	return (GLD_SUCCESS);
2327 }
2328 
2329 /*ARGSUSED*/
2330 static int
2331 ntxn_m_promisc(void *arg, boolean_t on)
2332 {
2333 #if 0
2334 	int err = 0;
2335 	struct unm_adapter_s *adapter = arg;
2336 
2337 	err = on ? unm_nic_set_promisc_mode(adapter) :
2338 	    unm_nic_unset_promisc_mode(adapter);
2339 
2340 	if (err)
2341 		return (GLD_FAILURE);
2342 #endif
2343 
2344 	return (GLD_SUCCESS);
2345 }
2346 
2347 static int
2348 ntxn_m_stat(void *arg, uint_t stat, uint64_t *val)
2349 {
2350 	struct unm_adapter_s		*adapter = arg;
2351 	struct unm_adapter_stats	*portstat = &adapter->stats;
2352 
2353 	switch (stat) {
2354 	case MAC_STAT_IFSPEED:
2355 		if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2356 			/* 10 Gigs */
2357 			*val = 10000000000ULL;
2358 		} else {
2359 			/* 1 Gig */
2360 			*val = 1000000000;
2361 		}
2362 		break;
2363 
2364 	case MAC_STAT_MULTIRCV:
2365 		*val = 0;
2366 		break;
2367 
2368 	case MAC_STAT_BRDCSTRCV:
2369 	case MAC_STAT_BRDCSTXMT:
2370 		*val = 0;
2371 		break;
2372 
2373 	case MAC_STAT_NORCVBUF:
2374 		*val = portstat->updropped;
2375 		break;
2376 
2377 	case MAC_STAT_NOXMTBUF:
2378 		*val = portstat->txdropped;
2379 		break;
2380 
2381 	case MAC_STAT_RBYTES:
2382 		*val = portstat->rxbytes;
2383 		break;
2384 
2385 	case MAC_STAT_OBYTES:
2386 		*val = portstat->txbytes;
2387 		break;
2388 
2389 	case MAC_STAT_OPACKETS:
2390 		*val = portstat->xmitedframes;
2391 		break;
2392 
2393 	case MAC_STAT_IPACKETS:
2394 		*val = portstat->uphappy;
2395 		break;
2396 
2397 	case MAC_STAT_OERRORS:
2398 		*val = portstat->xmitcalled - portstat->xmitedframes;
2399 		break;
2400 
2401 	case ETHER_STAT_LINK_DUPLEX:
2402 		*val = LINK_DUPLEX_FULL;
2403 		break;
2404 
2405 	default:
2406 		/*
2407 		 * Shouldn't reach here...
2408 		 */
2409 		*val = 0;
2410 		DPRINTF(0, (CE_WARN, ": unrecognized parameter = %d, value "
2411 		    "returned 0\n", stat));
2412 
2413 	}
2414 
2415 	return (0);
2416 }
2417 
2418 static int
2419 ntxn_m_unicst(void *arg, const uint8_t *mac)
2420 {
2421 	struct unm_adapter_s *adapter = arg;
2422 
2423 	DPRINTF(-1, (CE_CONT, "%s: called\n", __func__));
2424 
2425 	if (unm_nic_macaddr_set(adapter, (uint8_t *)mac))
2426 		return (EAGAIN);
2427 	bcopy(mac, adapter->mac_addr, ETHERADDRL);
2428 
2429 	return (0);
2430 }
2431 
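/*
 * GLDv3 transmit entry point.  The framework hands us a b_next-linked
 * chain of messages; we transmit as many as we can and return the
 * untransmitted remainder (NULL if everything was queued), signalling
 * mac to retry the leftovers later.
 */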
2432 static mblk_t *
2433 ntxn_m_tx(void *arg, mblk_t *mp)
2434 {
2435 	unm_adapter *adapter = arg;
2436 	mblk_t *next;
2437 
2438 	while (mp != NULL) {
2439 		next = mp->b_next;
2440 		mp->b_next = NULL;
2441 
2442 		if (unm_nic_xmit_frame(adapter, mp) != B_TRUE) {
2443 			mp->b_next = next;
2444 			break;
2445 		}
2446 		mp = next;
2447 		adapter->stats.xmitedframes++;
2448 	}
2449 
2450 	return (mp);
2451 }
2452 
2453 static void
2454 ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2455 {
2456 	int		cmd;
2457 	struct iocblk   *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
2458 	struct unm_adapter_s *adapter = (struct unm_adapter_s *)arg;
2459 	enum ioc_reply status = IOC_DONE;
2460 
2461 	iocp->ioc_error = 0;
2462 	cmd = iocp->ioc_cmd;
2463 
2464 	if (cmd == ND_GET || cmd == ND_SET) {
2465 		status = unm_nd_ioctl(adapter, wq, mp, iocp);
2466 		switch (status) {
2467 		default:
2468 		case IOC_INVAL:
2469 			miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2470 			    EINVAL : iocp->ioc_error);
2471 			break;
2472 
2473 		case IOC_DONE:
2474 			break;
2475 
2476 		case IOC_RESTART_ACK:
2477 		case IOC_ACK:
2478 			miocack(wq, mp, 0, 0);
2479 			break;
2480 
2481 		case IOC_RESTART_REPLY:
2482 		case IOC_REPLY:
2483 			mp->b_datap->db_type = iocp->ioc_error == 0 ?
2484 			    M_IOCACK : M_IOCNAK;
2485 			qreply(wq, mp);
2486 			break;
2487 		}
2488 	} else if (cmd <= UNM_NIC_NAME && cmd >= UNM_CMD_START) {
2489 		unm_nic_ioctl(adapter, cmd, wq, mp);
2490 		return;
2491 	} else {
2492 		miocnak(wq, mp, 0, EINVAL);
2493 		return;
2494 	}
2495 }
2496 
2497 /* ARGSUSED */
2498 static boolean_t
2499 ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2500 {
2501 	switch (cap) {
2502 	case MAC_CAPAB_HCKSUM:
2503 		{
2504 			uint32_t *txflags = cap_data;
2505 
2506 			*txflags = (HCKSUM_ENABLE |
2507 			    HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM);
2508 		}
2509 		break;
2510 
2511 #ifdef SOLARIS11
2512 	case MAC_CAPAB_ANCHOR_VNIC:
2513 	case MAC_CAPAB_MULTIFACTADDR:
2514 #else
2515 	case MAC_CAPAB_POLL:
2516 	case MAC_CAPAB_MULTIADDRESS:
2517 #endif
2518 	default:
2519 		return (B_FALSE);
2520 	}
2521 
2522 	return (B_TRUE);
2523 }
2524 
2525 #define	NETXEN_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
2526 
2527 static mac_callbacks_t ntxn_m_callbacks = {
2528 	NETXEN_M_CALLBACK_FLAGS,
2529 	ntxn_m_stat,
2530 	ntxn_m_start,
2531 	ntxn_m_stop,
2532 	ntxn_m_promisc,
2533 	ntxn_m_multicst,
2534 	ntxn_m_unicst,
2535 	ntxn_m_tx,
2536 #ifndef SOLARIS11
2537 	NULL,			/* mc_resources */
2538 #endif
2539 	ntxn_m_ioctl,
2540 	ntxn_m_getcapab,
2541 	NULL,			/* mc_open */
2542 	NULL,			/* mc_close */
2543 	NULL,			/* mc_setprop */
2544 	NULL			/* mc_getprop */
2545 };
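/*
 * Only MC_IOCTL and MC_GETCAPAB are set in the callback flags, so
 * mac(9E) treats just ntxn_m_ioctl() and ntxn_m_getcapab() as optional
 * entry points; the other callbacks above are mandatory and always
 * used.
 */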
2546 
2547 int
2548 unm_register_mac(unm_adapter *adapter)
2549 {
2550 	int ret;
2551 	mac_register_t *macp;
2552 	unm_pauseparam_t pause;
2553 
2554 	dev_info_t *dip = adapter->dip;
2555 
2556 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2557 		cmn_err(CE_WARN, "mac_alloc failed: memory not available\n");
2558 		return (DDI_FAILURE);
2559 	}
2560 
2561 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2562 	macp->m_driver = adapter;
2563 	macp->m_dip = dip;
2564 	macp->m_instance = adapter->instance;
2565 	macp->m_src_addr = adapter->mac_addr;
2566 	macp->m_callbacks = &ntxn_m_callbacks;
2567 	macp->m_min_sdu = 0;
2568 	macp->m_max_sdu = adapter->mtu;
2569 #ifdef SOLARIS11
2570 	macp->m_margin = VLAN_TAGSZ;
2571 #endif /* SOLARIS11 */
2572 
2573 	ret = mac_register(macp, &adapter->mach);
2574 	mac_free(macp);
2575 	if (ret != 0) {
2576 		cmn_err(CE_WARN, "mac_register failed for port %d\n",
2577 		    adapter->portnum);
2578 		return (DDI_FAILURE);
2579 	}
2580 
2581 	unm_init_kstats(adapter, adapter->instance);
2582 
2583 	/* Register NDD-tweakable parameters */
2584 	if (unm_nd_init(adapter)) {
2585 		cmn_err(CE_WARN, "unm_nd_init() failed");
2586 		return (DDI_FAILURE);
2587 	}
2588 
2589 	pause.rx_pause = adapter->nd_params[PARAM_ADV_PAUSE_CAP].ndp_val;
2590 	pause.tx_pause = adapter->nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val;
2591 
2592 	if (unm_nic_set_pauseparam(adapter, &pause)) {
2593 		cmn_err(CE_WARN, "Bad pause settings: rx %d, tx %d",
2594 		    pause.rx_pause, pause.tx_pause);
2595 	}
2596 	adapter->nd_params[PARAM_PAUSE_CAP].ndp_val = pause.rx_pause;
2597 	adapter->nd_params[PARAM_ASYM_PAUSE_CAP].ndp_val = pause.tx_pause;
2598 
2599 	return (DDI_SUCCESS);
2600 }
2601