xref: /titanic_41/usr/src/uts/common/io/ntxn/unm_nic_main.c (revision 0dc2366f7b9f9f36e10909b1e95edbf2a261c2ac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 NetXen, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
27  * Use is subject to license terms.
28  */
29 #include <sys/types.h>
30 #include <sys/conf.h>
31 #include <sys/debug.h>
32 #include <sys/stropts.h>
33 #include <sys/stream.h>
34 #include <sys/strlog.h>
35 #include <sys/kmem.h>
36 #include <sys/stat.h>
37 #include <sys/kstat.h>
38 #include <sys/vtrace.h>
39 #include <sys/dlpi.h>
40 #include <sys/strsun.h>
41 #include <sys/ethernet.h>
42 #include <sys/modctl.h>
43 #include <sys/errno.h>
44 #include <sys/dditypes.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sysmacros.h>
48 #include <sys/pci.h>
49 
50 #include <sys/gld.h>
51 #include <netinet/in.h>
52 #include <inet/ip.h>
53 #include <inet/tcp.h>
54 
55 #include <sys/rwlock.h>
56 #include <sys/mutex.h>
57 #include <sys/pattr.h>
58 #include <sys/strsubr.h>
59 #include <sys/ddi_impldefs.h>
60 #include <sys/task.h>
61 
62 #include "unm_nic_hw.h"
63 #include "unm_nic.h"
64 
65 #include "nic_phan_reg.h"
66 #include "unm_nic_ioctl.h"
67 #include "nic_cmn.h"
68 #include "unm_version.h"
69 #include "unm_brdcfg.h"
70 
71 #if defined(lint)
72 #undef MBLKL
73 #define	MBLKL(_mp_)	((uintptr_t)(_mp_)->b_wptr - (uintptr_t)(_mp_)->b_rptr)
74 #endif /* lint */
75 
76 #undef UNM_LOOPBACK
77 #undef SINGLE_DMA_BUF
78 
79 #define	UNM_ADAPTER_UP_MAGIC	777
80 #define	VLAN_TAGSZ		0x4
81 
82 #define	index2rxbuf(_rdp_, _idx_)	((_rdp_)->rx_buf_pool + (_idx_))
83 #define	rxbuf2index(_rdp_, _bufp_)	((_bufp_) - (_rdp_)->rx_buf_pool)
84 
85 /*
86  * The receive ISR processes at most NX_RX_MAXBUFS incoming packets, then
87  * posts as many buffers as packets processed. This loop repeats as required
88  * to process all incoming packets delivered in a single interrupt. A higher
89  * value of NX_RX_MAXBUFS improves performance by posting rx buffers less
90  * frequently, but at the cost of not replenishing quickly enough when the
91  * card is running out of rx buffers.
92  */
93 #define	NX_RX_THRESHOLD		32
94 #define	NX_RX_MAXBUFS		128
95 #define	NX_MAX_TXCOMPS		256
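
/*
 * Editorial example (illustrative, not part of the original source): with
 * NX_RX_MAXBUFS = 128, an interrupt that delivered 300 packets is drained
 * in three passes of the poll loop (128 + 128 + 44), each pass reposting
 * the rx buffers it consumed before the next pass begins.
 */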
96 
97 extern int create_rxtx_rings(unm_adapter *adapter);
98 extern void destroy_rxtx_rings(unm_adapter *adapter);
99 
100 static void unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
101     uint32_t ringid);
102 static mblk_t *unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc);
103 static int unm_process_rcv_ring(unm_adapter *, int);
104 static int unm_process_cmd_ring(struct unm_adapter_s *adapter);
105 
106 static int unm_nic_do_ioctl(unm_adapter *adapter, queue_t *q, mblk_t *mp);
107 static void unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q,
108     mblk_t *mp);
109 
110 /* GLDv3 interface functions */
111 static int ntxn_m_start(void *);
112 static void ntxn_m_stop(void *);
113 static int ntxn_m_multicst(void *, boolean_t, const uint8_t *);
114 static int ntxn_m_promisc(void *, boolean_t);
115 static int ntxn_m_stat(void *arg, uint_t stat, uint64_t *val);
116 static mblk_t *ntxn_m_tx(void *, mblk_t *);
117 static void ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
118 static boolean_t ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data);
119 
120 /*
121  * Allocates a DMA handle and virtual memory and binds them; returns the
122  * size of the memory actually bound and its physical address.
123  */
124 int
125 unm_pci_alloc_consistent(unm_adapter *adapter,
126 		int size, caddr_t *address, ddi_dma_cookie_t *cookie,
127 		ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep)
128 {
129 	int			err;
130 	uint32_t		ncookies;
131 	size_t			ring_len;
132 	uint_t			dma_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
133 
134 	*dma_handle = NULL;
135 
136 	if (size <= 0)
137 		return (DDI_ENOMEM);
138 
139 	err = ddi_dma_alloc_handle(adapter->dip,
140 	    &adapter->gc_dma_attr_desc,
141 	    DDI_DMA_DONTWAIT, NULL, dma_handle);
142 	if (err != DDI_SUCCESS) {
143 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_alloc_handle FAILED:"
144 		    " %d", unm_nic_driver_name, __func__, err);
145 		return (DDI_ENOMEM);
146 	}
147 
148 	err = ddi_dma_mem_alloc(*dma_handle,
149 	    size, &adapter->gc_attr_desc,
150 	    dma_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
151 	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
152 	    handlep);
153 	if (err != DDI_SUCCESS) {
154 		cmn_err(CE_WARN, "!%s: %s: ddi_dma_mem_alloc failed: "
155 		    "ret %d, request size: %d",
156 		    unm_nic_driver_name, __func__, err, size);
157 		ddi_dma_free_handle(dma_handle);
158 		return (DDI_ENOMEM);
159 	}
160 
161 	if (ring_len < size) {
162 		cmn_err(CE_WARN, "%s: %s: could not allocate required "
163 		    "memory: %d bytes\n", unm_nic_driver_name,
164 		    __func__, size);
165 		ddi_dma_mem_free(handlep);
166 		ddi_dma_free_handle(dma_handle);
167 		return (DDI_FAILURE);
168 	}
169 
170 	(void) memset(*address, 0, size);
171 
172 	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
173 	    NULL, *address, ring_len,
174 	    dma_flags,
175 	    DDI_DMA_DONTWAIT, NULL,
176 	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
177 	    (ncookies != 1)) {
178 		cmn_err(CE_WARN,
179 		    "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
180 		    unm_nic_driver_name, __func__, err);
181 		ddi_dma_mem_free(handlep);
182 		ddi_dma_free_handle(dma_handle);
183 		return (DDI_FAILURE);
184 	}
185 
186 	return (DDI_SUCCESS);
187 }
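
/*
 * Usage sketch (editorial, illustrative only -- see the real callers in
 * unm_nic_hw_resources() below for the driver's actual usage):
 *
 *	caddr_t			vaddr;
 *	uint64_t		paddr;
 *	ddi_dma_cookie_t	cookie;
 *	ddi_dma_handle_t	dma_hdl;
 *	ddi_acc_handle_t	acc_hdl;
 *
 *	if (unm_pci_alloc_consistent(adapter, size, &vaddr, &cookie,
 *	    &dma_hdl, &acc_hdl) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	paddr = cookie.dmac_laddress;
 *	...use vaddr for CPU access, paddr for the card...
 *	unm_pci_free_consistent(&dma_hdl, &acc_hdl);
 */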
188 
189 /*
190  * Unbinds the mapping, frees the DMA memory and, finally, the DMA handle.
191  */
192 void
193 unm_pci_free_consistent(ddi_dma_handle_t *dma_handle,
194     ddi_acc_handle_t *acc_handle)
195 {
196 	int err;
197 
198 	err = ddi_dma_unbind_handle(*dma_handle);
199 	if (err != DDI_SUCCESS) {
200 		cmn_err(CE_WARN, "%s: Error unbinding memory\n", __func__);
201 		return;
202 	}
203 
204 	ddi_dma_mem_free(acc_handle);
205 	ddi_dma_free_handle(dma_handle);
206 }
207 
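/*
 * Per-PCI-function interrupt target status registers; indexed by
 * adapter->ahw.pci_func when claiming an MSI interrupt (see
 * unm_nic_clear_int() below).
 */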
208 static uint32_t msi_tgt_status[] = {
209     ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
210     ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
211     ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
212     ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
213 };
214 
215 static void
216 unm_nic_disable_int(unm_adapter *adapter)
217 {
218 	__uint32_t	temp = 0;
219 
220 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
221 	    &temp, 4);
222 }
223 
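/*
 * Checks whether the pending interrupt belongs to this adapter and, if so,
 * claims and clears it. Returns 0 when the interrupt was ours, -1 when it
 * was raised by another device or function sharing the line.
 */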
224 static inline int
225 unm_nic_clear_int(unm_adapter *adapter)
226 {
227 	uint32_t	mask, temp, our_int, status;
228 
229 	UNM_READ_LOCK(&adapter->adapter_lock);
230 
231 	/* check whether it's our interrupt */
232 	if (!UNM_IS_MSI_FAMILY(adapter)) {
233 
234 		/* Legacy Interrupt case */
235 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
236 		    &status);
237 
238 		if (!(status & adapter->legacy_intr.int_vec_bit)) {
239 			UNM_READ_UNLOCK(&adapter->adapter_lock);
240 			return (-1);
241 		}
242 
243 		if (adapter->ahw.revision_id >= NX_P3_B1) {
244 			adapter->unm_nic_pci_read_immediate(adapter,
245 			    ISR_INT_STATE_REG, &temp);
246 			if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp)) {
247 				UNM_READ_UNLOCK(&adapter->adapter_lock);
248 				return (-1);
249 			}
250 		} else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
251 			our_int = adapter->unm_nic_pci_read_normalize(adapter,
252 			    CRB_INT_VECTOR);
253 
254 			/* FIXME: Assumes pci_func is same as ctx */
255 			if ((our_int & (0x80 << adapter->portnum)) == 0) {
256 				if (our_int != 0) {
257 					/* not our interrupt */
258 					UNM_READ_UNLOCK(&adapter->adapter_lock);
259 					return (-1);
260 				}
261 			}
262 			temp = our_int & ~((u32)(0x80 << adapter->portnum));
263 			adapter->unm_nic_pci_write_normalize(adapter,
264 			    CRB_INT_VECTOR, temp);
265 		}
266 
267 		if (adapter->fw_major < 4)
268 			unm_nic_disable_int(adapter);
269 
270 		/* claim interrupt */
271 		temp = 0xffffffff;
272 		adapter->unm_nic_pci_write_immediate(adapter,
273 		    adapter->legacy_intr.tgt_status_reg, &temp);
274 
275 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
276 		    &mask);
277 
278 		/*
279 		 * Read again to make sure the legacy interrupt message got
280 		 * flushed out
281 		 */
282 		adapter->unm_nic_pci_read_immediate(adapter, ISR_INT_VECTOR,
283 		    &mask);
284 	} else if (adapter->flags & UNM_NIC_MSI_ENABLED) {
285 		/* clear interrupt */
286 		temp = 0xffffffff;
287 		adapter->unm_nic_pci_write_immediate(adapter,
288 		    msi_tgt_status[adapter->ahw.pci_func], &temp);
289 	}
290 
291 	UNM_READ_UNLOCK(&adapter->adapter_lock);
292 
293 	return (0);
294 }
295 
296 static void
297 unm_nic_enable_int(unm_adapter *adapter)
298 {
299 	u32	temp = 1;
300 
301 	adapter->unm_nic_hw_write_wx(adapter, adapter->interrupt_crb,
302 	    &temp, 4);
303 
304 	if (!UNM_IS_MSI_FAMILY(adapter)) {
305 		u32	mask = 0xfbff;
306 
307 		adapter->unm_nic_pci_write_immediate(adapter,
308 		    adapter->legacy_intr.tgt_mask_reg, &mask);
309 	}
310 }
311 
312 static void
313 unm_free_hw_resources(unm_adapter *adapter)
314 {
315 	unm_recv_context_t *recv_ctx;
316 	unm_rcv_desc_ctx_t *rcv_desc;
317 	int ctx, ring;
318 
319 	if (adapter->context_alloced == 1) {
320 		netxen_destroy_rxtx(adapter);
321 		adapter->context_alloced = 0;
322 	}
323 
324 	if (adapter->ctxDesc != NULL) {
325 		unm_pci_free_consistent(&adapter->ctxDesc_dma_handle,
326 		    &adapter->ctxDesc_acc_handle);
327 		adapter->ctxDesc = NULL;
328 	}
329 
330 	if (adapter->ahw.cmdDescHead != NULL) {
331 		unm_pci_free_consistent(&adapter->ahw.cmd_desc_dma_handle,
332 		    &adapter->ahw.cmd_desc_acc_handle);
333 		adapter->ahw.cmdDesc_physAddr = NULL;
334 		adapter->ahw.cmdDescHead = NULL;
335 	}
336 
337 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
338 		recv_ctx = &adapter->recv_ctx[ctx];
339 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
340 			rcv_desc = &recv_ctx->rcv_desc[ring];
341 
342 			if (rcv_desc->desc_head != NULL) {
343 				unm_pci_free_consistent(
344 				    &rcv_desc->rx_desc_dma_handle,
345 				    &rcv_desc->rx_desc_acc_handle);
346 				rcv_desc->desc_head = NULL;
347 				rcv_desc->phys_addr = NULL;
348 			}
349 		}
350 
351 		if (recv_ctx->rcvStatusDescHead != NULL) {
352 			unm_pci_free_consistent(
353 			    &recv_ctx->status_desc_dma_handle,
354 			    &recv_ctx->status_desc_acc_handle);
355 			recv_ctx->rcvStatusDesc_physAddr = NULL;
356 			recv_ctx->rcvStatusDescHead = NULL;
357 		}
358 	}
359 }
360 
361 static void
362 cleanup_adapter(struct unm_adapter_s *adapter)
363 {
364 	ddi_regs_map_free(&(adapter->regs_handle));
365 	ddi_regs_map_free(&(adapter->db_handle));
366 	kmem_free(adapter, sizeof (unm_adapter));
367 }
368 
369 void
370 unm_nic_remove(unm_adapter *adapter)
371 {
372 	mac_link_update(adapter->mach, LINK_STATE_DOWN);
373 	unm_nic_stop_port(adapter);
374 
375 	if (adapter->interrupt_crb) {
376 		UNM_READ_LOCK(&adapter->adapter_lock);
377 		unm_nic_disable_int(adapter);
378 		UNM_READ_UNLOCK(&adapter->adapter_lock);
379 	}
380 	(void) untimeout(adapter->watchdog_timer);
381 
382 	unm_free_hw_resources(adapter);
383 
384 	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC)
385 		destroy_rxtx_rings(adapter);
386 
387 	if (adapter->portnum == 0)
388 		unm_free_dummy_dma(adapter);
389 
390 	unm_destroy_intr(adapter);
391 
392 	ddi_set_driver_private(adapter->dip, NULL);
393 	cleanup_adapter(adapter);
394 }
395 
396 static int
397 init_firmware(unm_adapter *adapter)
398 {
399 	uint32_t state = 0, loops = 0, tempout;
400 
401 	/* Window 1 call */
402 	UNM_READ_LOCK(&adapter->adapter_lock);
403 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_CMDPEG_STATE);
404 	UNM_READ_UNLOCK(&adapter->adapter_lock);
405 
406 	if (state == PHAN_INITIALIZE_ACK)
407 		return (0);
408 
409 	while (state != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
410 		drv_usecwait(100);
411 		/* Window 1 call */
412 		UNM_READ_LOCK(&adapter->adapter_lock);
413 		state = adapter->unm_nic_pci_read_normalize(adapter,
414 		    CRB_CMDPEG_STATE);
415 		UNM_READ_UNLOCK(&adapter->adapter_lock);
416 		loops++;
417 	}
418 
419 	if (loops >= 200000) {
420 		cmn_err(CE_WARN, "%s%d: CmdPeg init incomplete:%x\n",
421 		    adapter->name, adapter->instance, state);
422 		return (-EIO);
423 	}
424 
425 	/* Window 1 call */
426 	UNM_READ_LOCK(&adapter->adapter_lock);
427 	tempout = INTR_SCHEME_PERPORT;
428 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_CAPABILITIES_HOST,
429 	    &tempout, 4);
430 	tempout = MSI_MODE_MULTIFUNC;
431 	adapter->unm_nic_hw_write_wx(adapter, CRB_NIC_MSI_MODE_HOST,
432 	    &tempout, 4);
433 	tempout = MPORT_MULTI_FUNCTION_MODE;
434 	adapter->unm_nic_hw_write_wx(adapter, CRB_MPORT_MODE, &tempout, 4);
435 	tempout = PHAN_INITIALIZE_ACK;
436 	adapter->unm_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &tempout, 4);
437 	UNM_READ_UNLOCK(&adapter->adapter_lock);
438 
439 	return (0);
440 }
441 
442 /*
443  * Utility to synchronize with receive peg.
444  *  Returns   0 on success
445  *         -EIO on error
446  */
447 int
448 receive_peg_ready(struct unm_adapter_s *adapter)
449 {
450 	uint32_t state = 0;
451 	int loops = 0, err = 0;
452 
453 	/* Window 1 call */
454 	UNM_READ_LOCK(&adapter->adapter_lock);
455 	state = adapter->unm_nic_pci_read_normalize(adapter, CRB_RCVPEG_STATE);
456 	UNM_READ_UNLOCK(&adapter->adapter_lock);
457 
458 	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 20000)) {
459 		drv_usecwait(100);
460 		/* Window 1 call */
461 
462 		UNM_READ_LOCK(&adapter->adapter_lock);
463 		state = adapter->unm_nic_pci_read_normalize(adapter,
464 		    CRB_RCVPEG_STATE);
465 		UNM_READ_UNLOCK(&adapter->adapter_lock);
466 
467 		loops++;
468 	}
469 
470 	if (loops >= 20000) {
471 		cmn_err(CE_WARN, "Receive Peg initialization incomplete 0x%x\n",
472 		    state);
473 		err = -EIO;
474 	}
475 
476 	return (err);
477 }
478 
479 /*
480  * Check that the firmware has been downloaded and is ready to run, and
481  * set up the addresses of the descriptors in the adapter.
482  */
483 static int
484 unm_nic_hw_resources(unm_adapter *adapter)
485 {
486 	hardware_context	*hw = &adapter->ahw;
487 	void			*addr;
488 	int			err;
489 	int			ctx, ring;
490 	unm_recv_context_t	*recv_ctx;
491 	unm_rcv_desc_ctx_t	*rcv_desc;
492 	ddi_dma_cookie_t	cookie;
493 	int			size;
494 
495 	if (err = receive_peg_ready(adapter))
496 		return (err);
497 
498 	size = (sizeof (RingContext) + sizeof (uint32_t));
499 
500 	err = unm_pci_alloc_consistent(adapter,
501 	    size, (caddr_t *)&addr, &cookie,
502 	    &adapter->ctxDesc_dma_handle,
503 	    &adapter->ctxDesc_acc_handle);
504 	if (err != DDI_SUCCESS) {
505 		cmn_err(CE_WARN, "Failed to allocate HW context\n");
506 		return (err);
507 	}
508 
509 	adapter->ctxDesc_physAddr = cookie.dmac_laddress;
510 
511 	(void) memset(addr, 0, sizeof (RingContext));
512 
513 	adapter->ctxDesc = (RingContext *) addr;
514 	adapter->ctxDesc->CtxId = adapter->portnum;
515 	adapter->ctxDesc->CMD_CONSUMER_OFFSET =
516 	    adapter->ctxDesc_physAddr + sizeof (RingContext);
517 	adapter->cmdConsumer =
518 	    (uint32_t *)(uintptr_t)(((char *)addr) + sizeof (RingContext));
519 
520 	ASSERT(!((unsigned long)adapter->ctxDesc_physAddr & 0x3f));
521 
522 	/*
523 	 * Allocate command descriptor ring.
524 	 */
525 	size = (sizeof (cmdDescType0_t) * adapter->MaxTxDescCount);
526 	err = unm_pci_alloc_consistent(adapter,
527 	    size, (caddr_t *)&addr, &cookie,
528 	    &hw->cmd_desc_dma_handle,
529 	    &hw->cmd_desc_acc_handle);
530 	if (err != DDI_SUCCESS) {
531 		cmn_err(CE_WARN, "Failed to allocate cmd desc ring\n");
532 		return (err);
533 	}
534 
535 	hw->cmdDesc_physAddr = cookie.dmac_laddress;
536 	hw->cmdDescHead = (cmdDescType0_t *)addr;
537 
538 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
539 		recv_ctx = &adapter->recv_ctx[ctx];
540 
541 		size = (sizeof (statusDesc_t) * adapter->MaxRxDescCount);
542 		err = unm_pci_alloc_consistent(adapter,
543 		    size, (caddr_t *)&addr,
544 		    &recv_ctx->status_desc_dma_cookie,
545 		    &recv_ctx->status_desc_dma_handle,
546 		    &recv_ctx->status_desc_acc_handle);
547 		if (err != DDI_SUCCESS) {
548 			cmn_err(CE_WARN, "Failed to allocate sts desc ring\n");
549 			goto free_cmd_desc;
550 		}
551 
552 		(void) memset(addr, 0, size);
553 		recv_ctx->rcvStatusDesc_physAddr =
554 		    recv_ctx->status_desc_dma_cookie.dmac_laddress;
555 		recv_ctx->rcvStatusDescHead = (statusDesc_t *)addr;
556 
557 		/* rds rings */
558 		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
559 			rcv_desc = &recv_ctx->rcv_desc[ring];
560 
561 			size = (sizeof (rcvDesc_t) * adapter->MaxRxDescCount);
562 			err = unm_pci_alloc_consistent(adapter,
563 			    size, (caddr_t *)&addr,
564 			    &rcv_desc->rx_desc_dma_cookie,
565 			    &rcv_desc->rx_desc_dma_handle,
566 			    &rcv_desc->rx_desc_acc_handle);
567 			if (err != DDI_SUCCESS) {
568 				cmn_err(CE_WARN, "Failed to allocate "
569 				    "rx desc ring %d\n", ring);
570 				goto free_status_desc;
571 			}
572 
573 			rcv_desc->phys_addr =
574 			    rcv_desc->rx_desc_dma_cookie.dmac_laddress;
575 			rcv_desc->desc_head = (rcvDesc_t *)addr;
576 		}
577 	}
578 
579 	if (err = netxen_create_rxtx(adapter))
580 		goto free_statusrx_desc;
581 	adapter->context_alloced = 1;
582 
583 	return (DDI_SUCCESS);
584 
585 free_statusrx_desc:
586 free_status_desc:
587 free_cmd_desc:
588 	unm_free_hw_resources(adapter);
589 
590 	return (err);
591 }
592 
593 void unm_desc_dma_sync(ddi_dma_handle_t handle, uint_t start, uint_t count,
594     uint_t range, uint_t unit_size, uint_t direction)
595 {
596 	if ((start + count) < range) {
597 		(void) ddi_dma_sync(handle, start * unit_size,
598 		    count * unit_size, direction);
599 	} else {
600 		(void) ddi_dma_sync(handle, start * unit_size, 0, direction);
601 		(void) ddi_dma_sync(handle, 0,
602 		    (start + count - range) * unit_size, direction);
603 	}
604 }
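
/*
 * Worked example (editorial): for a ring of range = 1024 descriptors, a
 * sync of count = 8 starting at start = 1020 wraps past the end, so it is
 * issued as two ddi_dma_sync() calls: one covering the tail of the ring
 * (descriptors 1020..1023, via the zero-length "to end of object" form)
 * and one covering the head (descriptors 0..3).
 */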
605 
606 static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET,
607     CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2,
608     CRB_CMD_PRODUCER_OFFSET_3 };
609 
610 static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET,
611     CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2,
612     CRB_CMD_CONSUMER_OFFSET_3 };
613 
614 void
615 unm_nic_update_cmd_producer(struct unm_adapter_s *adapter,
616     uint32_t crb_producer)
617 {
618 	int data = crb_producer;
619 
620 	if (adapter->crb_addr_cmd_producer) {
621 		UNM_READ_LOCK(&adapter->adapter_lock);
622 		adapter->unm_nic_hw_write_wx(adapter,
623 		    adapter->crb_addr_cmd_producer, &data, 4);
624 		UNM_READ_UNLOCK(&adapter->adapter_lock);
625 	}
626 }
627 
628 static void
629 unm_nic_update_cmd_consumer(struct unm_adapter_s *adapter,
630     uint32_t crb_consumer)
631 {
632 	int data = crb_consumer;
633 
634 	if (adapter->crb_addr_cmd_consumer)
635 		adapter->unm_nic_hw_write_wx(adapter,
636 		    adapter->crb_addr_cmd_consumer, &data, 4);
637 }
638 
639 /*
640  * Examines the packet type and sets the descriptor opcode accordingly
641  * so that checksum offload can be used.
642  */
643 static void
644 unm_tx_csum(cmdDescType0_t *desc, mblk_t *mp, pktinfo_t *pktinfo)
645 {
646 	if (pktinfo->mac_hlen == sizeof (struct ether_vlan_header))
647 		desc->u1.s1.flags = FLAGS_VLAN_TAGGED;
648 
649 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
650 		uint32_t	start, flags;
651 
652 		mac_hcksum_get(mp, &start, NULL, NULL, NULL, &flags);
653 		if ((flags & (HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM)) == 0)
654 			return;
655 
656 		/*
657 		 * For TCP/UDP, ask hardware to do both IP header and
658 		 * full checksum, even if stack has already done one or
659 		 * the other. Hardware will always get it correct even
660 		 * if stack has already done it.
661 		 */
662 		switch (pktinfo->l4_proto) {
663 			case IPPROTO_TCP:
664 				desc->u1.s1.opcode = TX_TCP_PKT;
665 				break;
666 			case IPPROTO_UDP:
667 				desc->u1.s1.opcode = TX_UDP_PKT;
668 				break;
669 			default:
670 				/* Must be here with HCK_IPV4_HDRCKSUM */
671 				desc->u1.s1.opcode = TX_IP_PKT;
672 				return;
673 		}
674 
675 		desc->u1.s1.ipHdrOffset = pktinfo->mac_hlen;
676 		desc->u1.s1.tcpHdrOffset = pktinfo->mac_hlen + pktinfo->ip_hlen;
677 	}
678 }
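
/*
 * Editorial summary of the opcode selection above: TX_TCP_PKT and
 * TX_UDP_PKT ask the hardware for a full L4 checksum plus the IP header
 * checksum, TX_IP_PKT asks for the IP header checksum only, and the
 * TX_ETHER_PKT opcode set by the callers is left untouched when no
 * offload applies.
 */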
679 
680 /*
681  * For IP/UDP/TCP checksum offload, this checks that the MAC+IP headers sit
682  * in one contiguous block ending at an 8-byte-aligned address, as required
683  * by hardware. pktinfo->total_len is always updated by this function; if
684  * pktinfo->etype is left as 0, the caller must linearize the mblk and
685  * invoke unm_update_pkt_info() to determine the ethertype, IP header
686  * length and protocol.
687  */
688 static boolean_t
689 unm_get_pkt_info(mblk_t *mp, pktinfo_t *pktinfo)
690 {
691 	mblk_t		*bp;
692 	ushort_t	type;
693 
694 	(void) memset(pktinfo, 0, sizeof (pktinfo_t));
695 
696 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
697 		if (MBLKL(bp) == 0)
698 			continue;
699 		pktinfo->mblk_no++;
700 		pktinfo->total_len += MBLKL(bp);
701 	}
702 
703 	if (MBLKL(mp) < (sizeof (struct ether_header) + sizeof (ipha_t)))
704 		return (B_FALSE);
705 
706 	/*
707 	 * We just need a 2-byte-aligned address, since ether_type is a
708 	 * ushort.
709 	 */
710 	if ((uintptr_t)mp->b_rptr & 1)
711 		return (B_FALSE);
712 
713 	type = ((struct ether_header *)(uintptr_t)mp->b_rptr)->ether_type;
714 	if (type == htons(ETHERTYPE_VLAN)) {
715 		if (MBLKL(mp) < (sizeof (struct ether_vlan_header) +
716 		    sizeof (ipha_t)))
717 			return (B_FALSE);
718 		type = ((struct ether_vlan_header *) \
719 		    (uintptr_t)mp->b_rptr)->ether_type;
720 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
721 	} else {
722 		pktinfo->mac_hlen = sizeof (struct ether_header);
723 	}
724 	pktinfo->etype = type;
725 
726 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
727 		uchar_t *ip_off = mp->b_rptr + pktinfo->mac_hlen;
728 
729 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ip_off);
730 		pktinfo->l4_proto =
731 		    ((ipha_t *)(uintptr_t)ip_off)->ipha_protocol;
732 
733 		/* IP header not aligned to a quadword boundary? */
734 		if ((unsigned long)(ip_off + pktinfo->ip_hlen) % 8 != 0)
735 			return (B_FALSE);
736 	}
737 
738 	return (B_TRUE);
739 }
740 
741 static void
742 unm_update_pkt_info(char *ptr, pktinfo_t *pktinfo)
743 {
744 	ushort_t	type;
745 
746 	type = ((struct ether_header *)(uintptr_t)ptr)->ether_type;
747 	if (type == htons(ETHERTYPE_VLAN)) {
748 		type = ((struct ether_vlan_header *)(uintptr_t)ptr)->ether_type;
749 		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
750 	} else {
751 		pktinfo->mac_hlen = sizeof (struct ether_header);
752 	}
753 	pktinfo->etype = type;
754 
755 	if (pktinfo->etype == htons(ETHERTYPE_IP)) {
756 		char *ipp = ptr + pktinfo->mac_hlen;
757 
758 		pktinfo->ip_hlen = IPH_HDR_LENGTH((uintptr_t)ipp);
759 		pktinfo->l4_proto = ((ipha_t *)(uintptr_t)ipp)->ipha_protocol;
760 	}
761 }
762 
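/*
 * Copy-based transmit path (editorial summary): the whole frame is
 * bcopy'd into a preallocated DMA buffer and described by a single
 * command descriptor. Callers only invoke this for frames with
 * total_len <= adapter->maxmtu. The mblk chain is freed on success.
 */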
763 static boolean_t
764 unm_send_copy(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
765 {
766 	hardware_context *hw;
767 	u32				producer = 0;
768 	cmdDescType0_t			*hwdesc;
769 	struct unm_cmd_buffer		*pbuf = NULL;
770 	u32				mblen;
771 	int				no_of_desc = 1;
772 	int				MaxTxDescCount;
773 	mblk_t				*bp;
774 	char				*txb;
775 
776 	hw = &adapter->ahw;
777 	MaxTxDescCount = adapter->MaxTxDescCount;
778 
779 	UNM_SPIN_LOCK(&adapter->tx_lock);
780 	membar_enter();
781 
782 	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
783 	    MaxTxDescCount) <= 2) {
784 		adapter->stats.outofcmddesc++;
785 		adapter->resched_needed = 1;
786 		membar_exit();
787 		UNM_SPIN_UNLOCK(&adapter->tx_lock);
788 		return (B_FALSE);
789 	}
790 	adapter->freecmds -= no_of_desc;
791 
792 	producer = adapter->cmdProducer;
793 
794 	adapter->cmdProducer = get_index_range(adapter->cmdProducer,
795 	    MaxTxDescCount, no_of_desc);
796 
797 	hwdesc = &hw->cmdDescHead[producer];
798 	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
799 	pbuf = &adapter->cmd_buf_arr[producer];
800 
801 	pbuf->msg = NULL;
802 	pbuf->head = NULL;
803 	pbuf->tail = NULL;
804 
805 	txb = pbuf->dma_area.vaddr;
806 
807 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
808 		if ((mblen = MBLKL(bp)) == 0)
809 			continue;
810 		bcopy(bp->b_rptr, txb, mblen);
811 		txb += mblen;
812 	}
813 
814 	/*
815 	 * Determine metadata if not previously done due to fragmented mblk.
816 	 */
817 	if (pktinfo->etype == 0)
818 		unm_update_pkt_info(pbuf->dma_area.vaddr, pktinfo);
819 
820 	(void) ddi_dma_sync(pbuf->dma_area.dma_hdl,
821 	    0, pktinfo->total_len, DDI_DMA_SYNC_FORDEV);
822 
823 	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
824 	/* hwdesc->mss = 0; */
825 	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
826 	hwdesc->u3.s1.port = adapter->portnum;
827 	hwdesc->u3.s1.ctx_id = adapter->portnum;
828 
829 	hwdesc->u6.s1.buffer1Length = pktinfo->total_len;
830 	hwdesc->u5.AddrBuffer1 = pbuf->dma_area.dma_addr;
831 	hwdesc->u1.s1.numOfBuffers = 1;
832 	hwdesc->u1.s1.totalLength = pktinfo->total_len;
833 
834 	unm_tx_csum(hwdesc, mp, pktinfo);
835 
836 	unm_desc_dma_sync(hw->cmd_desc_dma_handle,
837 	    producer,
838 	    no_of_desc,
839 	    MaxTxDescCount,
840 	    sizeof (cmdDescType0_t),
841 	    DDI_DMA_SYNC_FORDEV);
842 
843 	hw->cmdProducer = adapter->cmdProducer;
844 	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
845 
846 	adapter->stats.txbytes += pktinfo->total_len;
847 	adapter->stats.xmitfinished++;
848 	adapter->stats.txcopyed++;
849 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
850 
851 	freemsg(mp);
852 	return (B_TRUE);
853 }
854 
855 /* Should be called with adapter->tx_lock held. */
856 static void
857 unm_return_dma_handle(unm_adapter *adapter, unm_dmah_node_t *head,
858     unm_dmah_node_t *tail, uint32_t num)
859 {
860 	ASSERT(tail != NULL);
861 	tail->next = adapter->dmahdl_pool;
862 	adapter->dmahdl_pool = head;
863 	adapter->freehdls += num;
864 }
865 
866 static unm_dmah_node_t *
867 unm_reserve_dma_handle(unm_adapter *adapter)
868 {
869 	unm_dmah_node_t *dmah = NULL;
870 
871 	dmah = adapter->dmahdl_pool;
872 	if (dmah != NULL) {
873 		adapter->dmahdl_pool = dmah->next;
874 		dmah->next = NULL;
875 		adapter->freehdls--;
876 		membar_exit();
877 	}
878 
879 	return (dmah);
880 }
881 
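/*
 * Zero-copy transmit path (editorial summary): each mblk in the chain is
 * DMA-bound in place and the resulting cookies are packed four per
 * command descriptor. On any failure every reserved handle is unbound
 * and returned to the pool, and B_FALSE tells the caller to fall back to
 * unm_send_copy().
 */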
882 static boolean_t
883 unm_send_mapped(struct unm_adapter_s *adapter, mblk_t *mp, pktinfo_t *pktinfo)
884 {
885 	hardware_context		*hw;
886 	u32				producer = 0;
887 	u32				saved_producer = 0;
888 	cmdDescType0_t			*hwdesc;
889 	struct unm_cmd_buffer		*pbuf = NULL;
890 	int				no_of_desc;
891 	int				k;
892 	int				MaxTxDescCount;
893 	mblk_t				*bp;
894 
895 	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL, *hdlp;
896 	ddi_dma_cookie_t cookie[MAX_COOKIES_PER_CMD + 1];
897 	int ret, i;
898 	uint32_t hdl_reserved = 0;
899 	uint32_t mblen;
900 	uint32_t ncookies, index = 0, total_cookies = 0;
901 
902 	MaxTxDescCount = adapter->MaxTxDescCount;
903 
904 	UNM_SPIN_LOCK(&adapter->tx_lock);
905 
906 	/* bind all the mblks of the packet first */
907 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
908 		mblen = MBLKL(bp);
909 		if (mblen == 0)
910 			continue;
911 
912 		dmah = unm_reserve_dma_handle(adapter);
913 		if (dmah == NULL) {
914 			adapter->stats.outoftxdmahdl++;
915 			goto err_map;
916 		}
917 
918 		ret = ddi_dma_addr_bind_handle(dmah->dmahdl,
919 		    NULL, (caddr_t)bp->b_rptr, mblen,
920 		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
921 		    DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies);
922 
923 		if (ret != DDI_DMA_MAPPED)
924 			goto err_map;
925 
926 		if (tail == NULL) {
927 			head = tail = dmah;
928 		} else {
929 			tail->next = dmah;
930 			tail = dmah;
931 		}
932 		hdl_reserved++;
933 
934 		total_cookies += ncookies;
935 		if (total_cookies > MAX_COOKIES_PER_CMD) {
936 			dmah = NULL;
937 			goto err_map;
938 		}
939 
940 		if (index == 0) {
941 			size_t	hsize = cookie[0].dmac_size;
942 
943 			/*
944 			 * For TCP/UDP packets with checksum offload,
945 			 * MAC/IP headers need to be contiguous. Otherwise,
946 			 * there must be at least 16 bytes in the first
947 			 * descriptor.
948 			 */
949 			if ((pktinfo->l4_proto == IPPROTO_TCP) ||
950 			    (pktinfo->l4_proto == IPPROTO_UDP)) {
951 				if (hsize < (pktinfo->mac_hlen +
952 				    pktinfo->ip_hlen)) {
953 					dmah = NULL;
954 					goto err_map;
955 				}
956 			} else {
957 				if (hsize < 16) {
958 					dmah = NULL;
959 					goto err_map;
960 				}
961 			}
962 		}
963 
964 		index++;
965 		ncookies--;
966 		for (i = 0; i < ncookies; i++, index++)
967 			ddi_dma_nextcookie(dmah->dmahdl, &cookie[index]);
968 	}
969 
970 	dmah = NULL;
971 	hw = &adapter->ahw;
972 	no_of_desc = (total_cookies + 3) >> 2;
973 
974 	membar_enter();
975 	if (find_diff_among(adapter->cmdProducer, adapter->lastCmdConsumer,
976 	    MaxTxDescCount) < no_of_desc+2) {
977 		/*
978 		 * If we are going to be trying the copy path, no point
979 		 * scheduling an upcall when Tx resources are freed.
980 		 */
981 		if (pktinfo->total_len > adapter->maxmtu) {
982 			adapter->stats.outofcmddesc++;
983 			adapter->resched_needed = 1;
984 		}
985 		membar_exit();
986 		goto err_alloc_desc;
987 	}
988 	adapter->freecmds -= no_of_desc;
989 
990 	/* Copy the descriptors into the hardware. */
991 	producer = adapter->cmdProducer;
992 	saved_producer = producer;
993 	hwdesc = &hw->cmdDescHead[producer];
994 	(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
995 	pbuf = &adapter->cmd_buf_arr[producer];
996 
997 	pbuf->msg = mp;
998 	pbuf->head = head;
999 	pbuf->tail = tail;
1000 
1001 	hwdesc->u1.s1.numOfBuffers = total_cookies;
1002 	hwdesc->u1.s1.opcode = TX_ETHER_PKT;
1003 	hwdesc->u3.s1.port = adapter->portnum;
1004 	/* hwdesc->u1.s1.tcpHdrOffset = 0; */
1005 	/* hwdesc->mss = 0; */
1006 	hwdesc->u3.s1.ctx_id = adapter->portnum;
1007 	hwdesc->u1.s1.totalLength = pktinfo->total_len;
1008 	unm_tx_csum(hwdesc, mp, pktinfo);
1009 
1010 	for (i = k = 0; i < total_cookies; i++) {
1011 		if (k == 4) {
1012 			/* Move to the next descriptor */
1013 			k = 0;
1014 			producer = get_next_index(producer, MaxTxDescCount);
1015 			hwdesc = &hw->cmdDescHead[producer];
1016 			(void) memset(hwdesc, 0, sizeof (cmdDescType0_t));
1017 		}
1018 
1019 		switch (k) {
1020 		case 0:
1021 			hwdesc->u6.s1.buffer1Length = cookie[i].dmac_size;
1022 			hwdesc->u5.AddrBuffer1 = cookie[i].dmac_laddress;
1023 			break;
1024 		case 1:
1025 			hwdesc->u6.s1.buffer2Length = cookie[i].dmac_size;
1026 			hwdesc->u2.AddrBuffer2 = cookie[i].dmac_laddress;
1027 			break;
1028 		case 2:
1029 			hwdesc->u6.s1.buffer3Length = cookie[i].dmac_size;
1030 			hwdesc->u4.AddrBuffer3 = cookie[i].dmac_laddress;
1031 			break;
1032 		case 3:
1033 			hwdesc->u6.s1.buffer4Length = cookie[i].dmac_size;
1034 			hwdesc->u7.AddrBuffer4 = cookie[i].dmac_laddress;
1035 			break;
1036 		}
1037 		k++;
1038 	}
1039 
1040 	unm_desc_dma_sync(hw->cmd_desc_dma_handle, saved_producer, no_of_desc,
1041 	    MaxTxDescCount, sizeof (cmdDescType0_t), DDI_DMA_SYNC_FORDEV);
1042 
1043 	adapter->cmdProducer = get_next_index(producer, MaxTxDescCount);
1044 	hw->cmdProducer = adapter->cmdProducer;
1045 	unm_nic_update_cmd_producer(adapter, adapter->cmdProducer);
1046 
1047 	adapter->stats.txbytes += pktinfo->total_len;
1048 	adapter->stats.xmitfinished++;
1049 	adapter->stats.txmapped++;
1050 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
1051 	return (B_TRUE);
1052 
1053 err_alloc_desc:
1054 err_map:
1055 
1056 	hdlp = head;
1057 	while (hdlp != NULL) {
1058 		(void) ddi_dma_unbind_handle(hdlp->dmahdl);
1059 		hdlp = hdlp->next;
1060 	}
1061 
1062 	/*
1063 	 * add the reserved but bind failed one to the list to be returned
1064 	 */
1065 	if (dmah != NULL) {
1066 		if (tail == NULL)
1067 			head = tail = dmah;
1068 		else {
1069 			tail->next = dmah;
1070 			tail = dmah;
1071 		}
1072 		hdl_reserved++;
1073 	}
1074 
1075 	if (head != NULL)
1076 		unm_return_dma_handle(adapter, head, tail, hdl_reserved);
1077 
1078 	UNM_SPIN_UNLOCK(&adapter->tx_lock);
1079 	return (B_FALSE);
1080 }
1081 
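/*
 * Transmit entry point. Small frames (total_len <= tx_bcopy_threshold)
 * and heavily fragmented chains (mblk_no >= MAX_COOKIES_PER_CMD) take the
 * bcopy path; everything else is DMA-mapped in place, falling back to the
 * copy path if mapping fails. Oversized frames are dropped.
 */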
1082 static boolean_t
1083 unm_nic_xmit_frame(unm_adapter *adapter, mblk_t *mp)
1084 {
1085 	pktinfo_t	pktinfo;
1086 	boolean_t	status = B_FALSE, send_mapped;
1087 
1088 	adapter->stats.xmitcalled++;
1089 
1090 	send_mapped = unm_get_pkt_info(mp, &pktinfo);
1091 
1092 	if (pktinfo.total_len <= adapter->tx_bcopy_threshold ||
1093 	    pktinfo.mblk_no >= MAX_COOKIES_PER_CMD)
1094 		send_mapped = B_FALSE;
1095 
1096 	if (send_mapped == B_TRUE)
1097 		status = unm_send_mapped(adapter, mp, &pktinfo);
1098 
1099 	if (status != B_TRUE) {
1100 		if (pktinfo.total_len <= adapter->maxmtu)
1101 			return (unm_send_copy(adapter, mp, &pktinfo));
1102 
1103 		/* message too large */
1104 		freemsg(mp);
1105 		adapter->stats.txdropped++;
1106 		status = B_TRUE;
1107 	}
1108 
1109 	return (status);
1110 }
1111 
1112 static int
1113 unm_nic_check_temp(struct unm_adapter_s *adapter)
1114 {
1115 	uint32_t temp, temp_state, temp_val;
1116 	int rv = 0;
1117 
1118 	if ((adapter->ahw.revision_id == NX_P3_A2) ||
1119 	    (adapter->ahw.revision_id == NX_P3_A0))
1120 		return (0);
1121 
1122 	temp = adapter->unm_nic_pci_read_normalize(adapter, CRB_TEMP_STATE);
1123 
1124 	temp_state = nx_get_temp_state(temp);
1125 	temp_val = nx_get_temp_val(temp);
1126 
1127 	if (temp_state == NX_TEMP_PANIC) {
1128 		cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1129 		    "maximum allowed, device has been shut down\n",
1130 		    unm_nic_driver_name, temp_val);
1131 		rv = 1;
1132 	} else if (temp_state == NX_TEMP_WARN) {
1133 		if (adapter->temp == NX_TEMP_NORMAL) {
1134 			cmn_err(CE_WARN, "%s: Device temperature %d C exceeds "
1135 			    "operating range. Immediate action needed.\n",
1136 			    unm_nic_driver_name, temp_val);
1137 		}
1138 	} else {
1139 		if (adapter->temp == NX_TEMP_WARN) {
1140 			cmn_err(CE_WARN, "%s: Device temperature is now %d "
1141 			    "degrees C in normal range.\n",
1142 			    unm_nic_driver_name, temp_val);
1143 		}
1144 	}
1145 
1146 	adapter->temp = temp_state;
1147 	return (rv);
1148 }
1149 
1150 static void
1151 unm_watchdog(unsigned long v)
1152 {
1153 	unm_adapter *adapter = (unm_adapter *)v;
1154 
1155 	if ((adapter->portnum == 0) && unm_nic_check_temp(adapter)) {
1156 		/*
1157 		 * Return without rescheduling the watchdog, since the
1158 		 * device has overheated and has been shut down.
1159 		 */
1160 		return;
1161 	}
1162 
1163 	unm_nic_handle_phy_intr(adapter);
1164 
1165 	/*
1166 	 * This function reschedules itself to run again in two seconds.
1167 	 */
1168 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1169 	    (void *)adapter, 2 * drv_usectohz(1000000));
1170 
1171 }
1172 
1173 static void unm_nic_clear_stats(unm_adapter *adapter)
1174 {
1175 	(void) memset(&adapter->stats, 0, sizeof (adapter->stats));
1176 }
1177 
1178 static void
1179 unm_nic_poll(unm_adapter *adapter)
1180 {
1181 	int	work_done, tx_complete;
1182 
1183 	adapter->stats.polled++;
1184 
1185 loop:
1186 	tx_complete = unm_process_cmd_ring(adapter);
1187 	work_done = unm_process_rcv_ring(adapter, NX_RX_MAXBUFS);
1188 	if ((!tx_complete) || (!(work_done < NX_RX_MAXBUFS)))
1189 		goto loop;
1190 
1191 	UNM_READ_LOCK(&adapter->adapter_lock);
1192 	unm_nic_enable_int(adapter);
1193 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1194 }
1195 
1196 /* ARGSUSED */
1197 uint_t
1198 unm_intr(caddr_t data, caddr_t arg)
1199 {
1200 	unm_adapter	*adapter = (unm_adapter *)(uintptr_t)data;
1201 
1202 	if (unm_nic_clear_int(adapter))
1203 		return (DDI_INTR_UNCLAIMED);
1204 
1205 	unm_nic_poll(adapter);
1206 	return (DDI_INTR_CLAIMED);
1207 }
1208 
1209 /*
1210  * This is invoked from the receive ISR. Due to the single-threaded nature
1211  * of the invocation, pool_lock acquisition is not necessary to protect
1212  * pool_list.
1213  */
1214 static void
1215 unm_free_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc, unm_rx_buffer_t *rx_buffer)
1216 {
1217 	/* mutex_enter(rcv_desc->pool_lock); */
1218 	rx_buffer->next = rcv_desc->pool_list;
1219 	rcv_desc->pool_list = rx_buffer;
1220 	rcv_desc->rx_buf_free++;
1221 	/* mutex_exit(rcv_desc->pool_lock); */
1222 }
1223 
1224 /*
1225  * unm_process_rcv() send the received packet to the protocol stack.
1226  */
1227 static mblk_t *
1228 unm_process_rcv(unm_adapter *adapter, statusDesc_t *desc)
1229 {
1230 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1231 	unm_rx_buffer_t		*rx_buffer;
1232 	mblk_t *mp;
1233 	u32			desc_ctx = desc->u1.s1.type;
1234 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
1235 	u32			pkt_length = desc->u1.s1.totalLength;
1236 	int			poff = desc->u1.s1.pkt_offset;
1237 	int			index, cksum_flags, docopy;
1238 	int			index_lo = desc->u1.s1.referenceHandle_lo;
1239 	char			*vaddr;
1240 
1241 	index = ((desc->u1.s1.referenceHandle_hi << 4) | index_lo);
1242 
1243 	rx_buffer = index2rxbuf(rcv_desc, index);
1244 
1245 	if (rx_buffer == NULL) {
1246 		cmn_err(CE_WARN, "\r\nNULL rx_buffer idx=%d", index);
1247 		return (NULL);
1248 	}
1249 	vaddr = (char *)rx_buffer->dma_info.vaddr;
1250 	if (vaddr == NULL) {
1251 		cmn_err(CE_WARN, "\r\nNULL vaddr");
1252 		return (NULL);
1253 	}
1254 	rcv_desc->rx_desc_handled++;
1255 	rcv_desc->rx_buf_card--;
1256 
1257 	(void) ddi_dma_sync(rx_buffer->dma_info.dma_hdl, 0,
1258 	    pkt_length + poff + (adapter->ahw.cut_through ? 0 :
1259 	    IP_ALIGNMENT_BYTES), DDI_DMA_SYNC_FORCPU);
1260 
1261 	/*
1262 	 * Copy packet into new allocated message buffer, if pkt_length
1263 	 * is below copy threshold.
1264 	 */
1265 	docopy = (pkt_length <= adapter->rx_bcopy_threshold) ? 1 : 0;
1266 
1267 	/*
1268 	 * If card is running out of rx buffers, then attempt to allocate
1269 	 * new mblk so we can feed this rx buffer back to card (we
1270 	 * _could_ look at what's pending on free and recycle lists).
1271 	 */
1272 	if (rcv_desc->rx_buf_card < NX_RX_THRESHOLD) {
1273 		docopy = 1;
1274 		adapter->stats.rxbufshort++;
1275 	}
1276 
1277 	if (docopy == 1) {
1278 		if ((mp = allocb(pkt_length + IP_ALIGNMENT_BYTES, 0)) == NULL) {
1279 			adapter->stats.allocbfailed++;
1280 			goto freebuf;
1281 		}
1282 
1283 		mp->b_rptr += IP_ALIGNMENT_BYTES;
1284 		vaddr += poff;
1285 		bcopy(vaddr, mp->b_rptr, pkt_length);
1286 		adapter->stats.rxcopyed++;
1287 		unm_free_rx_buffer(rcv_desc, rx_buffer);
1288 	} else {
1289 		mp = (mblk_t *)rx_buffer->mp;
1290 		if (mp == NULL) {
1291 			mp = desballoc(rx_buffer->dma_info.vaddr,
1292 			    rcv_desc->dma_size, 0, &rx_buffer->rx_recycle);
1293 			if (mp == NULL) {
1294 				adapter->stats.desballocfailed++;
1295 				goto freebuf;
1296 			}
1297 			rx_buffer->mp = mp;
1298 		}
1299 		mp->b_rptr += poff;
1300 		adapter->stats.rxmapped++;
1301 	}
1302 
1303 	mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_length);
1304 
1305 	if (desc->u1.s1.status == STATUS_CKSUM_OK) {
1306 		adapter->stats.csummed++;
1307 		cksum_flags =
1308 		    HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM_OK;
1309 	} else {
1310 		cksum_flags = 0;
1311 	}
1312 	mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
1313 
1314 	adapter->stats.no_rcv++;
1315 	adapter->stats.rxbytes += pkt_length;
1316 	adapter->stats.uphappy++;
1317 
1318 	return (mp);
1319 
1320 freebuf:
1321 	unm_free_rx_buffer(rcv_desc, rx_buffer);
1322 	return (NULL);
1323 }
1324 
1325 /* Process Receive status ring */
1326 static int
1327 unm_process_rcv_ring(unm_adapter *adapter, int max)
1328 {
1329 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1330 	statusDesc_t		*desc_head = recv_ctx->rcvStatusDescHead;
1331 	statusDesc_t		*desc = NULL;
1332 	uint32_t		consumer, start;
1333 	int			count = 0, ring;
1334 	mblk_t *mp;
1335 
1336 	start = consumer = recv_ctx->statusRxConsumer;
1337 
1338 	unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start, max,
1339 	    adapter->MaxRxDescCount, sizeof (statusDesc_t),
1340 	    DDI_DMA_SYNC_FORCPU);
1341 
1342 	while (count < max) {
1343 		desc = &desc_head[consumer];
1344 		if (!(desc->u1.s1.owner & STATUS_OWNER_HOST))
1345 			break;
1346 
1347 		mp = unm_process_rcv(adapter, desc);
1348 		desc->u1.s1.owner = STATUS_OWNER_PHANTOM;
1349 
1350 		consumer = (consumer + 1) % adapter->MaxRxDescCount;
1351 		count++;
1352 		if (mp != NULL)
1353 			mac_rx(adapter->mach, NULL, mp);
1354 	}
1355 
1356 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1357 		if (recv_ctx->rcv_desc[ring].rx_desc_handled > 0)
1358 			unm_post_rx_buffers_nodb(adapter, ring);
1359 	}
1360 
1361 	if (count) {
1362 		unm_desc_dma_sync(recv_ctx->status_desc_dma_handle, start,
1363 		    count, adapter->MaxRxDescCount, sizeof (statusDesc_t),
1364 		    DDI_DMA_SYNC_FORDEV);
1365 
1366 		/* update the consumer index in phantom */
1367 		recv_ctx->statusRxConsumer = consumer;
1368 
1369 		UNM_READ_LOCK(&adapter->adapter_lock);
1370 		adapter->unm_nic_hw_write_wx(adapter,
1371 		    recv_ctx->host_sds_consumer, &consumer, 4);
1372 		UNM_READ_UNLOCK(&adapter->adapter_lock);
1373 	}
1374 
1375 	return (count);
1376 }
1377 
1378 /* Process Command status ring */
1379 static int
1380 unm_process_cmd_ring(struct unm_adapter_s *adapter)
1381 {
1382 	u32			last_consumer;
1383 	u32			consumer;
1384 	int			count = 0;
1385 	struct unm_cmd_buffer	*buffer;
1386 	int			done;
1387 	unm_dmah_node_t *dmah, *head = NULL, *tail = NULL;
1388 	uint32_t	free_hdls = 0;
1389 
1390 	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1391 	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1392 
1393 	last_consumer = adapter->lastCmdConsumer;
1394 	consumer = *(adapter->cmdConsumer);
1395 
1396 	while (last_consumer != consumer) {
1397 		buffer = &adapter->cmd_buf_arr[last_consumer];
1398 		if (buffer->head != NULL) {
1399 			dmah = buffer->head;
1400 			while (dmah != NULL) {
1401 				(void) ddi_dma_unbind_handle(dmah->dmahdl);
1402 				dmah = dmah->next;
1403 				free_hdls++;
1404 			}
1405 
1406 			if (head == NULL) {
1407 				head = buffer->head;
1408 				tail = buffer->tail;
1409 			} else {
1410 				tail->next = buffer->head;
1411 				tail = buffer->tail;
1412 			}
1413 
1414 			buffer->head = NULL;
1415 			buffer->tail = NULL;
1416 
1417 			if (buffer->msg != NULL) {
1418 				freemsg(buffer->msg);
1419 				buffer->msg = NULL;
1420 			}
1421 		}
1422 
1423 		last_consumer = get_next_index(last_consumer,
1424 		    adapter->MaxTxDescCount);
1425 		if (++count > NX_MAX_TXCOMPS)
1426 			break;
1427 	}
1428 
1429 	if (count) {
1430 		int	doresched;
1431 
1432 		UNM_SPIN_LOCK(&adapter->tx_lock);
1433 		adapter->lastCmdConsumer = last_consumer;
1434 		adapter->freecmds += count;
1435 		membar_exit();
1436 
1437 		doresched = adapter->resched_needed;
1438 		if (doresched)
1439 			adapter->resched_needed = 0;
1440 
1441 		if (head != NULL)
1442 			unm_return_dma_handle(adapter, head, tail, free_hdls);
1443 
1444 		UNM_SPIN_UNLOCK(&adapter->tx_lock);
1445 
1446 		if (doresched)
1447 			mac_tx_update(adapter->mach);
1448 	}
1449 
1450 	(void) ddi_dma_sync(adapter->ctxDesc_dma_handle, sizeof (RingContext),
1451 	    sizeof (uint32_t), DDI_DMA_SYNC_FORCPU);
1452 
1453 	consumer = *(adapter->cmdConsumer);
1454 	done = (adapter->lastCmdConsumer == consumer);
1455 
1456 	return (done);
1457 }
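
/*
 * Editorial note: each call above reclaims at most NX_MAX_TXCOMPS tx
 * completions; the return value tells unm_nic_poll() whether the command
 * ring has been fully drained or another pass is required before
 * interrupts are re-enabled.
 */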
1458 
1459 /*
1460  * This is invoked from the receive ISR, and at initialization time when
1461  * no rx buffers have been posted to the card. Due to the single-threaded
1462  * nature of the invocation, pool_lock acquisition is not necessary to
1463  * protect pool_list.
1464  */
1465 static unm_rx_buffer_t *
1466 unm_reserve_rx_buffer(unm_rcv_desc_ctx_t *rcv_desc)
1467 {
1468 	unm_rx_buffer_t *rx_buffer = NULL;
1469 
1470 	/* mutex_enter(rcv_desc->pool_lock); */
1471 	if (rcv_desc->rx_buf_free) {
1472 		rx_buffer = rcv_desc->pool_list;
1473 		rcv_desc->pool_list = rx_buffer->next;
1474 		rx_buffer->next = NULL;
1475 		rcv_desc->rx_buf_free--;
1476 	} else {
1477 		mutex_enter(rcv_desc->recycle_lock);
1478 
1479 		if (rcv_desc->rx_buf_recycle) {
1480 			rcv_desc->pool_list = rcv_desc->recycle_list;
1481 			rcv_desc->recycle_list = NULL;
1482 			rcv_desc->rx_buf_free += rcv_desc->rx_buf_recycle;
1483 			rcv_desc->rx_buf_recycle = 0;
1484 
1485 			rx_buffer = rcv_desc->pool_list;
1486 			rcv_desc->pool_list = rx_buffer->next;
1487 			rx_buffer->next = NULL;
1488 			rcv_desc->rx_buf_free--;
1489 		}
1490 
1491 		mutex_exit(rcv_desc->recycle_lock);
1492 	}
1493 
1494 	/* mutex_exit(rcv_desc->pool_lock); */
1495 	return (rx_buffer);
1496 }
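
/*
 * Editorial note on the two-level free list above: pool_list is touched
 * only by the single-threaded ISR path, while recycle_list accumulates
 * buffers returned by the stack (via the rx_recycle free routine) and is
 * drained into pool_list under recycle_lock only when pool_list is empty.
 */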
1497 
1498 static void
1499 post_rx_doorbell(struct unm_adapter_s *adapter, uint32_t ringid, int count)
1500 {
1501 #define	UNM_RCV_PEG_DB_ID	2
1502 #define	UNM_RCV_PRODUCER_OFFSET	0
1503 	ctx_msg msg = {0};
1504 
1505 	/*
1506 	 * Write a doorbell msg to tell phanmon of change in
1507 	 * receive ring producer
1508 	 */
1509 	msg.PegId = UNM_RCV_PEG_DB_ID;
1510 	msg.privId = 1;
1511 	msg.Count = count;
1512 	msg.CtxId = adapter->portnum;
1513 	msg.Opcode = UNM_RCV_PRODUCER(ringid);
1514 	dbwritel(*((__uint32_t *)&msg),
1515 	    (void *)(DB_NORMALIZE(adapter, UNM_RCV_PRODUCER_OFFSET)));
1516 }
1517 
1518 static int
1519 unm_post_rx_buffers(struct unm_adapter_s *adapter, uint32_t ringid)
1520 {
1521 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1522 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1523 	unm_rx_buffer_t		*rx_buffer;
1524 	rcvDesc_t		*pdesc;
1525 	int			count;
1526 
1527 	for (count = 0; count < rcv_desc->MaxRxDescCount; count++) {
1528 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1529 		if (rx_buffer != NULL) {
1530 			pdesc = &rcv_desc->desc_head[count];
1531 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1532 			    rx_buffer);
1533 			pdesc->flags = ringid;
1534 			pdesc->bufferLength = rcv_desc->dma_size;
1535 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1536 		} else {
1537 			return (DDI_FAILURE);
1538 		}
1539 	}
1540 
1541 	rcv_desc->producer = count % rcv_desc->MaxRxDescCount;
1542 	count--;
1543 	unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle,
1544 	    0,		/* start */
1545 	    count,	/* count */
1546 	    count,	/* range */
1547 	    sizeof (rcvDesc_t),	/* unit_size */
1548 	    DDI_DMA_SYNC_FORDEV);	/* direction */
1549 
1550 	rcv_desc->rx_buf_card = rcv_desc->MaxRxDescCount;
1551 	UNM_READ_LOCK(&adapter->adapter_lock);
1552 	adapter->unm_nic_hw_write_wx(adapter, rcv_desc->host_rx_producer,
1553 	    &count, 4);
1554 	if (adapter->fw_major < 4)
1555 		post_rx_doorbell(adapter, ringid, count);
1556 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1557 
1558 	return (DDI_SUCCESS);
1559 }
1560 
1561 static void
1562 unm_post_rx_buffers_nodb(struct unm_adapter_s *adapter,
1563     uint32_t ringid)
1564 {
1565 	unm_recv_context_t	*recv_ctx = &(adapter->recv_ctx[0]);
1566 	unm_rcv_desc_ctx_t	*rcv_desc = &recv_ctx->rcv_desc[ringid];
1567 	struct unm_rx_buffer	*rx_buffer;
1568 	rcvDesc_t		*pdesc;
1569 	int 			count, producer = rcv_desc->producer;
1570 	int 			last_producer = producer;
1571 
1572 	for (count = 0; count < rcv_desc->rx_desc_handled; count++) {
1573 		rx_buffer = unm_reserve_rx_buffer(rcv_desc);
1574 		if (rx_buffer != NULL) {
1575 			pdesc = &rcv_desc->desc_head[producer];
1576 			pdesc->referenceHandle = rxbuf2index(rcv_desc,
1577 			    rx_buffer);
1578 			pdesc->flags = ringid;
1579 			pdesc->bufferLength = rcv_desc->dma_size;
1580 			pdesc->AddrBuffer = rx_buffer->dma_info.dma_addr;
1581 		} else {
1582 			adapter->stats.outofrxbuf++;
1583 			break;
1584 		}
1585 		producer = get_next_index(producer, rcv_desc->MaxRxDescCount);
1586 	}
1587 
1588 	/* if we did allocate buffers, then write the count to Phantom */
1589 	if (count) {
1590 		/* Sync rx ring, considering case for wrap around */
1591 		unm_desc_dma_sync(rcv_desc->rx_desc_dma_handle, last_producer,
1592 		    count, rcv_desc->MaxRxDescCount, sizeof (rcvDesc_t),
1593 		    DDI_DMA_SYNC_FORDEV);
1594 
1595 		rcv_desc->producer = producer;
1596 		rcv_desc->rx_desc_handled -= count;
1597 		rcv_desc->rx_buf_card += count;
1598 
1599 		producer = (producer + rcv_desc->MaxRxDescCount - 1) % rcv_desc->MaxRxDescCount;
1600 		UNM_READ_LOCK(&adapter->adapter_lock);
1601 		adapter->unm_nic_hw_write_wx(adapter,
1602 		    rcv_desc->host_rx_producer, &producer, 4);
1603 		UNM_READ_UNLOCK(&adapter->adapter_lock);
1604 	}
1605 }
1606 
1607 int
1608 unm_nic_fill_statistics_128M(struct unm_adapter_s *adapter,
1609 			    struct unm_statistics *unm_stats)
1610 {
1611 	void *addr;
1612 	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1613 		UNM_WRITE_LOCK(&adapter->adapter_lock);
1614 		unm_nic_pci_change_crbwindow_128M(adapter, 0);
1615 
1616 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1617 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT,
1618 		    &(unm_stats->tx_bytes));
1619 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1620 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT,
1621 		    &(unm_stats->tx_packets));
1622 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1623 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT,
1624 		    &(unm_stats->rx_bytes));
1625 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1626 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT,
1627 		    &(unm_stats->rx_packets));
1628 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1629 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT,
1630 		    &(unm_stats->rx_errors));
1631 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1632 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT,
1633 		    &(unm_stats->rx_CRC_errors));
1634 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1635 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1636 		    &(unm_stats->rx_long_length_error));
1637 		/* LINTED: E_FALSE_LOGICAL_EXPR */
1638 		UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1639 		    &(unm_stats->rx_short_length_error));
1640 
1641 		/*
1642 		 * For reading rx_MAC_error bit different procedure
1643 		 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
1644 		 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
1645 		 * unm_stats->rx_MAC_errors = temp & 0xff;
1646 		 */
1647 
1648 		unm_nic_pci_change_crbwindow_128M(adapter, 1);
1649 		UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1650 	} else {
1651 		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1652 		unm_stats->tx_bytes = adapter->stats.txbytes;
1653 		unm_stats->tx_packets = adapter->stats.xmitedframes +
1654 		    adapter->stats.xmitfinished;
1655 		unm_stats->rx_bytes = adapter->stats.rxbytes;
1656 		unm_stats->rx_packets = adapter->stats.no_rcv;
1657 		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1658 		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1659 		unm_stats->rx_short_length_error = adapter->stats.uplcong;
1660 		unm_stats->rx_long_length_error = adapter->stats.uphcong;
1661 		unm_stats->rx_CRC_errors = 0;
1662 		unm_stats->rx_MAC_errors = 0;
1663 		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1664 	}
1665 	return (0);
1666 }
1667 
1668 int
1669 unm_nic_fill_statistics_2M(struct unm_adapter_s *adapter,
1670     struct unm_statistics *unm_stats)
1671 {
1672 	if (adapter->ahw.board_type == UNM_NIC_XGBE) {
1673 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1674 		    &(unm_stats->tx_bytes), 4);
1675 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1676 		    &(unm_stats->tx_packets), 4);
1677 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1678 		    &(unm_stats->rx_bytes), 4);
1679 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1680 		    &(unm_stats->rx_packets), 4);
1681 		(void) unm_nic_hw_read_wx_2M(adapter,
1682 		    UNM_NIU_XGE_AGGR_ERROR_CNT, &(unm_stats->rx_errors), 4);
1683 		(void) unm_nic_hw_read_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1684 		    &(unm_stats->rx_CRC_errors), 4);
1685 		(void) unm_nic_hw_read_wx_2M(adapter,
1686 		    UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1687 		    &(unm_stats->rx_long_length_error), 4);
1688 		(void) unm_nic_hw_read_wx_2M(adapter,
1689 		    UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1690 		    &(unm_stats->rx_short_length_error), 4);
1691 	} else {
1692 		UNM_SPIN_LOCK_ISR(&adapter->tx_lock);
1693 		unm_stats->tx_bytes = adapter->stats.txbytes;
1694 		unm_stats->tx_packets = adapter->stats.xmitedframes +
1695 		    adapter->stats.xmitfinished;
1696 		unm_stats->rx_bytes = adapter->stats.rxbytes;
1697 		unm_stats->rx_packets = adapter->stats.no_rcv;
1698 		unm_stats->rx_errors = adapter->stats.rcvdbadmsg;
1699 		unm_stats->tx_errors = adapter->stats.nocmddescriptor;
1700 		unm_stats->rx_short_length_error = adapter->stats.uplcong;
1701 		unm_stats->rx_long_length_error = adapter->stats.uphcong;
1702 		unm_stats->rx_CRC_errors = 0;
1703 		unm_stats->rx_MAC_errors = 0;
1704 		UNM_SPIN_UNLOCK_ISR(&adapter->tx_lock);
1705 	}
1706 	return (0);
1707 }
1708 
1709 int
1710 unm_nic_clear_statistics_128M(struct unm_adapter_s *adapter)
1711 {
1712 	void *addr;
1713 	int data = 0;
1714 
1715 	UNM_WRITE_LOCK(&adapter->adapter_lock);
1716 	unm_nic_pci_change_crbwindow_128M(adapter, 0);
1717 
1718 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1719 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT, &data);
1720 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1721 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT, &data);
1722 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1723 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT, &data);
1724 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1725 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT, &data);
1726 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1727 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT, &data);
1728 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1729 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT, &data);
1730 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1731 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR, &data);
1732 	/* LINTED: E_FALSE_LOGICAL_EXPR */
1733 	UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR, &data);
1734 
1735 	unm_nic_pci_change_crbwindow_128M(adapter, 1);
1736 	UNM_WRITE_UNLOCK(&adapter->adapter_lock);
1737 	unm_nic_clear_stats(adapter);
1738 	return (0);
1739 }
1740 
1741 int
1742 unm_nic_clear_statistics_2M(struct unm_adapter_s *adapter)
1743 {
1744 	int data = 0;
1745 
1746 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_BYTE_CNT,
1747 	    &data, 4);
1748 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_TX_FRAME_CNT,
1749 	    &data, 4);
1750 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_BYTE_CNT,
1751 	    &data, 4);
1752 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_RX_FRAME_CNT,
1753 	    &data, 4);
1754 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_AGGR_ERROR_CNT,
1755 	    &data, 4);
1756 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_CRC_ERROR_CNT,
1757 	    &data, 4);
1758 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_OVERSIZE_FRAME_ERR,
1759 	    &data, 4);
1760 	(void) unm_nic_hw_write_wx_2M(adapter, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR,
1761 	    &data, 4);
1762 	unm_nic_clear_stats(adapter);
1763 	return (0);
1764 }
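
/*
 * Editor's note: the eight counter writes above share one fixed pattern,
 * so a table-driven equivalent is sketched below for clarity.  This is an
 * illustrative alternative, not part of the original driver; it assumes
 * only the UNM_NIU_XGE_* register offsets and unm_nic_hw_write_wx_2M()
 * already used above (offset values assumed to fit in uint64_t).
 */
#ifdef NOTYET_EXAMPLE_SKETCH
static const uint64_t nx_xge_stat_regs[] = {
	UNM_NIU_XGE_TX_BYTE_CNT,	UNM_NIU_XGE_TX_FRAME_CNT,
	UNM_NIU_XGE_RX_BYTE_CNT,	UNM_NIU_XGE_RX_FRAME_CNT,
	UNM_NIU_XGE_AGGR_ERROR_CNT,	UNM_NIU_XGE_CRC_ERROR_CNT,
	UNM_NIU_XGE_OVERSIZE_FRAME_ERR,	UNM_NIU_XGE_UNDERSIZE_FRAME_ERR
};

static int
nx_clear_statistics_2M_sketch(struct unm_adapter_s *adapter)
{
	int data = 0;
	uint_t i;

	/* zero every NIU statistics counter listed in the table */
	for (i = 0; i < sizeof (nx_xge_stat_regs) /
	    sizeof (nx_xge_stat_regs[0]); i++)
		(void) unm_nic_hw_write_wx_2M(adapter,
		    nx_xge_stat_regs[i], &data, 4);
	unm_nic_clear_stats(adapter);
	return (0);
}
#endif	/* NOTYET_EXAMPLE_SKETCH */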
1765 
1766 /*
1767  * unm_nic_ioctl() provides the tcl/phanmon support
1768  * through these ioctls.
1769  */
1770 static void
1771 unm_nic_ioctl(struct unm_adapter_s *adapter, int cmd, queue_t *q, mblk_t *mp)
1772 {
1773 	void *ptr;
1774 
1775 	switch (cmd) {
1776 	case UNM_NIC_CMD:
1777 		(void) unm_nic_do_ioctl(adapter, q, mp);
1778 		break;
1779 
1780 	case UNM_NIC_NAME:
1781 		ptr = (void *) mp->b_cont->b_rptr;
1782 
1783 		/*
1784 		 * Phanmon checks for "UNM-UNM" string
1785 		 * Replace the hardcoded value with appropriate macro
1786 		 */
1787 		DPRINTF(-1, (CE_CONT, "UNM_NIC_NAME ioctl executed %d %d\n",
1788 		    cmd, __LINE__));
1789 		(void) memcpy(ptr, "UNM-UNM", sizeof ("UNM-UNM"));
1790 		miocack(q, mp, sizeof ("UNM-UNM"), 0);
1791 		break;
1792 
1793 	default:
1794 		cmn_err(CE_WARN, "Netxen ioctl cmd %x not supported\n", cmd);
1795 
1796 		miocnak(q, mp, 0, EINVAL);
1797 		break;
1798 	}
1799 }
1800 
1801 int
1802 unm_nic_resume(unm_adapter *adapter)
1803 {
1804 
1805 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
1806 	    (void *) adapter, 50000);
1807 
1808 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1809 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
1810 	else
1811 		(void) ddi_intr_enable(adapter->intr_handle);
1812 	UNM_READ_LOCK(&adapter->adapter_lock);
1813 	unm_nic_enable_int(adapter);
1814 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1815 
1816 	mac_link_update(adapter->mach, LINK_STATE_UP);
1817 
1818 	return (DDI_SUCCESS);
1819 }
1820 
1821 int
1822 unm_nic_suspend(unm_adapter *adapter)
1823 {
1824 	mac_link_update(adapter->mach, LINK_STATE_DOWN);
1825 
1826 	(void) untimeout(adapter->watchdog_timer);
1827 
1828 	UNM_READ_LOCK(&adapter->adapter_lock);
1829 	unm_nic_disable_int(adapter);
1830 	UNM_READ_UNLOCK(&adapter->adapter_lock);
1831 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
1832 		(void) ddi_intr_block_disable(&adapter->intr_handle, 1);
1833 	else
1834 		(void) ddi_intr_disable(adapter->intr_handle);
1835 
1836 	return (DDI_SUCCESS);
1837 }
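
/*
 * Editor's note: unm_nic_resume()/unm_nic_suspend() above are the
 * DDI_RESUME/DDI_SUSPEND halves used from attach(9E)/detach(9E); each
 * reverses the other's steps in opposite order (suspend: link down,
 * watchdog timer, interrupts; resume: timer, interrupts, link up).
 */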
1838 
1839 static int
1840 unm_nic_do_ioctl(unm_adapter *adapter, queue_t *wq, mblk_t *mp)
1841 {
1842 	unm_nic_ioctl_data_t		data;
1843 	struct unm_nic_ioctl_data	*up_data;
1844 	ddi_acc_handle_t		conf_handle;
1845 	int				retval = 0;
1846 	uint64_t			efuse_chip_id = 0;
1847 	char				*ptr1;
1848 	short				*ptr2;
1849 	int				*ptr4;
1850 
1851 	up_data = (struct unm_nic_ioctl_data *)(mp->b_cont->b_rptr);
1852 	(void) memcpy(&data, (void **)(uintptr_t)(mp->b_cont->b_rptr),
1853 	    sizeof (data));
1854 
1855 	/* Shouldn't access beyond legal limits of the "char uabc[64];" member */
1856 	if (data.size > sizeof (data.uabc)) {
1857 		/* evil user tried to crash the kernel */
1858 		cmn_err(CE_WARN, "bad size: %d\n", data.size);
1859 		retval = GLD_BADARG;
1860 		goto error_out;
1861 	}
1862 
1863 	switch (data.cmd) {
1864 	case unm_nic_cmd_pci_read:
1865 
1866 		if ((data.rv = adapter->unm_nic_hw_read_ioctl(adapter,
1867 		    data.off, up_data, data.size))) {
1868 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_read_wx "
1869 			    "returned %d\n", __FUNCTION__, __LINE__, data.rv));
1870 
1871 			retval = data.rv;
1872 			goto error_out;
1873 		}
1874 
1875 		data.rv = 0;
1876 		break;
1877 
1878 	case unm_nic_cmd_pci_write:
1879 		if ((data.rv = adapter->unm_nic_hw_write_ioctl(adapter,
1880 		    data.off, &(data.uabc), data.size))) {
1881 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_hw_write_wx "
1882 			    "returned %d\n", __FUNCTION__,
1883 			    __LINE__, data.rv));
1884 			retval = data.rv;
1885 			goto error_out;
1886 		}
1887 		data.size = 0;
1888 		break;
1889 
1890 	case unm_nic_cmd_pci_mem_read:
1891 		if ((data.rv = adapter->unm_nic_pci_mem_read(adapter,
1892 		    data.off, up_data, data.size))) {
1893 			DPRINTF(-1, (CE_WARN, "%s(%d) unm_nic_pci_mem_read "
1894 			    "returned %d\n", __FUNCTION__,
1895 			    __LINE__, data.rv));
1896 			retval = data.rv;
1897 			goto error_out;
1898 		}
1899 		data.rv = 0;
1900 		break;
1901 
1902 	case unm_nic_cmd_pci_mem_write:
1903 		if ((data.rv = adapter->unm_nic_pci_mem_write(adapter,
1904 		    data.off, &(data.uabc), data.size))) {
1905 			DPRINTF(-1, (CE_WARN,
1906 			    "%s(%d) unm_nic_cmd_pci_mem_write "
1907 			    "returned %d\n",
1908 			    __FUNCTION__, __LINE__, data.rv));
1909 			retval = data.rv;
1910 			goto error_out;
1911 		}
1912 
1913 		data.size = 0;
1914 		data.rv = 0;
1915 		break;
1916 
1917 	case unm_nic_cmd_pci_config_read:
1918 
1919 		if (adapter->pci_cfg_handle != NULL) {
1920 			conf_handle = adapter->pci_cfg_handle;
1921 
1922 		} else if ((retval = pci_config_setup(adapter->dip,
1923 		    &conf_handle)) != DDI_SUCCESS) {
1924 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1925 			    " error:%d\n", unm_nic_driver_name, retval));
1926 			goto error_out;
1927 
1928 		} else
1929 			adapter->pci_cfg_handle = conf_handle;
1930 
1931 		switch (data.size) {
1932 		case 1:
1933 			ptr1 = (char *)up_data;
1934 			*ptr1 = (char)pci_config_get8(conf_handle, data.off);
1935 			break;
1936 		case 2:
1937 			ptr2 = (short *)up_data;
1938 			*ptr2 = (short)pci_config_get16(conf_handle, data.off);
1939 			break;
1940 		case 4:
1941 			ptr4 = (int *)up_data;
1942 			*ptr4 = (int)pci_config_get32(conf_handle, data.off);
1943 			break;
1944 		}
1945 
1946 		break;
1947 
1948 	case unm_nic_cmd_pci_config_write:
1949 
1950 		if (adapter->pci_cfg_handle != NULL) {
1951 			conf_handle = adapter->pci_cfg_handle;
1952 		} else if ((retval = pci_config_setup(adapter->dip,
1953 		    &conf_handle)) != DDI_SUCCESS) {
1954 			DPRINTF(-1, (CE_WARN, "!%s: pci_config_setup failed"
1955 			    " error:%d\n", unm_nic_driver_name, retval));
1956 			goto error_out;
1957 		} else {
1958 			adapter->pci_cfg_handle = conf_handle;
1959 		}
1960 
1961 		switch (data.size) {
1962 		case 1:
1963 			pci_config_put8(conf_handle,
1964 			    data.off, *(char *)&(data.uabc));
1965 			break;
1966 		case 2:
1967 			pci_config_put16(conf_handle,
1968 			    data.off, *(short *)(uintptr_t)&(data.uabc));
1969 			break;
1970 		case 4:
1971 			pci_config_put32(conf_handle,
1972 			    data.off, *(u32 *)(uintptr_t)&(data.uabc));
1973 			break;
1974 		}
1975 		data.size = 0;
1976 		break;
1977 
1978 	case unm_nic_cmd_get_stats:
1979 		data.rv = adapter->unm_nic_fill_statistics(adapter,
1980 		    (struct unm_statistics *)up_data);
1981 		data.size = sizeof (struct unm_statistics);
1982 
1983 		break;
1984 
1985 	case unm_nic_cmd_clear_stats:
1986 		data.rv = adapter->unm_nic_clear_statistics(adapter);
1987 		break;
1988 
1989 	case unm_nic_cmd_get_version:
1990 		(void) memcpy(up_data, UNM_NIC_VERSIONID,
1991 		    sizeof (UNM_NIC_VERSIONID));
1992 		data.size = sizeof (UNM_NIC_VERSIONID);
1993 
1994 		break;
1995 
1996 	case unm_nic_cmd_get_phy_type:
1997 		cmn_err(CE_WARN, "unm_nic_cmd_get_phy_type unimplemented\n");
1998 		break;
1999 
2000 	case unm_nic_cmd_efuse_chip_id:
2001 		efuse_chip_id = adapter->unm_nic_pci_read_normalize(adapter,
2002 		    UNM_EFUSE_CHIP_ID_HIGH);
2003 		efuse_chip_id <<= 32;
2004 		efuse_chip_id |= adapter->unm_nic_pci_read_normalize(adapter,
2005 		    UNM_EFUSE_CHIP_ID_LOW);
2006 		(void) memcpy(up_data, &efuse_chip_id, sizeof (uint64_t));
2007 		data.rv = 0;
2008 		break;
2009 
2010 	default:
2011 		cmn_err(CE_WARN, "%s%d: bad command %d\n", adapter->name,
2012 		    adapter->instance, data.cmd);
2013 		data.rv = GLD_NOTSUPPORTED;
2014 		data.size = 0;
2015 		goto error_out;
2016 	}
2017 
2018 work_done:
2019 	miocack(wq, mp, data.size, data.rv);
2020 	return (DDI_SUCCESS);
2021 
2022 error_out:
2023 	cmn_err(CE_WARN, "%s(%d) ioctl error\n", __FUNCTION__, data.cmd);
2024 	miocnak(wq, mp, 0, EINVAL);
2025 	return (retval);
2026 }
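
/*
 * Editor's note: an illustrative (hypothetical) reconstruction of the
 * request layout that unm_nic_do_ioctl() decodes from mp->b_cont.  Only
 * the member names are taken from the accesses above (cmd, off, size,
 * rv, uabc); the authoritative definition lives in unm_nic_ioctl.h and
 * its member types and ordering may differ.
 */
#ifdef NOTYET_EXAMPLE_SKETCH
typedef struct example_unm_nic_ioctl_data {
	uint32_t	cmd;		/* unm_nic_cmd_* opcode */
	uint32_t	off;		/* register/config/memory offset */
	uint32_t	size;		/* byte count, capped at sizeof (uabc) */
	int32_t		rv;		/* result, echoed back via miocack() */
	char		uabc[64];	/* in-line data buffer */
} example_unm_nic_ioctl_data_t;
#endif	/* NOTYET_EXAMPLE_SKETCH */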
2027 
2028 /*
2029  * Local datatype for defining tables of (Offset, Name) pairs
2030  */
2031 typedef struct {
2032 	offset_t	index;
2033 	char		*name;
2034 } unm_ksindex_t;
2035 
2036 static const unm_ksindex_t unm_kstat[] = {
2037 	{ 0,		"freehdls"		},
2038 	{ 1,		"freecmds"		},
2039 	{ 2,		"tx_bcopy_threshold"	},
2040 	{ 3,		"rx_bcopy_threshold"	},
2041 	{ 4,		"xmitcalled"		},
2042 	{ 5,		"xmitedframes"		},
2043 	{ 6,		"xmitfinished"		},
2044 	{ 7,		"txbytes"		},
2045 	{ 8,		"txcopyed"		},
2046 	{ 9,		"txmapped"		},
2047 	{ 10,		"outoftxdmahdl"		},
2048 	{ 11,		"outofcmddesc"		},
2049 	{ 12,		"txdropped"		},
2050 	{ 13,		"polled"		},
2051 	{ 14,		"uphappy"		},
2052 	{ 15,		"updropped"		},
2053 	{ 16,		"csummed"		},
2054 	{ 17,		"no_rcv"		},
2055 	{ 18,		"rxbytes"		},
2056 	{ 19,		"rxcopyed"		},
2057 	{ 20,		"rxmapped"		},
2058 	{ 21,		"desballocfailed"	},
2059 	{ 22,		"outofrxbuf"		},
2060 	{ 23,		"promiscmode"		},
2061 	{ 24,		"rxbufshort"		},
2062 	{ 25,		"allocbfailed"		},
2063 	{ -1,		NULL			}
2064 };
2065 
2066 static int
2067 unm_kstat_update(kstat_t *ksp, int flag)
2068 {
2069 	unm_adapter *adapter;
2070 	kstat_named_t *knp;
2071 
2072 	if (flag != KSTAT_READ)
2073 		return (EACCES);
2074 
2075 	adapter = ksp->ks_private;
2076 	knp = ksp->ks_data;
2077 
2078 	(knp++)->value.ui64 = adapter->freehdls;
2079 	(knp++)->value.ui64 = adapter->freecmds;
2080 	(knp++)->value.ui64 = adapter->tx_bcopy_threshold;
2081 	(knp++)->value.ui64 = adapter->rx_bcopy_threshold;
2082 
2083 	(knp++)->value.ui64 = adapter->stats.xmitcalled;
2084 	(knp++)->value.ui64 = adapter->stats.xmitedframes;
2085 	(knp++)->value.ui64 = adapter->stats.xmitfinished;
2086 	(knp++)->value.ui64 = adapter->stats.txbytes;
2087 	(knp++)->value.ui64 = adapter->stats.txcopyed;
2088 	(knp++)->value.ui64 = adapter->stats.txmapped;
2089 	(knp++)->value.ui64 = adapter->stats.outoftxdmahdl;
2090 	(knp++)->value.ui64 = adapter->stats.outofcmddesc;
2091 	(knp++)->value.ui64 = adapter->stats.txdropped;
2092 	(knp++)->value.ui64 = adapter->stats.polled;
2093 	(knp++)->value.ui64 = adapter->stats.uphappy;
2094 	(knp++)->value.ui64 = adapter->stats.updropped;
2095 	(knp++)->value.ui64 = adapter->stats.csummed;
2096 	(knp++)->value.ui64 = adapter->stats.no_rcv;
2097 	(knp++)->value.ui64 = adapter->stats.rxbytes;
2098 	(knp++)->value.ui64 = adapter->stats.rxcopyed;
2099 	(knp++)->value.ui64 = adapter->stats.rxmapped;
2100 	(knp++)->value.ui64 = adapter->stats.desballocfailed;
2101 	(knp++)->value.ui64 = adapter->stats.outofrxbuf;
2102 	(knp++)->value.ui64 = adapter->stats.promiscmode;
2103 	(knp++)->value.ui64 = adapter->stats.rxbufshort;
2104 	(knp++)->value.ui64 = adapter->stats.allocbfailed;
2105 
2106 	return (0);
2107 }
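
/*
 * Editor's note: a minimal userland sketch of reading these statistics
 * back through libkstat(3LIB).  Illustrative only -- it is not part of
 * this kernel module; "kstatinfo" is the name passed by unm_init_kstats()
 * below, and the module name comes from unm_nic_driver_name, assumed
 * here (unverified) to be "ntxn".
 */
#ifdef NOTYET_EXAMPLE_SKETCH	/* userland code, never compiled here */
#include <stdio.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t	*kc;
	kstat_t		*ksp;
	kstat_named_t	*kn;

	if ((kc = kstat_open()) == NULL)
		return (1);
	/* instance 0, name "kstatinfo" as created by unm_init_kstats() */
	if ((ksp = kstat_lookup(kc, "ntxn", 0, "kstatinfo")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (1);
	}
	if ((kn = kstat_data_lookup(ksp, "rxbytes")) != NULL)
		(void) printf("rxbytes: %llu\n",
		    (unsigned long long)kn->value.ui64);
	(void) kstat_close(kc);
	return (0);
}
#endif	/* NOTYET_EXAMPLE_SKETCH */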
2108 
2109 static kstat_t *
2110 unm_setup_named_kstat(unm_adapter *adapter, int instance, char *name,
2111 	const unm_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
2112 {
2113 	kstat_t *ksp;
2114 	kstat_named_t *knp;
2115 	char *np;
2116 	int type;
2117 	int count = 0;
2118 
2119 	size /= sizeof (unm_ksindex_t);
2120 	ksp = kstat_create(unm_nic_driver_name, instance, name, "net",
2121 	    KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT);
2122 	if (ksp == NULL)
2123 		return (NULL);
2124 
2125 	ksp->ks_private = adapter;
2126 	ksp->ks_update = update;
2127 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
2128 		count++;
2129 		switch (*np) {
2130 		default:
2131 			type = KSTAT_DATA_UINT64;
2132 			break;
2133 		case '%':
2134 			np += 1;
2135 			type = KSTAT_DATA_UINT32;
2136 			break;
2137 		case '$':
2138 			np += 1;
2139 			type = KSTAT_DATA_STRING;
2140 			break;
2141 		case '&':
2142 			np += 1;
2143 			type = KSTAT_DATA_CHAR;
2144 			break;
2145 		}
2146 		kstat_named_init(knp, np, type);
2147 	}
2148 	kstat_install(ksp);
2149 
2150 	return (ksp);
2151 }
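
/*
 * Editor's note: the first character of each name in a unm_ksindex_t
 * table selects the kstat data type and is stripped before the name is
 * installed: '%' -> UINT32, '$' -> STRING, '&' -> CHAR, anything else
 * defaults to UINT64.  An illustrative (hypothetical) table:
 */
#ifdef NOTYET_EXAMPLE_SKETCH
static const unm_ksindex_t example_kstat[] = {
	{ 0,	"%link_speed"	},	/* '%' stripped: 32-bit value */
	{ 1,	"$fw_version"	},	/* '$' stripped: string */
	{ 2,	"rxbytes"	},	/* default: 64-bit counter */
	{ -1,	NULL		}	/* NULL name terminates the table */
};
#endif	/* NOTYET_EXAMPLE_SKETCH */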
2152 
2153 void
2154 unm_init_kstats(unm_adapter *adapter, int instance)
2155 {
2156 	adapter->kstats[0] = unm_setup_named_kstat(adapter,
2157 	    instance, "kstatinfo", unm_kstat,
2158 	    sizeof (unm_kstat), unm_kstat_update);
2159 }
2160 
2161 void
2162 unm_fini_kstats(unm_adapter *adapter)
2163 {
2164 
2165 	if (adapter->kstats[0] != NULL) {
2166 		kstat_delete(adapter->kstats[0]);
2167 		adapter->kstats[0] = NULL;
2168 	}
2169 }
2170 
2171 static int
2172 unm_nic_set_pauseparam(unm_adapter *adapter, unm_pauseparam_t *pause)
2173 {
2174 	int ret = 0;
2175 
2176 	if (adapter->ahw.board_type == UNM_NIC_GBE) {
2177 		if (unm_niu_gbe_set_rx_flow_ctl(adapter, pause->rx_pause))
2178 			ret = -EIO;
2179 
2180 		if (unm_niu_gbe_set_tx_flow_ctl(adapter, pause->tx_pause))
2181 			ret = -EIO;
2182 
2183 	} else if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2184 		if (unm_niu_xg_set_tx_flow_ctl(adapter, pause->tx_pause))
2185 			ret = -EIO;
2186 	} else
2187 		ret = -EIO;
2188 
2189 	return (ret);
2190 }
2191 
2192 /*
2193  * GLD/MAC interfaces
2194  */
2195 static int
2196 ntxn_m_start(void *arg)
2197 {
2198 	unm_adapter	*adapter = arg;
2199 	int		ring;
2200 
2201 	UNM_SPIN_LOCK(&adapter->lock);
2202 	if (adapter->is_up == UNM_ADAPTER_UP_MAGIC) {
2203 		UNM_SPIN_UNLOCK(&adapter->lock);
2204 		return (DDI_SUCCESS);
2205 	}
2206 
2207 	if (create_rxtx_rings(adapter) != DDI_SUCCESS) {
2208 		UNM_SPIN_UNLOCK(&adapter->lock);
2209 		return (DDI_FAILURE);
2210 	}
2211 
2212 	if (init_firmware(adapter) != DDI_SUCCESS) {
2213 		UNM_SPIN_UNLOCK(&adapter->lock);
2214 		cmn_err(CE_WARN, "%s%d: Failed to init firmware\n",
2215 		    adapter->name, adapter->instance);
2216 		goto dest_rings;
2217 	}
2218 
2219 	unm_nic_clear_stats(adapter);
2220 
2221 	if (unm_nic_hw_resources(adapter) != 0) {
2222 		UNM_SPIN_UNLOCK(&adapter->lock);
2223 		cmn_err(CE_WARN, "%s%d: Error setting hw resources\n",
2224 		    adapter->name, adapter->instance);
2225 		goto dest_rings;
2226 	}
2227 
2228 	if (adapter->fw_major < 4) {
2229 		adapter->crb_addr_cmd_producer =
2230 		    crb_cmd_producer[adapter->portnum];
2231 		adapter->crb_addr_cmd_consumer =
2232 		    crb_cmd_consumer[adapter->portnum];
2233 		unm_nic_update_cmd_producer(adapter, 0);
2234 		unm_nic_update_cmd_consumer(adapter, 0);
2235 	}
2236 
2237 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
2238 		if (unm_post_rx_buffers(adapter, ring) != DDI_SUCCESS) {
2239 			UNM_SPIN_UNLOCK(&adapter->lock);
2240 			goto free_hw_res;
2241 		}
2242 	}
2243 
2244 	if (unm_nic_macaddr_set(adapter, adapter->mac_addr) != 0) {
2245 		UNM_SPIN_UNLOCK(&adapter->lock);
2246 		cmn_err(CE_WARN, "%s%d: Could not set mac address\n",
2247 		    adapter->name, adapter->instance);
2248 		goto free_hw_res;
2249 	}
2250 
2251 	if (unm_nic_init_port(adapter) != 0) {
2252 		UNM_SPIN_UNLOCK(&adapter->lock);
2253 		cmn_err(CE_WARN, "%s%d: Could not initialize port\n",
2254 		    adapter->name, adapter->instance);
2255 		goto free_hw_res;
2256 	}
2257 
2258 	unm_nic_set_link_parameters(adapter);
2259 
2260 	/*
2261 	 * P2 and P3 should be handled similarly.
2262 	 */
2263 	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
2264 		if (unm_nic_set_promisc_mode(adapter) != 0) {
2265 			UNM_SPIN_UNLOCK(&adapter->lock);
2266 			cmn_err(CE_WARN, "%s%d: Could not set promisc mode\n",
2267 			    adapter->name, adapter->instance);
2268 			goto stop_and_free;
2269 		}
2270 	} else {
2271 		nx_p3_nic_set_multi(adapter);
2272 	}
2273 	adapter->stats.promiscmode = 1;
2274 
2275 	if (unm_nic_set_mtu(adapter, adapter->mtu) != 0) {
2276 		UNM_SPIN_UNLOCK(&adapter->lock);
2277 		cmn_err(CE_WARN, "%s%d: Could not set mtu\n",
2278 		    adapter->name, adapter->instance);
2279 		goto stop_and_free;
2280 	}
2281 
2282 	adapter->watchdog_timer = timeout((void (*)(void *))&unm_watchdog,
2283 	    (void *)adapter, 0);
2284 
2285 	adapter->is_up = UNM_ADAPTER_UP_MAGIC;
2286 
2287 	if (adapter->intr_type == DDI_INTR_TYPE_MSI)
2288 		(void) ddi_intr_block_enable(&adapter->intr_handle, 1);
2289 	else
2290 		(void) ddi_intr_enable(adapter->intr_handle);
2291 	unm_nic_enable_int(adapter);
2292 
2293 	UNM_SPIN_UNLOCK(&adapter->lock);
2294 	return (GLD_SUCCESS);
2295 
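	/*
	 * Failure unwind: each label below releases what was acquired
	 * after the previous one, so a failure at any stage falls
	 * through the remaining teardown steps in reverse order of
	 * acquisition.
	 */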
2296 stop_and_free:
2297 	unm_nic_stop_port(adapter);
2298 free_hw_res:
2299 	unm_free_hw_resources(adapter);
2300 dest_rings:
2301 	destroy_rxtx_rings(adapter);
2302 	return (DDI_FAILURE);
2303 }
2304 
2305 
2306 /*
2307  * This stub is kept here for reference, in case GLDv3
2308  * requires something different to be done here.
2309  * It will be deleted later.
2310  */
2311 /* ARGSUSED */
2312 static void
2313 ntxn_m_stop(void *arg)
2314 {
2315 }
2316 
2317 /*ARGSUSED*/
2318 static int
2319 ntxn_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
2320 {
2321 	/*
2322 	 * When we correctly implement this, invoke nx_p3_nic_set_multi()
2323 	 * or nx_p2_nic_set_multi() here.
2324 	 */
2325 	return (GLD_SUCCESS);
2326 }
2327 
2328 /*ARGSUSED*/
2329 static int
2330 ntxn_m_promisc(void *arg, boolean_t on)
2331 {
2332 #if 0
2333 	int err = 0;
2334 	struct unm_adapter_s *adapter = arg;
2335 
2336 	err = on ? unm_nic_set_promisc_mode(adapter) :
2337 	    unm_nic_unset_promisc_mode(adapter);
2338 
2339 	if (err)
2340 		return (GLD_FAILURE);
2341 #endif
2342 
2343 	return (GLD_SUCCESS);
2344 }
2345 
2346 static int
2347 ntxn_m_stat(void *arg, uint_t stat, uint64_t *val)
2348 {
2349 	struct unm_adapter_s		*adapter = arg;
2350 	struct unm_adapter_stats	*portstat = &adapter->stats;
2351 
2352 	switch (stat) {
2353 	case MAC_STAT_IFSPEED:
2354 		if (adapter->ahw.board_type == UNM_NIC_XGBE) {
2355 			/* 10 Gigs */
2356 			*val = 10000000000ULL;
2357 		} else {
2358 			/* 1 Gig */
2359 			*val = 1000000000;
2360 		}
2361 		break;
2362 
2363 	case MAC_STAT_MULTIRCV:
2364 		*val = 0;
2365 		break;
2366 
2367 	case MAC_STAT_BRDCSTRCV:
2368 	case MAC_STAT_BRDCSTXMT:
2369 		*val = 0;
2370 		break;
2371 
2372 	case MAC_STAT_NORCVBUF:
2373 		*val = portstat->updropped;
2374 		break;
2375 
2376 	case MAC_STAT_NOXMTBUF:
2377 		*val = portstat->txdropped;
2378 		break;
2379 
2380 	case MAC_STAT_RBYTES:
2381 		*val = portstat->rxbytes;
2382 		break;
2383 
2384 	case MAC_STAT_OBYTES:
2385 		*val = portstat->txbytes;
2386 		break;
2387 
2388 	case MAC_STAT_OPACKETS:
2389 		*val = portstat->xmitedframes;
2390 		break;
2391 
2392 	case MAC_STAT_IPACKETS:
2393 		*val = portstat->uphappy;
2394 		break;
2395 
2396 	case MAC_STAT_OERRORS:
2397 		*val = portstat->xmitcalled - portstat->xmitedframes;
2398 		break;
2399 
2400 	case ETHER_STAT_LINK_DUPLEX:
2401 		*val = LINK_DUPLEX_FULL;
2402 		break;
2403 
2404 	default:
2405 		/*
2406 		 * Shouldn't reach here...
2407 		 */
2408 		*val = 0;
2409 		DPRINTF(0, (CE_WARN, ": unrecognized parameter = %d, "
2410 		    "value returned 0\n", stat));
2411 
2412 	}
2413 
2414 	return (0);
2415 }
2416 
2417 static int
2418 ntxn_m_unicst(void *arg, const uint8_t *mac)
2419 {
2420 	struct unm_adapter_s *adapter = arg;
2421 
2422 	DPRINTF(-1, (CE_CONT, "%s: called\n", __func__));
2423 
2424 	if (unm_nic_macaddr_set(adapter, (uint8_t *)mac))
2425 		return (EAGAIN);
2426 	bcopy(mac, adapter->mac_addr, ETHERADDRL);
2427 
2428 	return (0);
2429 }
2430 
2431 static mblk_t *
2432 ntxn_m_tx(void *arg, mblk_t *mp)
2433 {
2434 	unm_adapter *adapter = arg;
2435 	mblk_t *next;
2436 
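	/*
	 * GLDv3 mc_tx contract (editor's note): consume as much of the
	 * chain as possible.  If a frame cannot be queued, relink it and
	 * return the unsent remainder; a non-NULL return tells the MAC
	 * layer to back off until the driver calls mac_tx_update().
	 */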
2437 	while (mp != NULL) {
2438 		next = mp->b_next;
2439 		mp->b_next = NULL;
2440 
2441 		if (unm_nic_xmit_frame(adapter, mp) != B_TRUE) {
2442 			mp->b_next = next;
2443 			break;
2444 		}
2445 		mp = next;
2446 		adapter->stats.xmitedframes++;
2447 	}
2448 
2449 	return (mp);
2450 }
2451 
2452 static void
2453 ntxn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2454 {
2455 	int		cmd;
2456 	struct iocblk   *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
2457 	struct unm_adapter_s *adapter = (struct unm_adapter_s *)arg;
2458 	enum ioc_reply status = IOC_DONE;
2459 
2460 	iocp->ioc_error = 0;
2461 	cmd = iocp->ioc_cmd;
2462 
2463 	if (cmd == ND_GET || cmd == ND_SET) {
2464 		status = unm_nd_ioctl(adapter, wq, mp, iocp);
2465 		switch (status) {
2466 		default:
2467 		case IOC_INVAL:
2468 			miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2469 			    EINVAL : iocp->ioc_error);
2470 			break;
2471 
2472 		case IOC_DONE:
2473 			break;
2474 
2475 		case IOC_RESTART_ACK:
2476 		case IOC_ACK:
2477 			miocack(wq, mp, 0, 0);
2478 			break;
2479 
2480 		case IOC_RESTART_REPLY:
2481 		case IOC_REPLY:
2482 			mp->b_datap->db_type = iocp->ioc_error == 0 ?
2483 			    M_IOCACK : M_IOCNAK;
2484 			qreply(wq, mp);
2485 			break;
2486 		}
2487 	} else if (cmd <= UNM_NIC_NAME && cmd >= UNM_CMD_START) {
2488 		unm_nic_ioctl(adapter, cmd, wq, mp);
2489 		return;
2490 	} else {
2491 		miocnak(wq, mp, 0, EINVAL);
2492 		return;
2493 	}
2494 }
2495 
2496 /* ARGSUSED */
2497 static boolean_t
2498 ntxn_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2499 {
2500 	switch (cap) {
2501 	case MAC_CAPAB_HCKSUM:
2502 		{
2503 			uint32_t *txflags = cap_data;
2504 
2505 			*txflags = (HCKSUM_ENABLE |
2506 			    HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM);
2507 		}
2508 		break;
2509 
2510 #ifdef SOLARIS11
2511 	case MAC_CAPAB_ANCHOR_VNIC:
2512 	case MAC_CAPAB_MULTIFACTADDR:
2513 #else
2514 	case MAC_CAPAB_POLL:
2515 	case MAC_CAPAB_MULTIADDRESS:
2516 #endif
2517 	default:
2518 		return (B_FALSE);
2519 	}
2520 
2521 	return (B_TRUE);
2522 }
2523 
2524 #define	NETXEN_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
2525 
2526 static mac_callbacks_t ntxn_m_callbacks = {
2527 	NETXEN_M_CALLBACK_FLAGS,
2528 	ntxn_m_stat,
2529 	ntxn_m_start,
2530 	ntxn_m_stop,
2531 	ntxn_m_promisc,
2532 	ntxn_m_multicst,
2533 	ntxn_m_unicst,
2534 	ntxn_m_tx,
2535 	NULL,			/* mc_reserved */
2536 	ntxn_m_ioctl,
2537 	ntxn_m_getcapab,
2538 	NULL,			/* mc_open */
2539 	NULL,			/* mc_close */
2540 	NULL,			/* mc_setprop */
2541 	NULL			/* mc_getprop */
2542 };
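
/*
 * Editor's note: in a GLDv3 mac_callbacks_t, the flags word advertises
 * which optional entry points are valid; only MC_IOCTL and MC_GETCAPAB
 * are set here, so the NULL members above are ignored by the MAC layer.
 * The mandatory entries (mc_getstat through mc_tx) are always used.
 */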
2543 
2544 int
2545 unm_register_mac(unm_adapter *adapter)
2546 {
2547 	int ret;
2548 	mac_register_t *macp;
2549 	unm_pauseparam_t pause;
2550 
2551 	dev_info_t *dip = adapter->dip;
2552 
2553 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2554 		cmn_err(CE_WARN, "mac_alloc failed: memory not available\n");
2555 		return (DDI_FAILURE);
2556 	}
2557 
2558 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2559 	macp->m_driver = adapter;
2560 	macp->m_dip = dip;
2561 	macp->m_instance = adapter->instance;
2562 	macp->m_src_addr = adapter->mac_addr;
2563 	macp->m_callbacks = &ntxn_m_callbacks;
2564 	macp->m_min_sdu = 0;
2565 	macp->m_max_sdu = adapter->mtu;
2566 #ifdef SOLARIS11
2567 	macp->m_margin = VLAN_TAGSZ;
2568 #endif /* SOLARIS11 */
2569 
2570 	ret = mac_register(macp, &adapter->mach);
2571 	mac_free(macp);
2572 	if (ret != 0) {
2573 		cmn_err(CE_WARN, "mac_register failed for port %d\n",
2574 		    adapter->portnum);
2575 		return (DDI_FAILURE);
2576 	}
2577 
2578 	unm_init_kstats(adapter, adapter->instance);
2579 
2580 	/* Register NDD-tweakable parameters */
2581 	if (unm_nd_init(adapter)) {
2582 		cmn_err(CE_WARN, "unm_nd_init() failed");
2583 		return (DDI_FAILURE);
2584 	}
2585 
2586 	pause.rx_pause = adapter->nd_params[PARAM_ADV_PAUSE_CAP].ndp_val;
2587 	pause.tx_pause = adapter->nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val;
2588 
2589 	if (unm_nic_set_pauseparam(adapter, &pause)) {
2590 		cmn_err(CE_WARN, "Bad pause settings: rx %d, tx %d",
2591 		    pause.rx_pause, pause.tx_pause);
2592 	}
2593 	adapter->nd_params[PARAM_PAUSE_CAP].ndp_val = pause.rx_pause;
2594 	adapter->nd_params[PARAM_ASYM_PAUSE_CAP].ndp_val = pause.tx_pause;
2595 
2596 	return (DDI_SUCCESS);
2597 }
2598