xref: /illumos-gate/usr/src/uts/common/io/igb/igb_gld.c (revision 2dea4eed7ad1c66ae4770263aa2911815a8b86eb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 #include "igb_sw.h"
32 
/*
 * igb_m_stat: MAC layer mc_getstat(9E) entry point.
 *
 * The hardware statistics registers on this controller family are
 * clear-on-read, so each register read is accumulated into the driver's
 * soft kstat copy (igb_ks) and the running total is returned via 'val'.
 *
 * Returns 0 on success, ECANCELED while the device is suspended,
 * ENOTSUP for an unrecognized stat, and EIO if a register access
 * fault is detected afterwards.
 */
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	/* gen_lock serializes register access and kstat accumulation */
	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* link_speed is in Mbps; MAC wants bits per second */
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		/*
		 * Input errors are the sum of rx errors, alignment errors,
		 * length errors, CRC errors and carrier extension errors.
		 * Each counter is refreshed from hardware before summing.
		 */
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		/* no hardware counter for tx buffer exhaustion */
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	/* MACXMT errors are reported as excessive collisions here */
	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		/* map current link speed + media type to an XCVR_* code */
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	/* LP_* stats report the link partner's advertised abilities */
	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	/* fault the device through FMA if register accesses went bad */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
437 
438 /*
439  * Bring the device out of the reset/quiesced state that it
440  * was in when the interface was registered.
441  */
442 int
443 igb_m_start(void *arg)
444 {
445 	igb_t *igb = (igb_t *)arg;
446 
447 	mutex_enter(&igb->gen_lock);
448 
449 	if (igb->igb_state & IGB_SUSPENDED) {
450 		mutex_exit(&igb->gen_lock);
451 		return (ECANCELED);
452 	}
453 
454 	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
455 		mutex_exit(&igb->gen_lock);
456 		return (EIO);
457 	}
458 
459 	atomic_or_32(&igb->igb_state, IGB_STARTED);
460 
461 	mutex_exit(&igb->gen_lock);
462 
463 	/*
464 	 * Enable and start the watchdog timer
465 	 */
466 	igb_enable_watchdog_timer(igb);
467 
468 	return (0);
469 }
470 
471 /*
472  * Stop the device and put it in a reset/quiesced state such
473  * that the interface can be unregistered.
474  */
475 void
476 igb_m_stop(void *arg)
477 {
478 	igb_t *igb = (igb_t *)arg;
479 
480 	mutex_enter(&igb->gen_lock);
481 
482 	if (igb->igb_state & IGB_SUSPENDED) {
483 		mutex_exit(&igb->gen_lock);
484 		return;
485 	}
486 
487 	atomic_and_32(&igb->igb_state, ~IGB_STARTED);
488 
489 	igb_stop(igb, B_TRUE);
490 
491 	mutex_exit(&igb->gen_lock);
492 
493 	/*
494 	 * Disable and stop the watchdog timer
495 	 */
496 	igb_disable_watchdog_timer(igb);
497 }
498 
499 /*
500  * Set the promiscuity of the device.
501  */
502 int
503 igb_m_promisc(void *arg, boolean_t on)
504 {
505 	igb_t *igb = (igb_t *)arg;
506 	uint32_t reg_val;
507 
508 	mutex_enter(&igb->gen_lock);
509 
510 	if (igb->igb_state & IGB_SUSPENDED) {
511 		mutex_exit(&igb->gen_lock);
512 		return (ECANCELED);
513 	}
514 
515 	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
516 
517 	if (on)
518 		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
519 	else
520 		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
521 
522 	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
523 
524 	mutex_exit(&igb->gen_lock);
525 
526 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
527 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
528 		return (EIO);
529 	}
530 
531 	return (0);
532 }
533 
534 /*
535  * Add/remove the addresses to/from the set of multicast
536  * addresses for which the device will receive packets.
537  */
538 int
539 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
540 {
541 	igb_t *igb = (igb_t *)arg;
542 	int result;
543 
544 	mutex_enter(&igb->gen_lock);
545 
546 	if (igb->igb_state & IGB_SUSPENDED) {
547 		mutex_exit(&igb->gen_lock);
548 		return (ECANCELED);
549 	}
550 
551 	result = (add) ? igb_multicst_add(igb, mcst_addr)
552 	    : igb_multicst_remove(igb, mcst_addr);
553 
554 	mutex_exit(&igb->gen_lock);
555 
556 	return (result);
557 }
558 
559 /*
560  * Pass on M_IOCTL messages passed to the DLD, and support
561  * private IOCTLs for debugging and ndd.
562  */
563 void
564 igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
565 {
566 	igb_t *igb = (igb_t *)arg;
567 	struct iocblk *iocp;
568 	enum ioc_reply status;
569 
570 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
571 	iocp->ioc_error = 0;
572 
573 	mutex_enter(&igb->gen_lock);
574 	if (igb->igb_state & IGB_SUSPENDED) {
575 		mutex_exit(&igb->gen_lock);
576 		miocnak(q, mp, 0, EINVAL);
577 		return;
578 	}
579 	mutex_exit(&igb->gen_lock);
580 
581 	switch (iocp->ioc_cmd) {
582 	case LB_GET_INFO_SIZE:
583 	case LB_GET_INFO:
584 	case LB_GET_MODE:
585 	case LB_SET_MODE:
586 		status = igb_loopback_ioctl(igb, iocp, mp);
587 		break;
588 
589 	default:
590 		status = IOC_INVAL;
591 		break;
592 	}
593 
594 	/*
595 	 * Decide how to reply
596 	 */
597 	switch (status) {
598 	default:
599 	case IOC_INVAL:
600 		/*
601 		 * Error, reply with a NAK and EINVAL or the specified error
602 		 */
603 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
604 		    EINVAL : iocp->ioc_error);
605 		break;
606 
607 	case IOC_DONE:
608 		/*
609 		 * OK, reply already sent
610 		 */
611 		break;
612 
613 	case IOC_ACK:
614 		/*
615 		 * OK, reply with an ACK
616 		 */
617 		miocack(q, mp, 0, 0);
618 		break;
619 
620 	case IOC_REPLY:
621 		/*
622 		 * OK, send prepared reply as ACK or NAK
623 		 */
624 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
625 		    M_IOCACK : M_IOCNAK;
626 		qreply(q, mp);
627 		break;
628 	}
629 }
630 
631 /*
632  * Add a MAC address to the target RX group.
633  */
634 static int
635 igb_addmac(void *arg, const uint8_t *mac_addr)
636 {
637 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
638 	igb_t *igb = rx_group->igb;
639 	struct e1000_hw *hw = &igb->hw;
640 	int i, slot;
641 
642 	mutex_enter(&igb->gen_lock);
643 
644 	if (igb->igb_state & IGB_SUSPENDED) {
645 		mutex_exit(&igb->gen_lock);
646 		return (ECANCELED);
647 	}
648 
649 	if (igb->unicst_avail == 0) {
650 		/* no slots available */
651 		mutex_exit(&igb->gen_lock);
652 		return (ENOSPC);
653 	}
654 
655 	/*
656 	 * The slots from 0 to igb->num_rx_groups are reserved slots which
657 	 * are 1 to 1 mapped with group index directly. The other slots are
658 	 * shared between the all of groups. While adding a MAC address,
659 	 * it will try to set the reserved slots first, then the shared slots.
660 	 */
661 	slot = -1;
662 	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
663 		/*
664 		 * The reserved slot for current group is used, find the free
665 		 * slots in the shared slots.
666 		 */
667 		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
668 			if (igb->unicst_addr[i].mac.set == 0) {
669 				slot = i;
670 				break;
671 			}
672 		}
673 	} else
674 		slot = rx_group->index;
675 
676 	if (slot == -1) {
677 		/* no slots available in the shared slots */
678 		mutex_exit(&igb->gen_lock);
679 		return (ENOSPC);
680 	}
681 
682 	/* Set VMDq according to the mode supported by hardware. */
683 	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);
684 
685 	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
686 	igb->unicst_addr[slot].mac.group_index = rx_group->index;
687 	igb->unicst_addr[slot].mac.set = 1;
688 	igb->unicst_avail--;
689 
690 	mutex_exit(&igb->gen_lock);
691 
692 	return (0);
693 }
694 
695 /*
696  * Remove a MAC address from the specified RX group.
697  */
698 static int
699 igb_remmac(void *arg, const uint8_t *mac_addr)
700 {
701 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
702 	igb_t *igb = rx_group->igb;
703 	struct e1000_hw *hw = &igb->hw;
704 	int slot;
705 
706 	mutex_enter(&igb->gen_lock);
707 
708 	if (igb->igb_state & IGB_SUSPENDED) {
709 		mutex_exit(&igb->gen_lock);
710 		return (ECANCELED);
711 	}
712 
713 	slot = igb_unicst_find(igb, mac_addr);
714 	if (slot == -1) {
715 		mutex_exit(&igb->gen_lock);
716 		return (EINVAL);
717 	}
718 
719 	if (igb->unicst_addr[slot].mac.set == 0) {
720 		mutex_exit(&igb->gen_lock);
721 		return (EINVAL);
722 	}
723 
724 	/* Clear the MAC ddress in the slot */
725 	e1000_rar_clear(hw, slot);
726 	igb->unicst_addr[slot].mac.set = 0;
727 	igb->unicst_avail++;
728 
729 	mutex_exit(&igb->gen_lock);
730 
731 	return (0);
732 }
733 
734 /*
 * Enable interrupt on the specified rx ring.
736  */
737 int
738 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
739 {
740 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
741 	igb_t *igb = rx_ring->igb;
742 	struct e1000_hw *hw = &igb->hw;
743 	uint32_t index = rx_ring->index;
744 
745 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
746 		/* Interrupt enabling for MSI-X */
747 		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
748 		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
749 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
750 	} else {
751 		ASSERT(index == 0);
752 		/* Interrupt enabling for MSI and legacy */
753 		igb->ims_mask |= E1000_IMS_RXT0;
754 		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
755 	}
756 
757 	E1000_WRITE_FLUSH(hw);
758 
759 	return (0);
760 }
761 
762 /*
 * Disable interrupt on the specified rx ring.
764  */
765 int
766 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
767 {
768 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
769 	igb_t *igb = rx_ring->igb;
770 	struct e1000_hw *hw = &igb->hw;
771 	uint32_t index = rx_ring->index;
772 
773 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
774 		/* Interrupt disabling for MSI-X */
775 		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
776 		E1000_WRITE_REG(hw, E1000_EIMC,
777 		    (E1000_EICR_RX_QUEUE0 << index));
778 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
779 	} else {
780 		ASSERT(index == 0);
781 		/* Interrupt disabling for MSI and legacy */
782 		igb->ims_mask &= ~E1000_IMS_RXT0;
783 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
784 	}
785 
786 	E1000_WRITE_FLUSH(hw);
787 
788 	return (0);
789 }
790 
791 /*
792  * Get the global ring index by a ring index within a group.
793  */
794 int
795 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
796 {
797 	igb_rx_ring_t *rx_ring;
798 	int i;
799 
800 	for (i = 0; i < igb->num_rx_rings; i++) {
801 		rx_ring = &igb->rx_rings[i];
802 		if (rx_ring->group_index == gindex)
803 			rindex--;
804 		if (rindex < 0)
805 			return (i);
806 	}
807 
808 	return (-1);
809 }
810 
811 static int
812 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
813 {
814 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
815 
816 	mutex_enter(&rx_ring->rx_lock);
817 	rx_ring->ring_gen_num = mr_gen_num;
818 	mutex_exit(&rx_ring->rx_lock);
819 	return (0);
820 }
821 
822 /*
 * Callback function for MAC layer to register all rings.
824  */
825 /* ARGSUSED */
826 void
827 igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
828     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
829 {
830 	igb_t *igb = (igb_t *)arg;
831 	mac_intr_t *mintr = &infop->mri_intr;
832 
833 	switch (rtype) {
834 	case MAC_RING_TYPE_RX: {
835 		igb_rx_ring_t *rx_ring;
836 		int global_index;
837 
838 		/*
839 		 * 'index' is the ring index within the group.
840 		 * We need the global ring index by searching in group.
841 		 */
842 		global_index = igb_get_rx_ring_index(igb, rg_index, index);
843 
844 		ASSERT(global_index >= 0);
845 
846 		rx_ring = &igb->rx_rings[global_index];
847 		rx_ring->ring_handle = rh;
848 
849 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
850 		infop->mri_start = igb_ring_start;
851 		infop->mri_stop = NULL;
852 		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
853 
854 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
855 		mintr->mi_enable = igb_rx_ring_intr_enable;
856 		mintr->mi_disable = igb_rx_ring_intr_disable;
857 
858 		break;
859 	}
860 	case MAC_RING_TYPE_TX: {
861 		ASSERT(index < igb->num_tx_rings);
862 
863 		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
864 		tx_ring->ring_handle = rh;
865 
866 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
867 		infop->mri_start = NULL;
868 		infop->mri_stop = NULL;
869 		infop->mri_tx = igb_tx_ring_send;
870 
871 		break;
872 	}
873 	default:
874 		break;
875 	}
876 }
877 
878 void
879 igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
880     mac_group_info_t *infop, mac_group_handle_t gh)
881 {
882 	igb_t *igb = (igb_t *)arg;
883 
884 	switch (rtype) {
885 	case MAC_RING_TYPE_RX: {
886 		igb_rx_group_t *rx_group;
887 
888 		ASSERT((index >= 0) && (index < igb->num_rx_groups));
889 
890 		rx_group = &igb->rx_groups[index];
891 		rx_group->group_handle = gh;
892 
893 		infop->mgi_driver = (mac_group_driver_t)rx_group;
894 		infop->mgi_start = NULL;
895 		infop->mgi_stop = NULL;
896 		infop->mgi_addmac = igb_addmac;
897 		infop->mgi_remmac = igb_remmac;
898 		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);
899 
900 		break;
901 	}
902 	case MAC_RING_TYPE_TX:
903 		break;
904 	default:
905 		break;
906 	}
907 }
908 
909 /*
910  * Obtain the MAC's capabilities and associated data from
911  * the driver.
912  */
913 boolean_t
914 igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
915 {
916 	igb_t *igb = (igb_t *)arg;
917 
918 	switch (cap) {
919 	case MAC_CAPAB_HCKSUM: {
920 		uint32_t *tx_hcksum_flags = cap_data;
921 
922 		/*
923 		 * We advertise our capabilities only if tx hcksum offload is
924 		 * enabled.  On receive, the stack will accept checksummed
925 		 * packets anyway, even if we haven't said we can deliver
926 		 * them.
927 		 */
928 		if (!igb->tx_hcksum_enable)
929 			return (B_FALSE);
930 
931 		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
932 		break;
933 	}
934 	case MAC_CAPAB_LSO: {
935 		mac_capab_lso_t *cap_lso = cap_data;
936 
937 		if (igb->lso_enable) {
938 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
939 			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
940 			break;
941 		} else {
942 			return (B_FALSE);
943 		}
944 	}
945 	case MAC_CAPAB_RINGS: {
946 		mac_capab_rings_t *cap_rings = cap_data;
947 
948 		switch (cap_rings->mr_type) {
949 		case MAC_RING_TYPE_RX:
950 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
951 			cap_rings->mr_rnum = igb->num_rx_rings;
952 			cap_rings->mr_gnum = igb->num_rx_groups;
953 			cap_rings->mr_rget = igb_fill_ring;
954 			cap_rings->mr_gget = igb_fill_group;
955 			cap_rings->mr_gaddring = NULL;
956 			cap_rings->mr_gremring = NULL;
957 
958 			break;
959 		case MAC_RING_TYPE_TX:
960 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
961 			cap_rings->mr_rnum = igb->num_tx_rings;
962 			cap_rings->mr_gnum = 0;
963 			cap_rings->mr_rget = igb_fill_ring;
964 			cap_rings->mr_gget = NULL;
965 
966 			break;
967 		default:
968 			break;
969 		}
970 		break;
971 	}
972 
973 	default:
974 		return (B_FALSE);
975 	}
976 	return (B_TRUE);
977 }
978 
/*
 * igb_m_setprop: MAC layer mc_setprop(9E) entry point.
 *
 * Sets a link property. Note the control flow: each EN_* speed/duplex
 * case and the AUTONEG/FLOWCTRL cases finish with 'goto setup_link',
 * which re-runs link setup only if no error was recorded. The en_*
 * properties are locked (EBUSY) while any loopback mode is active.
 */
int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		/* map the MAC layer's flow-control mode to e1000's */
		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
		/* shared tail for the cases above: re-run link setup */
setup_link:
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			igb->default_mtu = new_mtu;
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;

			/*
			 * Set rx buffer size
			 * (max frame + alignment room, rounded up to 1K)
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

			/*
			 * Set tx buffer size
			 * (max frame, rounded up to 1K)
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = EINVAL;
		break;
	}

	mutex_exit(&igb->gen_lock);

	/* report a degraded service through FMA on a bad register access */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}
1152 
1153 int
1154 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1155     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
1156 {
1157 	igb_t *igb = (igb_t *)arg;
1158 	struct e1000_hw *hw = &igb->hw;
1159 	int err = 0;
1160 	uint32_t flow_control;
1161 	uint64_t tmp = 0;
1162 	mac_propval_range_t range;
1163 
1164 	if (pr_valsize == 0)
1165 		return (EINVAL);
1166 
1167 	*perm = MAC_PROP_PERM_RW;
1168 
1169 	bzero(pr_val, pr_valsize);
1170 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE))
1171 		return (igb_get_def_val(igb, pr_num, pr_valsize, pr_val));
1172 
1173 	switch (pr_num) {
1174 	case MAC_PROP_DUPLEX:
1175 		*perm = MAC_PROP_PERM_READ;
1176 		if (pr_valsize >= sizeof (link_duplex_t)) {
1177 			bcopy(&igb->link_duplex, pr_val,
1178 			    sizeof (link_duplex_t));
1179 		} else
1180 			err = EINVAL;
1181 		break;
1182 	case MAC_PROP_SPEED:
1183 		*perm = MAC_PROP_PERM_READ;
1184 		if (pr_valsize >= sizeof (uint64_t)) {
1185 			tmp = igb->link_speed * 1000000ull;
1186 			bcopy(&tmp, pr_val, sizeof (tmp));
1187 		} else
1188 			err = EINVAL;
1189 		break;
1190 	case MAC_PROP_AUTONEG:
1191 		if (hw->phy.media_type != e1000_media_type_copper)
1192 			*perm = MAC_PROP_PERM_READ;
1193 		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1194 		break;
1195 	case MAC_PROP_FLOWCTRL:
1196 		if (pr_valsize >= sizeof (uint32_t)) {
1197 			switch (hw->fc.requested_mode) {
1198 				case e1000_fc_none:
1199 					flow_control = LINK_FLOWCTRL_NONE;
1200 					break;
1201 				case e1000_fc_rx_pause:
1202 					flow_control = LINK_FLOWCTRL_RX;
1203 					break;
1204 				case e1000_fc_tx_pause:
1205 					flow_control = LINK_FLOWCTRL_TX;
1206 					break;
1207 				case e1000_fc_full:
1208 					flow_control = LINK_FLOWCTRL_BI;
1209 					break;
1210 			}
1211 			bcopy(&flow_control, pr_val, sizeof (flow_control));
1212 		} else
1213 			err = EINVAL;
1214 		break;
1215 	case MAC_PROP_ADV_1000FDX_CAP:
1216 		*perm = MAC_PROP_PERM_READ;
1217 		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1218 		break;
1219 	case MAC_PROP_EN_1000FDX_CAP:
1220 		if (hw->phy.media_type != e1000_media_type_copper)
1221 			*perm = MAC_PROP_PERM_READ;
1222 		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1223 		break;
1224 	case MAC_PROP_ADV_1000HDX_CAP:
1225 		*perm = MAC_PROP_PERM_READ;
1226 		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1227 		break;
1228 	case MAC_PROP_EN_1000HDX_CAP:
1229 		*perm = MAC_PROP_PERM_READ;
1230 		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1231 		break;
1232 	case MAC_PROP_ADV_100T4_CAP:
1233 		*perm = MAC_PROP_PERM_READ;
1234 		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1235 		break;
1236 	case MAC_PROP_EN_100T4_CAP:
1237 		*perm = MAC_PROP_PERM_READ;
1238 		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
1239 		break;
1240 	case MAC_PROP_ADV_100FDX_CAP:
1241 		*perm = MAC_PROP_PERM_READ;
1242 		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1243 		break;
1244 	case MAC_PROP_EN_100FDX_CAP:
1245 		if (hw->phy.media_type != e1000_media_type_copper)
1246 			*perm = MAC_PROP_PERM_READ;
1247 		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1248 		break;
1249 	case MAC_PROP_ADV_100HDX_CAP:
1250 		*perm = MAC_PROP_PERM_READ;
1251 		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1252 		break;
1253 	case MAC_PROP_EN_100HDX_CAP:
1254 		if (hw->phy.media_type != e1000_media_type_copper)
1255 			*perm = MAC_PROP_PERM_READ;
1256 		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1257 		break;
1258 	case MAC_PROP_ADV_10FDX_CAP:
1259 		*perm = MAC_PROP_PERM_READ;
1260 		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1261 		break;
1262 	case MAC_PROP_EN_10FDX_CAP:
1263 		if (hw->phy.media_type != e1000_media_type_copper)
1264 			*perm = MAC_PROP_PERM_READ;
1265 		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1266 		break;
1267 	case MAC_PROP_ADV_10HDX_CAP:
1268 		*perm = MAC_PROP_PERM_READ;
1269 		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1270 		break;
1271 	case MAC_PROP_EN_10HDX_CAP:
1272 		if (hw->phy.media_type != e1000_media_type_copper)
1273 			*perm = MAC_PROP_PERM_READ;
1274 		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1275 		break;
1276 	case MAC_PROP_PRIVATE:
1277 		err = igb_get_priv_prop(igb, pr_name,
1278 		    pr_flags, pr_valsize, pr_val, perm);
1279 		break;
1280 	case MAC_PROP_MTU:
1281 		if (!(pr_flags & MAC_PROP_POSSIBLE))
1282 			return (ENOTSUP);
1283 		if (pr_valsize < sizeof (mac_propval_range_t))
1284 			return (EINVAL);
1285 		range.mpr_count = 1;
1286 		range.mpr_type = MAC_PROPVAL_UINT32;
1287 		range.range_uint32[0].mpur_min = MIN_MTU;
1288 		range.range_uint32[0].mpur_max = MAX_MTU;
1289 		bcopy(&range, pr_val, sizeof (range));
1290 		break;
1291 	default:
1292 		err = EINVAL;
1293 		break;
1294 	}
1295 	return (err);
1296 }
1297 
1298 int
1299 igb_get_def_val(igb_t *igb, mac_prop_id_t pr_num,
1300     uint_t pr_valsize, void *pr_val)
1301 {
1302 	uint32_t flow_control;
1303 	struct e1000_hw *hw = &igb->hw;
1304 	uint16_t phy_status;
1305 	uint16_t phy_ext_status;
1306 	int err = 0;
1307 
1308 	ASSERT(pr_valsize > 0);
1309 	switch (pr_num) {
1310 	case MAC_PROP_AUTONEG:
1311 		if (hw->phy.media_type != e1000_media_type_copper) {
1312 			*(uint8_t *)pr_val = 0;
1313 		} else {
1314 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1315 			*(uint8_t *)pr_val =
1316 			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
1317 		}
1318 		break;
1319 	case MAC_PROP_FLOWCTRL:
1320 		if (pr_valsize < sizeof (uint32_t))
1321 			return (EINVAL);
1322 		flow_control = LINK_FLOWCTRL_BI;
1323 		bcopy(&flow_control, pr_val, sizeof (flow_control));
1324 		break;
1325 	case MAC_PROP_ADV_1000FDX_CAP:
1326 	case MAC_PROP_EN_1000FDX_CAP:
1327 		if (hw->phy.media_type != e1000_media_type_copper) {
1328 			*(uint8_t *)pr_val = 1;
1329 		} else {
1330 			(void) e1000_read_phy_reg(hw,
1331 			    PHY_EXT_STATUS, &phy_ext_status);
1332 			*(uint8_t *)pr_val =
1333 			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1334 			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
1335 		}
1336 		break;
1337 	case MAC_PROP_ADV_1000HDX_CAP:
1338 	case MAC_PROP_EN_1000HDX_CAP:
1339 	case MAC_PROP_ADV_100T4_CAP:
1340 	case MAC_PROP_EN_100T4_CAP:
1341 		*(uint8_t *)pr_val = 0;
1342 		break;
1343 	case MAC_PROP_ADV_100FDX_CAP:
1344 	case MAC_PROP_EN_100FDX_CAP:
1345 		if (hw->phy.media_type != e1000_media_type_copper) {
1346 			*(uint8_t *)pr_val = 0;
1347 		} else {
1348 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1349 			*(uint8_t *)pr_val =
1350 			    ((phy_status & MII_SR_100X_FD_CAPS) ||
1351 			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
1352 		}
1353 		break;
1354 	case MAC_PROP_ADV_100HDX_CAP:
1355 	case MAC_PROP_EN_100HDX_CAP:
1356 		if (hw->phy.media_type != e1000_media_type_copper) {
1357 			*(uint8_t *)pr_val = 0;
1358 		} else {
1359 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1360 			*(uint8_t *)pr_val =
1361 			    ((phy_status & MII_SR_100X_HD_CAPS) ||
1362 			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
1363 		}
1364 		break;
1365 	case MAC_PROP_ADV_10FDX_CAP:
1366 	case MAC_PROP_EN_10FDX_CAP:
1367 		if (hw->phy.media_type != e1000_media_type_copper) {
1368 			*(uint8_t *)pr_val = 0;
1369 		} else {
1370 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1371 			*(uint8_t *)pr_val =
1372 			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
1373 		}
1374 		break;
1375 	case MAC_PROP_ADV_10HDX_CAP:
1376 	case MAC_PROP_EN_10HDX_CAP:
1377 		if (hw->phy.media_type != e1000_media_type_copper) {
1378 			*(uint8_t *)pr_val = 0;
1379 		} else {
1380 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1381 			*(uint8_t *)pr_val =
1382 			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
1383 		}
1384 		break;
1385 	default:
1386 		err = ENOTSUP;
1387 		break;
1388 	}
1389 	return (err);
1390 }
1391 
1392 boolean_t
1393 igb_param_locked(mac_prop_id_t pr_num)
1394 {
1395 	/*
1396 	 * All en_* parameters are locked (read-only) while
1397 	 * the device is in any sort of loopback mode ...
1398 	 */
1399 	switch (pr_num) {
1400 		case MAC_PROP_EN_1000FDX_CAP:
1401 		case MAC_PROP_EN_1000HDX_CAP:
1402 		case MAC_PROP_EN_100T4_CAP:
1403 		case MAC_PROP_EN_100FDX_CAP:
1404 		case MAC_PROP_EN_100HDX_CAP:
1405 		case MAC_PROP_EN_10FDX_CAP:
1406 		case MAC_PROP_EN_10HDX_CAP:
1407 		case MAC_PROP_AUTONEG:
1408 		case MAC_PROP_FLOWCTRL:
1409 			return (B_TRUE);
1410 	}
1411 	return (B_FALSE);
1412 }
1413 
1414 /* ARGSUSED */
1415 int
1416 igb_set_priv_prop(igb_t *igb, const char *pr_name,
1417     uint_t pr_valsize, const void *pr_val)
1418 {
1419 	int err = 0;
1420 	long result;
1421 	struct e1000_hw *hw = &igb->hw;
1422 	int i;
1423 
1424 	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1425 		if (pr_val == NULL) {
1426 			err = EINVAL;
1427 			return (err);
1428 		}
1429 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1430 		if (result < MIN_TX_COPY_THRESHOLD ||
1431 		    result > MAX_TX_COPY_THRESHOLD)
1432 			err = EINVAL;
1433 		else {
1434 			igb->tx_copy_thresh = (uint32_t)result;
1435 		}
1436 		return (err);
1437 	}
1438 	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1439 		if (pr_val == NULL) {
1440 			err = EINVAL;
1441 			return (err);
1442 		}
1443 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1444 		if (result < MIN_TX_RECYCLE_THRESHOLD ||
1445 		    result > MAX_TX_RECYCLE_THRESHOLD)
1446 			err = EINVAL;
1447 		else {
1448 			igb->tx_recycle_thresh = (uint32_t)result;
1449 		}
1450 		return (err);
1451 	}
1452 	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1453 		if (pr_val == NULL) {
1454 			err = EINVAL;
1455 			return (err);
1456 		}
1457 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1458 		if (result < MIN_TX_OVERLOAD_THRESHOLD ||
1459 		    result > MAX_TX_OVERLOAD_THRESHOLD)
1460 			err = EINVAL;
1461 		else {
1462 			igb->tx_overload_thresh = (uint32_t)result;
1463 		}
1464 		return (err);
1465 	}
1466 	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1467 		if (pr_val == NULL) {
1468 			err = EINVAL;
1469 			return (err);
1470 		}
1471 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1472 		if (result < MIN_TX_RESCHED_THRESHOLD ||
1473 		    result > MAX_TX_RESCHED_THRESHOLD)
1474 			err = EINVAL;
1475 		else {
1476 			igb->tx_resched_thresh = (uint32_t)result;
1477 		}
1478 		return (err);
1479 	}
1480 	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1481 		if (pr_val == NULL) {
1482 			err = EINVAL;
1483 			return (err);
1484 		}
1485 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1486 		if (result < MIN_RX_COPY_THRESHOLD ||
1487 		    result > MAX_RX_COPY_THRESHOLD)
1488 			err = EINVAL;
1489 		else {
1490 			igb->rx_copy_thresh = (uint32_t)result;
1491 		}
1492 		return (err);
1493 	}
1494 	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1495 		if (pr_val == NULL) {
1496 			err = EINVAL;
1497 			return (err);
1498 		}
1499 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1500 		if (result < MIN_RX_LIMIT_PER_INTR ||
1501 		    result > MAX_RX_LIMIT_PER_INTR)
1502 			err = EINVAL;
1503 		else {
1504 			igb->rx_limit_per_intr = (uint32_t)result;
1505 		}
1506 		return (err);
1507 	}
1508 	if (strcmp(pr_name, "_intr_throttling") == 0) {
1509 		if (pr_val == NULL) {
1510 			err = EINVAL;
1511 			return (err);
1512 		}
1513 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1514 
1515 		if (result < igb->capab->min_intr_throttle ||
1516 		    result > igb->capab->max_intr_throttle)
1517 			err = EINVAL;
1518 		else {
1519 			igb->intr_throttling[0] = (uint32_t)result;
1520 
1521 			for (i = 0; i < MAX_NUM_EITR; i++)
1522 				igb->intr_throttling[i] =
1523 				    igb->intr_throttling[0];
1524 
1525 			/* Set interrupt throttling rate */
1526 			for (i = 0; i < igb->intr_cnt; i++)
1527 				E1000_WRITE_REG(hw, E1000_EITR(i),
1528 				    igb->intr_throttling[i]);
1529 		}
1530 		return (err);
1531 	}
1532 	return (ENOTSUP);
1533 }
1534 
1535 int
1536 igb_get_priv_prop(igb_t *igb, const char *pr_name,
1537     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
1538 {
1539 	int err = ENOTSUP;
1540 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
1541 	int value;
1542 
1543 	*perm = MAC_PROP_PERM_RW;
1544 
1545 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1546 		*perm = MAC_PROP_PERM_READ;
1547 		value = (is_default ? 1 : igb->param_adv_pause_cap);
1548 		err = 0;
1549 		goto done;
1550 	}
1551 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1552 		*perm = MAC_PROP_PERM_READ;
1553 		value = (is_default ? 1 : igb->param_adv_asym_pause_cap);
1554 		err = 0;
1555 		goto done;
1556 	}
1557 	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1558 		value = (is_default ? DEFAULT_TX_COPY_THRESHOLD :
1559 		    igb->tx_copy_thresh);
1560 		err = 0;
1561 		goto done;
1562 	}
1563 	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1564 		value = (is_default ? DEFAULT_TX_RECYCLE_THRESHOLD :
1565 		    igb->tx_recycle_thresh);
1566 		err = 0;
1567 		goto done;
1568 	}
1569 	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1570 		value = (is_default ? DEFAULT_TX_OVERLOAD_THRESHOLD :
1571 		    igb->tx_overload_thresh);
1572 		err = 0;
1573 		goto done;
1574 	}
1575 	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1576 		value = (is_default ? DEFAULT_TX_RESCHED_THRESHOLD :
1577 		    igb->tx_resched_thresh);
1578 		err = 0;
1579 		goto done;
1580 	}
1581 	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1582 		value = (is_default ? DEFAULT_RX_COPY_THRESHOLD :
1583 		    igb->rx_copy_thresh);
1584 		err = 0;
1585 		goto done;
1586 	}
1587 	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1588 		value = (is_default ? DEFAULT_RX_LIMIT_PER_INTR :
1589 		    igb->rx_limit_per_intr);
1590 		err = 0;
1591 		goto done;
1592 	}
1593 	if (strcmp(pr_name, "_intr_throttling") == 0) {
1594 		value = (is_default ? igb->capab->def_intr_throttle :
1595 		    igb->intr_throttling[0]);
1596 		err = 0;
1597 		goto done;
1598 	}
1599 done:
1600 	if (err == 0) {
1601 		(void) snprintf(pr_val, pr_valsize, "%d", value);
1602 	}
1603 	return (err);
1604 }
1605