/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "igb_sw.h"

int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

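	/*
	 * The hardware statistics registers are clear-on-read, so each
	 * read below is accumulated into the corresponding soft counter.
	 */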
	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb->stat_mprc += E1000_READ_REG(hw, E1000_MPRC);
		*val = igb->stat_mprc;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb->stat_bprc += E1000_READ_REG(hw, E1000_BPRC);
		*val = igb->stat_bprc;
		break;

	case MAC_STAT_MULTIXMT:
		igb->stat_mptc += E1000_READ_REG(hw, E1000_MPTC);
		*val = igb->stat_mptc;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb->stat_bptc += E1000_READ_REG(hw, E1000_BPTC);
		*val = igb->stat_bptc;
		break;

	case MAC_STAT_NORCVBUF:
		igb->stat_rnbc += E1000_READ_REG(hw, E1000_RNBC);
		*val = igb->stat_rnbc;
		break;

	case MAC_STAT_IERRORS:
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_rxerrc +
		    igb->stat_algnerrc +
		    igb_ks->rlec.value.ui64 +
		    igb->stat_crcerrs +
		    igb->stat_cexterr;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case MAC_STAT_COLLISIONS:
		igb->stat_colc += E1000_READ_REG(hw, E1000_COLC);
		*val = igb->stat_colc;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb->stat_tor += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tor;
		break;

	case MAC_STAT_IPACKETS:
		igb->stat_tpr += E1000_READ_REG(hw, E1000_TPR);
		*val = igb->stat_tpr;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb->stat_tot += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tot;
		break;

	case MAC_STAT_OPACKETS:
		igb->stat_tpt += E1000_READ_REG(hw, E1000_TPT);
		*val = igb->stat_tpt;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb->stat_algnerrc;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb->stat_crcerrs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb->stat_scc += E1000_READ_REG(hw, E1000_SCC);
		*val = igb->stat_scc;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb->stat_mcc += E1000_READ_REG(hw, E1000_MCC);
		*val = igb->stat_mcc;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb->stat_sec += E1000_READ_REG(hw, E1000_SEC);
		*val = igb->stat_sec;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb->stat_dc += E1000_READ_REG(hw, E1000_DC);
		*val = igb->stat_dc;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb->stat_latecol += E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb->stat_latecol;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_cexterr;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb->stat_roc += E1000_READ_REG(hw, E1000_ROC);
		*val = igb->stat_roc;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb->stat_rxerrc;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
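		/*
		 * Map the current link speed and media type to the
		 * corresponding transceiver type.
		 */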
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb->stat_ruc += E1000_READ_REG(hw, E1000_RUC);
		*val = igb->stat_ruc;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb->stat_rjc += E1000_READ_REG(hw, E1000_RJC);
		*val = igb->stat_rjc;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
igb_m_start(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
		mutex_exit(&igb->gen_lock);
		return (EIO);
	}

	atomic_or_32(&igb->igb_state, IGB_STARTED);

	mutex_exit(&igb->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	igb_enable_watchdog_timer(igb);

	return (0);
}

/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
igb_m_stop(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	atomic_and_32(&igb->igb_state, ~IGB_STARTED);

	igb_stop(igb, B_TRUE);

	mutex_exit(&igb->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	igb_disable_watchdog_timer(igb);
}

/*
 * Set the promiscuity of the device.
 */
int
igb_m_promisc(void *arg, boolean_t on)
{
	igb_t *igb = (igb_t *)arg;
	uint32_t reg_val;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);

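	/*
	 * UPE and MPE are the unicast and multicast promiscuous enable
	 * bits in the receive control register.
	 */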
	if (on)
		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else
		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Add/remove the addresses to/from the set of multicast
 * addresses for which the device will receive packets.
 */
int
igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
{
	igb_t *igb = (igb_t *)arg;
	int result;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	result = (add) ? igb_multicst_add(igb, mcst_addr)
	    : igb_multicst_remove(igb, mcst_addr);

	mutex_exit(&igb->gen_lock);

	return (result);
}

/*
 * Pass on M_IOCTL messages passed to the DLD, and support
 * private IOCTLs for debugging and ndd.
 */
void
igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	igb_t *igb = (igb_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		miocnak(q, mp, 0, EINVAL);
		return;
	}
	mutex_exit(&igb->gen_lock);

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = igb_loopback_ioctl(igb, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Add a MAC address to the target RX group.
 */
static int
igb_addmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int i, slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/*
	 * Slots 0 through igb->num_rx_groups - 1 are reserved and mapped
	 * one-to-one to the group index. The remaining slots are shared
	 * among all of the groups. When adding a MAC address, the group's
	 * reserved slot is tried first, then the shared slots.
	 */
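	/*
	 * For example, with four rx groups and sixteen unicast address
	 * slots, slots 0-3 would be reserved for groups 0-3 and slots
	 * 4-15 would be shared by all groups.
	 */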
	slot = -1;
	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
		/*
		 * The reserved slot for the current group is already in
		 * use; look for a free slot among the shared slots.
		 */
		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
			if (igb->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else
		slot = rx_group->index;

	if (slot == -1) {
		/* no slots available in the shared slots */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/* Set VMDq according to the mode supported by hardware. */
	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);

	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
	igb->unicst_addr[slot].mac.group_index = rx_group->index;
	igb->unicst_addr[slot].mac.set = 1;
	igb->unicst_avail--;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Remove a MAC address from the specified RX group.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Enable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt enabling for MSI-X */
		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
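		/*
		 * Listing the queue in EIAC lets the hardware auto-clear
		 * its cause bit when the MSI-X interrupt fires.
		 */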
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt enabling for MSI and legacy */
		igb->ims_mask |= E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt disabling for MSI-X */
		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMC,
		    (E1000_EICR_RX_QUEUE0 << index));
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt disabling for MSI and legacy */
		igb->ims_mask &= ~E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
int
igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
{
	igb_rx_ring_t *rx_ring;
	int i;

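	/*
	 * Walk all rx rings; the rindex-th ring found in group gindex
	 * is the one whose global index is returned.
	 */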
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}

static int
igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for the MAC layer to register all rings.
 */
/* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group. We need to
		 * find the global ring index by searching within the group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
		infop->mri_stat = igb_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;
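		/*
		 * For MSI-X/MSI, also pass the DDI interrupt handle that
		 * backs this ring's vector to the MAC layer.
		 */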
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[rx_ring->intr_vector];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;
		infop->mri_stat = igb_tx_ring_stat;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}

void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Obtain the MAC's capabilities and associated data from
 * the driver.
 */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled.  On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

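		/*
		 * Rx rings are exposed in statically assigned groups; tx
		 * rings are exposed without any groups (mr_gnum is 0).
		 */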
		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
setup_link:
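		/*
		 * Reconfigure the link so that the new advertisement or
		 * flow-control settings take effect.
		 */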
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			igb->default_mtu = new_mtu;
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;
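			/*
			 * e.g. a 1500-byte MTU gives a max frame size of
			 * 1500 + 18 (VLAN-tagged header) + 4 (FCS) = 1522.
			 */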

			/*
			 * Set rx buffer size
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
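			/* i.e. rx_size rounded up to the next 1 KB multiple */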

			/*
			 * Set tx buffer size
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}

int
igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint64_t tmp = 0;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		ASSERT(pr_valsize >= sizeof (uint64_t));
		tmp = igb->link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;
	case MAC_PROP_AUTONEG:
		ASSERT(pr_valsize >= sizeof (uint8_t));
		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
		break;
	case MAC_PROP_FLOWCTRL:
		ASSERT(pr_valsize >= sizeof (uint32_t));
		switch (hw->fc.requested_mode) {
			case e1000_fc_none:
				flow_control = LINK_FLOWCTRL_NONE;
				break;
			case e1000_fc_rx_pause:
				flow_control = LINK_FLOWCTRL_RX;
				break;
			case e1000_fc_tx_pause:
				flow_control = LINK_FLOWCTRL_TX;
				break;
			case e1000_fc_full:
				flow_control = LINK_FLOWCTRL_BI;
				break;
		}
		bcopy(&flow_control, pr_val, sizeof (flow_control));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
		break;
	case MAC_PROP_ADV_100T4_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
		break;
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
		break;
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
		break;
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
		break;
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
		break;
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
		break;
	case MAC_PROP_PRIVATE:
		err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}

void
igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	uint16_t phy_status, phy_ext_status;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
			    &phy_ext_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_status & MII_SR_100X_FD_CAPS) ||
			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_status & MII_SR_100X_HD_CAPS) ||
			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_FLOWCTRL:
		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
		break;

	case MAC_PROP_PRIVATE:
		igb_priv_prop_info(igb, pr_name, prh);
		break;
	}

}

boolean_t
igb_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All en_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
		case MAC_PROP_EN_1000FDX_CAP:
		case MAC_PROP_EN_1000HDX_CAP:
		case MAC_PROP_EN_100T4_CAP:
		case MAC_PROP_EN_100FDX_CAP:
		case MAC_PROP_EN_100HDX_CAP:
		case MAC_PROP_EN_10FDX_CAP:
		case MAC_PROP_EN_10HDX_CAP:
		case MAC_PROP_AUTONEG:
		case MAC_PROP_FLOWCTRL:
			return (B_TRUE);
	}
	return (B_FALSE);
}

/* ARGSUSED */
int
igb_set_priv_prop(igb_t *igb, const char *pr_name,
    uint_t pr_valsize, const void *pr_val)
{
	int err = 0;
	long result;
	struct e1000_hw *hw = &igb->hw;
	int i;

	if (strcmp(pr_name, "_eee_support") == 0) {
		if (pr_val == NULL)
			return (EINVAL);
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		switch (result) {
		case 0:
		case 1:
			/*
			 * For now, only supported on I350/I354.
			 * Add new mac.type values (or use < instead)
			 * as new cards offer up EEE.
			 */
			switch (hw->mac.type) {
			case e1000_i350:
				/* Must set this prior to the set call. */
				hw->dev_spec._82575.eee_disable = !result;
				if (e1000_set_eee_i350(hw, result,
				    result) != E1000_SUCCESS)
					err = EIO;
				break;
			case e1000_i354:
				/* Must set this prior to the set call. */
				hw->dev_spec._82575.eee_disable = !result;
				if (e1000_set_eee_i354(hw, result,
				    result) != E1000_SUCCESS)
					err = EIO;
				break;
			default:
				return (ENXIO);
			}
			break;
		default:
			err = EINVAL;
			/* FALLTHRU */
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_COPY_THRESHOLD ||
		    result > MAX_TX_COPY_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_copy_thresh = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_RECYCLE_THRESHOLD ||
		    result > MAX_TX_RECYCLE_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_recycle_thresh = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_OVERLOAD_THRESHOLD ||
		    result > MAX_TX_OVERLOAD_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_overload_thresh = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_RESCHED_THRESHOLD ||
		    result > MAX_TX_RESCHED_THRESHOLD ||
		    result > igb->tx_ring_size)
			err = EINVAL;
		else {
			igb->tx_resched_thresh = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_RX_COPY_THRESHOLD ||
		    result > MAX_RX_COPY_THRESHOLD)
			err = EINVAL;
		else {
			igb->rx_copy_thresh = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_RX_LIMIT_PER_INTR ||
		    result > MAX_RX_LIMIT_PER_INTR)
			err = EINVAL;
		else {
			igb->rx_limit_per_intr = (uint32_t)result;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_throttling") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		if (result < igb->capab->min_intr_throttle ||
		    result > igb->capab->max_intr_throttle)
			err = EINVAL;
		else {
			igb->intr_throttling[0] = (uint32_t)result;

			for (i = 0; i < MAX_NUM_EITR; i++)
				igb->intr_throttling[i] =
				    igb->intr_throttling[0];

			/* Set interrupt throttling rate */
			for (i = 0; i < igb->intr_cnt; i++)
				E1000_WRITE_REG(hw, E1000_EITR(i),
				    igb->intr_throttling[i]);
		}
		return (err);
	}
	return (ENOTSUP);
}

int
igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = igb->param_adv_pause_cap;
	} else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = igb->param_adv_asym_pause_cap;
	} else if (strcmp(pr_name, "_eee_support") == 0) {
		/*
		 * For now, only supported on I350/I354.  Add new mac.type
		 * values (or use < instead) as new cards offer up EEE.
		 */
		switch (igb->hw.mac.type) {
		case e1000_i350:
		case e1000_i354:
			value = !(igb->hw.dev_spec._82575.eee_disable);
			break;
		default:
			value = 0;
		}
	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		value = igb->tx_copy_thresh;
	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		value = igb->tx_recycle_thresh;
	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		value = igb->tx_overload_thresh;
	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		value = igb->tx_resched_thresh;
	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		value = igb->rx_copy_thresh;
	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		value = igb->rx_limit_per_intr;
	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
		value = igb->intr_throttling[0];
	} else {
		return (ENOTSUP);
	}

	(void) snprintf(pr_val, pr_valsize, "%d", value);
	return (0);
}

void
igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
{
	char valstr[64];
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
	    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		return;
	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		value = DEFAULT_TX_COPY_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		value = DEFAULT_TX_RECYCLE_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		value = DEFAULT_TX_OVERLOAD_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		value = DEFAULT_TX_RESCHED_THRESHOLD;
	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		value = DEFAULT_RX_COPY_THRESHOLD;
	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		value = DEFAULT_RX_LIMIT_PER_INTR;
	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
		value = igb->capab->def_intr_throttle;
	} else {
		return;
	}

	(void) snprintf(valstr, sizeof (valstr), "%d", value);
	mac_prop_info_set_default_str(prh, valstr);
}
1587