xref: /illumos-gate/usr/src/uts/common/io/igb/igb_gld.c (revision 2bbdd445a21f9d61f4a0ca0faf05d5ceb2bd91f3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29  * Copyright 2014 Pluribus Networks Inc.
30  */
31 
32 #include "igb_sw.h"
33 
34 int
35 igb_m_stat(void *arg, uint_t stat, uint64_t *val)
36 {
37 	igb_t *igb = (igb_t *)arg;
38 	struct e1000_hw *hw = &igb->hw;
39 	igb_stat_t *igb_ks;
40 	uint32_t low_val, high_val;
41 
42 	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;
43 
44 	mutex_enter(&igb->gen_lock);
45 
46 	if (igb->igb_state & IGB_SUSPENDED) {
47 		mutex_exit(&igb->gen_lock);
48 		return (ECANCELED);
49 	}
50 
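	/*
	 * The hardware statistics registers are clear-on-read, so each
	 * register read below is accumulated into a running software
	 * counter.
	 */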
51 	switch (stat) {
52 	case MAC_STAT_IFSPEED:
53 		*val = igb->link_speed * 1000000ull;
54 		break;
55 
56 	case MAC_STAT_MULTIRCV:
57 		igb->stat_mprc += E1000_READ_REG(hw, E1000_MPRC);
58 		*val = igb->stat_mprc;
59 		break;
60 
61 	case MAC_STAT_BRDCSTRCV:
62 		igb->stat_bprc += E1000_READ_REG(hw, E1000_BPRC);
63 		*val = igb->stat_bprc;
64 		break;
65 
66 	case MAC_STAT_MULTIXMT:
67 		igb->stat_mptc += E1000_READ_REG(hw, E1000_MPTC);
68 		*val = igb->stat_mptc;
69 		break;
70 
71 	case MAC_STAT_BRDCSTXMT:
72 		igb->stat_bptc += E1000_READ_REG(hw, E1000_BPTC);
73 		*val = igb->stat_bptc;
74 		break;
75 
76 	case MAC_STAT_NORCVBUF:
77 		igb->stat_rnbc += E1000_READ_REG(hw, E1000_RNBC);
78 		*val = igb->stat_rnbc;
79 		break;
80 
81 	case MAC_STAT_IERRORS:
82 		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
83 		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
84 		igb_ks->rlec.value.ui64 +=
85 		    E1000_READ_REG(hw, E1000_RLEC);
86 		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
87 		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
88 		*val = igb->stat_rxerrc +
89 		    igb->stat_algnerrc +
90 		    igb_ks->rlec.value.ui64 +
91 		    igb->stat_crcerrs +
92 		    igb->stat_cexterr;
93 		break;
94 
95 	case MAC_STAT_NOXMTBUF:
96 		*val = 0;
97 		break;
98 
99 	case MAC_STAT_OERRORS:
100 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
101 		*val = igb->stat_ecol;
102 		break;
103 
104 	case MAC_STAT_COLLISIONS:
105 		igb->stat_colc += E1000_READ_REG(hw, E1000_COLC);
106 		*val = igb->stat_colc;
107 		break;
108 
109 	case MAC_STAT_RBYTES:
110 		/*
111 		 * The 64-bit register will reset whenever the upper
112 		 * 32 bits are read. So we need to read the lower
113 		 * 32 bits first, then read the upper 32 bits.
114 		 */
115 		low_val = E1000_READ_REG(hw, E1000_TORL);
116 		high_val = E1000_READ_REG(hw, E1000_TORH);
117 		igb->stat_tor += (uint64_t)high_val << 32 | (uint64_t)low_val;
118 		*val = igb->stat_tor;
119 		break;
120 
121 	case MAC_STAT_IPACKETS:
122 		igb->stat_tpr += E1000_READ_REG(hw, E1000_TPR);
123 		*val = igb->stat_tpr;
124 		break;
125 
126 	case MAC_STAT_OBYTES:
127 		/*
128 		 * The 64-bit register will reset whenever the upper
129 		 * 32 bits are read. So we need to read the lower
130 		 * 32 bits first, then read the upper 32 bits.
131 		 */
132 		low_val = E1000_READ_REG(hw, E1000_TOTL);
133 		high_val = E1000_READ_REG(hw, E1000_TOTH);
134 		igb->stat_tot += (uint64_t)high_val << 32 | (uint64_t)low_val;
135 		*val = igb->stat_tot;
136 		break;
137 
138 	case MAC_STAT_OPACKETS:
139 		igb->stat_tpt += E1000_READ_REG(hw, E1000_TPT);
140 		*val = igb->stat_tpt;
141 		break;
142 
143 	/* RFC 1643 stats */
144 	case ETHER_STAT_ALIGN_ERRORS:
145 		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
146 		*val = igb->stat_algnerrc;
147 		break;
148 
149 	case ETHER_STAT_FCS_ERRORS:
150 		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
151 		*val = igb->stat_crcerrs;
152 		break;
153 
154 	case ETHER_STAT_FIRST_COLLISIONS:
155 		igb->stat_scc += E1000_READ_REG(hw, E1000_SCC);
156 		*val = igb->stat_scc;
157 		break;
158 
159 	case ETHER_STAT_MULTI_COLLISIONS:
160 		igb->stat_mcc += E1000_READ_REG(hw, E1000_MCC);
161 		*val = igb->stat_mcc;
162 		break;
163 
164 	case ETHER_STAT_SQE_ERRORS:
165 		igb->stat_sec += E1000_READ_REG(hw, E1000_SEC);
166 		*val = igb->stat_sec;
167 		break;
168 
169 	case ETHER_STAT_DEFER_XMTS:
170 		igb->stat_dc += E1000_READ_REG(hw, E1000_DC);
171 		*val = igb->stat_dc;
172 		break;
173 
174 	case ETHER_STAT_TX_LATE_COLLISIONS:
175 		igb->stat_latecol += E1000_READ_REG(hw, E1000_LATECOL);
176 		*val = igb->stat_latecol;
177 		break;
178 
179 	case ETHER_STAT_EX_COLLISIONS:
180 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
181 		*val = igb->stat_ecol;
182 		break;
183 
184 	case ETHER_STAT_MACXMT_ERRORS:
185 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
186 		*val = igb->stat_ecol;
187 		break;
188 
189 	case ETHER_STAT_CARRIER_ERRORS:
190 		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
191 		*val = igb->stat_cexterr;
192 		break;
193 
194 	case ETHER_STAT_TOOLONG_ERRORS:
195 		igb->stat_roc += E1000_READ_REG(hw, E1000_ROC);
196 		*val = igb->stat_roc;
197 		break;
198 
199 	case ETHER_STAT_MACRCV_ERRORS:
200 		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
201 		*val = igb->stat_rxerrc;
202 		break;
203 
204 	/* MII/GMII stats */
205 	case ETHER_STAT_XCVR_ADDR:
206 		/* The Internal PHY's MDI address for each MAC is 1 */
207 		*val = 1;
208 		break;
209 
210 	case ETHER_STAT_XCVR_ID:
211 		*val = hw->phy.id | hw->phy.revision;
212 		break;
213 
214 	case ETHER_STAT_XCVR_INUSE:
215 		switch (igb->link_speed) {
216 		case SPEED_1000:
217 			*val =
218 			    (hw->phy.media_type == e1000_media_type_copper) ?
219 			    XCVR_1000T : XCVR_1000X;
220 			break;
221 		case SPEED_100:
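			/*
			 * Copper: 100BASE-T4 if the T4 capability is set,
			 * otherwise 100BASE-T2; fiber/serdes: 100BASE-X.
			 */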
222 			*val =
223 			    (hw->phy.media_type == e1000_media_type_copper) ?
224 			    (igb->param_100t4_cap == 1) ?
225 			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
226 			break;
227 		case SPEED_10:
228 			*val = XCVR_10;
229 			break;
230 		default:
231 			*val = XCVR_NONE;
232 			break;
233 		}
234 		break;
235 
236 	case ETHER_STAT_CAP_1000FDX:
237 		*val = igb->param_1000fdx_cap;
238 		break;
239 
240 	case ETHER_STAT_CAP_1000HDX:
241 		*val = igb->param_1000hdx_cap;
242 		break;
243 
244 	case ETHER_STAT_CAP_100FDX:
245 		*val = igb->param_100fdx_cap;
246 		break;
247 
248 	case ETHER_STAT_CAP_100HDX:
249 		*val = igb->param_100hdx_cap;
250 		break;
251 
252 	case ETHER_STAT_CAP_10FDX:
253 		*val = igb->param_10fdx_cap;
254 		break;
255 
256 	case ETHER_STAT_CAP_10HDX:
257 		*val = igb->param_10hdx_cap;
258 		break;
259 
260 	case ETHER_STAT_CAP_ASMPAUSE:
261 		*val = igb->param_asym_pause_cap;
262 		break;
263 
264 	case ETHER_STAT_CAP_PAUSE:
265 		*val = igb->param_pause_cap;
266 		break;
267 
268 	case ETHER_STAT_CAP_AUTONEG:
269 		*val = igb->param_autoneg_cap;
270 		break;
271 
272 	case ETHER_STAT_ADV_CAP_1000FDX:
273 		*val = igb->param_adv_1000fdx_cap;
274 		break;
275 
276 	case ETHER_STAT_ADV_CAP_1000HDX:
277 		*val = igb->param_adv_1000hdx_cap;
278 		break;
279 
280 	case ETHER_STAT_ADV_CAP_100FDX:
281 		*val = igb->param_adv_100fdx_cap;
282 		break;
283 
284 	case ETHER_STAT_ADV_CAP_100HDX:
285 		*val = igb->param_adv_100hdx_cap;
286 		break;
287 
288 	case ETHER_STAT_ADV_CAP_10FDX:
289 		*val = igb->param_adv_10fdx_cap;
290 		break;
291 
292 	case ETHER_STAT_ADV_CAP_10HDX:
293 		*val = igb->param_adv_10hdx_cap;
294 		break;
295 
296 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
297 		*val = igb->param_adv_asym_pause_cap;
298 		break;
299 
300 	case ETHER_STAT_ADV_CAP_PAUSE:
301 		*val = igb->param_adv_pause_cap;
302 		break;
303 
304 	case ETHER_STAT_ADV_CAP_AUTONEG:
305 		*val = hw->mac.autoneg;
306 		break;
307 
308 	case ETHER_STAT_LP_CAP_1000FDX:
309 		*val = igb->param_lp_1000fdx_cap;
310 		break;
311 
312 	case ETHER_STAT_LP_CAP_1000HDX:
313 		*val = igb->param_lp_1000hdx_cap;
314 		break;
315 
316 	case ETHER_STAT_LP_CAP_100FDX:
317 		*val = igb->param_lp_100fdx_cap;
318 		break;
319 
320 	case ETHER_STAT_LP_CAP_100HDX:
321 		*val = igb->param_lp_100hdx_cap;
322 		break;
323 
324 	case ETHER_STAT_LP_CAP_10FDX:
325 		*val = igb->param_lp_10fdx_cap;
326 		break;
327 
328 	case ETHER_STAT_LP_CAP_10HDX:
329 		*val = igb->param_lp_10hdx_cap;
330 		break;
331 
332 	case ETHER_STAT_LP_CAP_ASMPAUSE:
333 		*val = igb->param_lp_asym_pause_cap;
334 		break;
335 
336 	case ETHER_STAT_LP_CAP_PAUSE:
337 		*val = igb->param_lp_pause_cap;
338 		break;
339 
340 	case ETHER_STAT_LP_CAP_AUTONEG:
341 		*val = igb->param_lp_autoneg_cap;
342 		break;
343 
344 	case ETHER_STAT_LINK_ASMPAUSE:
345 		*val = igb->param_asym_pause_cap;
346 		break;
347 
348 	case ETHER_STAT_LINK_PAUSE:
349 		*val = igb->param_pause_cap;
350 		break;
351 
352 	case ETHER_STAT_LINK_AUTONEG:
353 		*val = hw->mac.autoneg;
354 		break;
355 
356 	case ETHER_STAT_LINK_DUPLEX:
357 		*val = (igb->link_duplex == FULL_DUPLEX) ?
358 		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
359 		break;
360 
361 	case ETHER_STAT_TOOSHORT_ERRORS:
362 		igb->stat_ruc += E1000_READ_REG(hw, E1000_RUC);
363 		*val = igb->stat_ruc;
364 		break;
365 
366 	case ETHER_STAT_CAP_REMFAULT:
367 		*val = igb->param_rem_fault;
368 		break;
369 
370 	case ETHER_STAT_ADV_REMFAULT:
371 		*val = igb->param_adv_rem_fault;
372 		break;
373 
374 	case ETHER_STAT_LP_REMFAULT:
375 		*val = igb->param_lp_rem_fault;
376 		break;
377 
378 	case ETHER_STAT_JABBER_ERRORS:
379 		igb->stat_rjc += E1000_READ_REG(hw, E1000_RJC);
380 		*val = igb->stat_rjc;
381 		break;
382 
383 	case ETHER_STAT_CAP_100T4:
384 		*val = igb->param_100t4_cap;
385 		break;
386 
387 	case ETHER_STAT_ADV_CAP_100T4:
388 		*val = igb->param_adv_100t4_cap;
389 		break;
390 
391 	case ETHER_STAT_LP_CAP_100T4:
392 		*val = igb->param_lp_100t4_cap;
393 		break;
394 
395 	default:
396 		mutex_exit(&igb->gen_lock);
397 		return (ENOTSUP);
398 	}
399 
400 	mutex_exit(&igb->gen_lock);
401 
402 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
403 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
404 		return (EIO);
405 	}
406 
407 	return (0);
408 }
409 
410 /*
411  * Bring the device out of the reset/quiesced state that it
412  * was in when the interface was registered.
413  */
414 int
415 igb_m_start(void *arg)
416 {
417 	igb_t *igb = (igb_t *)arg;
418 
419 	mutex_enter(&igb->gen_lock);
420 
421 	if (igb->igb_state & IGB_SUSPENDED) {
422 		mutex_exit(&igb->gen_lock);
423 		return (ECANCELED);
424 	}
425 
426 	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
427 		mutex_exit(&igb->gen_lock);
428 		return (EIO);
429 	}
430 
431 	atomic_or_32(&igb->igb_state, IGB_STARTED);
432 
433 	mutex_exit(&igb->gen_lock);
434 
435 	/*
436 	 * Enable and start the watchdog timer
437 	 */
438 	igb_enable_watchdog_timer(igb);
439 
440 	return (0);
441 }
442 
443 /*
444  * Stop the device and put it in a reset/quiesced state such
445  * that the interface can be unregistered.
446  */
447 void
448 igb_m_stop(void *arg)
449 {
450 	igb_t *igb = (igb_t *)arg;
451 
452 	mutex_enter(&igb->gen_lock);
453 
454 	if (igb->igb_state & IGB_SUSPENDED) {
455 		mutex_exit(&igb->gen_lock);
456 		return;
457 	}
458 
459 	atomic_and_32(&igb->igb_state, ~IGB_STARTED);
460 
461 	igb_stop(igb, B_TRUE);
462 
463 	mutex_exit(&igb->gen_lock);
464 
465 	/*
466 	 * Disable and stop the watchdog timer
467 	 */
468 	igb_disable_watchdog_timer(igb);
469 }
470 
471 /*
472  * Set the promiscuity of the device.
473  */
474 int
475 igb_m_promisc(void *arg, boolean_t on)
476 {
477 	igb_t *igb = (igb_t *)arg;
478 	uint32_t reg_val;
479 
480 	mutex_enter(&igb->gen_lock);
481 
482 	if (igb->igb_state & IGB_SUSPENDED) {
483 		mutex_exit(&igb->gen_lock);
484 		return (ECANCELED);
485 	}
486 
487 	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
488 
489 	if (on)
490 		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
491 	else
492 		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
493 
494 	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
495 
496 	mutex_exit(&igb->gen_lock);
497 
498 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
499 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
500 		return (EIO);
501 	}
502 
503 	return (0);
504 }
505 
506 /*
507  * Add/remove the addresses to/from the set of multicast
508  * addresses for which the device will receive packets.
509  */
510 int
511 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
512 {
513 	igb_t *igb = (igb_t *)arg;
514 	int result;
515 
516 	mutex_enter(&igb->gen_lock);
517 
518 	if (igb->igb_state & IGB_SUSPENDED) {
519 		mutex_exit(&igb->gen_lock);
520 		return (ECANCELED);
521 	}
522 
523 	result = (add) ? igb_multicst_add(igb, mcst_addr)
524 	    : igb_multicst_remove(igb, mcst_addr);
525 
526 	mutex_exit(&igb->gen_lock);
527 
528 	return (result);
529 }
530 
531 /*
532  * Handle M_IOCTL messages passed to the DLD, and support
533  * private IOCTLs for debugging and ndd.
534  */
535 void
536 igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
537 {
538 	igb_t *igb = (igb_t *)arg;
539 	struct iocblk *iocp;
540 	enum ioc_reply status;
541 
542 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
543 	iocp->ioc_error = 0;
544 
545 	mutex_enter(&igb->gen_lock);
546 	if (igb->igb_state & IGB_SUSPENDED) {
547 		mutex_exit(&igb->gen_lock);
548 		miocnak(q, mp, 0, EINVAL);
549 		return;
550 	}
551 	mutex_exit(&igb->gen_lock);
552 
553 	switch (iocp->ioc_cmd) {
554 	case LB_GET_INFO_SIZE:
555 	case LB_GET_INFO:
556 	case LB_GET_MODE:
557 	case LB_SET_MODE:
558 		status = igb_loopback_ioctl(igb, iocp, mp);
559 		break;
560 
561 	default:
562 		status = IOC_INVAL;
563 		break;
564 	}
565 
566 	/*
567 	 * Decide how to reply
568 	 */
569 	switch (status) {
570 	default:
571 	case IOC_INVAL:
572 		/*
573 		 * Error, reply with a NAK and EINVAL or the specified error
574 		 */
575 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
576 		    EINVAL : iocp->ioc_error);
577 		break;
578 
579 	case IOC_DONE:
580 		/*
581 		 * OK, reply already sent
582 		 */
583 		break;
584 
585 	case IOC_ACK:
586 		/*
587 		 * OK, reply with an ACK
588 		 */
589 		miocack(q, mp, 0, 0);
590 		break;
591 
592 	case IOC_REPLY:
593 		/*
594 		 * OK, send prepared reply as ACK or NAK
595 		 */
596 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
597 		    M_IOCACK : M_IOCNAK;
598 		qreply(q, mp);
599 		break;
600 	}
601 }
602 
603 /*
604  * Add a MAC address to the target RX group.
605  */
606 static int
607 igb_addmac(void *arg, const uint8_t *mac_addr)
608 {
609 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
610 	igb_t *igb = rx_group->igb;
611 	struct e1000_hw *hw = &igb->hw;
612 	int i, slot;
613 
614 	mutex_enter(&igb->gen_lock);
615 
616 	if (igb->igb_state & IGB_SUSPENDED) {
617 		mutex_exit(&igb->gen_lock);
618 		return (ECANCELED);
619 	}
620 
621 	if (igb->unicst_avail == 0) {
622 		/* no slots available */
623 		mutex_exit(&igb->gen_lock);
624 		return (ENOSPC);
625 	}
626 
627 	/*
628 	 * Slots 0 through igb->num_rx_groups - 1 are reserved slots, which
629 	 * are mapped 1:1 to the group index. The other slots are
630 	 * shared among all of the groups. While adding a MAC address,
631 	 * it will try to set the reserved slots first, then the shared slots.
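	 * For example, with two RX groups, slot 0 is reserved for group 0,
	 * slot 1 for group 1, and slots 2 .. igb->unicst_total - 1 are shared.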
632 	 */
633 	slot = -1;
634 	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
635 		/*
636 		 * The reserved slot for current group is used, find the free
637 		 * slots in the shared slots.
638 		 */
639 		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
640 			if (igb->unicst_addr[i].mac.set == 0) {
641 				slot = i;
642 				break;
643 			}
644 		}
645 	} else
646 		slot = rx_group->index;
647 
648 	if (slot == -1) {
649 		/* no slots available in the shared slots */
650 		mutex_exit(&igb->gen_lock);
651 		return (ENOSPC);
652 	}
653 
654 	/* Set VMDq according to the mode supported by hardware. */
655 	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);
656 
657 	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
658 	igb->unicst_addr[slot].mac.group_index = rx_group->index;
659 	igb->unicst_addr[slot].mac.set = 1;
660 	igb->unicst_avail--;
661 
662 	mutex_exit(&igb->gen_lock);
663 
664 	return (0);
665 }
666 
667 /*
668  * Remove a MAC address from the specified RX group.
669  */
670 static int
671 igb_remmac(void *arg, const uint8_t *mac_addr)
672 {
673 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
674 	igb_t *igb = rx_group->igb;
675 	struct e1000_hw *hw = &igb->hw;
676 	int slot;
677 
678 	mutex_enter(&igb->gen_lock);
679 
680 	if (igb->igb_state & IGB_SUSPENDED) {
681 		mutex_exit(&igb->gen_lock);
682 		return (ECANCELED);
683 	}
684 
685 	slot = igb_unicst_find(igb, mac_addr);
686 	if (slot == -1) {
687 		mutex_exit(&igb->gen_lock);
688 		return (EINVAL);
689 	}
690 
691 	if (igb->unicst_addr[slot].mac.set == 0) {
692 		mutex_exit(&igb->gen_lock);
693 		return (EINVAL);
694 	}
695 
696 	/* Clear the MAC address in the slot */
697 	e1000_rar_clear(hw, slot);
698 	igb->unicst_addr[slot].mac.set = 0;
699 	igb->unicst_avail++;
700 
701 	mutex_exit(&igb->gen_lock);
702 
703 	return (0);
704 }
705 
706 /*
707  * Enable interrupt on the specified RX ring.
708  */
709 int
710 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
711 {
712 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
713 	igb_t *igb = rx_ring->igb;
714 	struct e1000_hw *hw = &igb->hw;
715 	uint32_t index = rx_ring->index;
716 
717 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
718 		/* Interrupt enabling for MSI-X */
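		/* Unmask the queue in EIMS and let it auto-clear via EIAC */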
719 		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
720 		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
721 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
722 	} else {
723 		ASSERT(index == 0);
724 		/* Interrupt enabling for MSI and legacy */
725 		igb->ims_mask |= E1000_IMS_RXT0;
726 		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
727 	}
728 
729 	E1000_WRITE_FLUSH(hw);
730 
731 	return (0);
732 }
733 
734 /*
735  * Disable interrupt on the specified RX ring.
736  */
737 int
738 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
739 {
740 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
741 	igb_t *igb = rx_ring->igb;
742 	struct e1000_hw *hw = &igb->hw;
743 	uint32_t index = rx_ring->index;
744 
745 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
746 		/* Interrupt disabling for MSI-X */
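		/* Mask the queue via EIMC; drop it from auto-clear (EIAC) */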
747 		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
748 		E1000_WRITE_REG(hw, E1000_EIMC,
749 		    (E1000_EICR_RX_QUEUE0 << index));
750 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
751 	} else {
752 		ASSERT(index == 0);
753 		/* Interrupt disabling for MSI and legacy */
754 		igb->ims_mask &= ~E1000_IMS_RXT0;
755 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
756 	}
757 
758 	E1000_WRITE_FLUSH(hw);
759 
760 	return (0);
761 }
762 
763 /*
764  * Map a ring index within a group to the global ring index.
765  */
766 int
767 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
768 {
769 	igb_rx_ring_t *rx_ring;
770 	int i;
771 
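	/*
	 * Walk the rings in global order; the rindex'th ring that belongs
	 * to group gindex is the one we want.
	 */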
772 	for (i = 0; i < igb->num_rx_rings; i++) {
773 		rx_ring = &igb->rx_rings[i];
774 		if (rx_ring->group_index == gindex)
775 			rindex--;
776 		if (rindex < 0)
777 			return (i);
778 	}
779 
780 	return (-1);
781 }
782 
783 static int
784 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
785 {
786 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
787 
788 	mutex_enter(&rx_ring->rx_lock);
789 	rx_ring->ring_gen_num = mr_gen_num;
790 	mutex_exit(&rx_ring->rx_lock);
791 	return (0);
792 }
793 
794 /*
795  * Callback function for the MAC layer to register all rings.
796  */
797 /* ARGSUSED */
798 void
799 igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
800     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
801 {
802 	igb_t *igb = (igb_t *)arg;
803 	mac_intr_t *mintr = &infop->mri_intr;
804 
805 	switch (rtype) {
806 	case MAC_RING_TYPE_RX: {
807 		igb_rx_ring_t *rx_ring;
808 		int global_index;
809 
810 		/*
811 		 * 'index' is the ring index within the group.
812 		 * Find the global ring index by searching within the group.
813 		 */
814 		global_index = igb_get_rx_ring_index(igb, rg_index, index);
815 
816 		ASSERT(global_index >= 0);
817 
818 		rx_ring = &igb->rx_rings[global_index];
819 		rx_ring->ring_handle = rh;
820 
821 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
822 		infop->mri_start = igb_ring_start;
823 		infop->mri_stop = NULL;
824 		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
825 		infop->mri_stat = igb_rx_ring_stat;
826 
827 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
828 		mintr->mi_enable = igb_rx_ring_intr_enable;
829 		mintr->mi_disable = igb_rx_ring_intr_disable;
830 		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
831 			mintr->mi_ddi_handle =
832 			    igb->htable[rx_ring->intr_vector];
833 		}
834 		break;
835 	}
836 	case MAC_RING_TYPE_TX: {
837 		ASSERT(index < igb->num_tx_rings);
838 
839 		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
840 		tx_ring->ring_handle = rh;
841 
842 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
843 		infop->mri_start = NULL;
844 		infop->mri_stop = NULL;
845 		infop->mri_tx = igb_tx_ring_send;
846 		infop->mri_stat = igb_tx_ring_stat;
847 		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
848 			mintr->mi_ddi_handle =
849 			    igb->htable[tx_ring->intr_vector];
850 		}
851 		break;
852 	}
853 	default:
854 		break;
855 	}
856 }
857 
858 void
859 igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
860     mac_group_info_t *infop, mac_group_handle_t gh)
861 {
862 	igb_t *igb = (igb_t *)arg;
863 
864 	switch (rtype) {
865 	case MAC_RING_TYPE_RX: {
866 		igb_rx_group_t *rx_group;
867 
868 		ASSERT((index >= 0) && (index < igb->num_rx_groups));
869 
870 		rx_group = &igb->rx_groups[index];
871 		rx_group->group_handle = gh;
872 
873 		infop->mgi_driver = (mac_group_driver_t)rx_group;
874 		infop->mgi_start = NULL;
875 		infop->mgi_stop = NULL;
876 		infop->mgi_addmac = igb_addmac;
877 		infop->mgi_remmac = igb_remmac;
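		/* The RX rings are divided evenly among the RX groups */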
878 		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);
879 
880 		break;
881 	}
882 	case MAC_RING_TYPE_TX:
883 		break;
884 	default:
885 		break;
886 	}
887 }
888 
889 /*
890  * Obtain the MAC's capabilities and associated data from
891  * the driver.
892  */
893 boolean_t
894 igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
895 {
896 	igb_t *igb = (igb_t *)arg;
897 
898 	switch (cap) {
899 	case MAC_CAPAB_HCKSUM: {
900 		uint32_t *tx_hcksum_flags = cap_data;
901 
902 		/*
903 		 * We advertise our capabilities only if tx hcksum offload is
904 		 * enabled.  On receive, the stack will accept checksummed
905 		 * packets anyway, even if we haven't said we can deliver
906 		 * them.
907 		 */
908 		if (!igb->tx_hcksum_enable)
909 			return (B_FALSE);
910 
911 		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
912 		break;
913 	}
914 	case MAC_CAPAB_LSO: {
915 		mac_capab_lso_t *cap_lso = cap_data;
916 
917 		if (igb->lso_enable) {
918 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
919 			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
920 			break;
921 		} else {
922 			return (B_FALSE);
923 		}
924 	}
925 	case MAC_CAPAB_RINGS: {
926 		mac_capab_rings_t *cap_rings = cap_data;
927 
928 		switch (cap_rings->mr_type) {
929 		case MAC_RING_TYPE_RX:
930 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
931 			cap_rings->mr_rnum = igb->num_rx_rings;
932 			cap_rings->mr_gnum = igb->num_rx_groups;
933 			cap_rings->mr_rget = igb_fill_ring;
934 			cap_rings->mr_gget = igb_fill_group;
935 			cap_rings->mr_gaddring = NULL;
936 			cap_rings->mr_gremring = NULL;
937 
938 			break;
939 		case MAC_RING_TYPE_TX:
940 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
941 			cap_rings->mr_rnum = igb->num_tx_rings;
942 			cap_rings->mr_gnum = 0;
943 			cap_rings->mr_rget = igb_fill_ring;
944 			cap_rings->mr_gget = NULL;
945 
946 			break;
947 		default:
948 			break;
949 		}
950 		break;
951 	}
952 
953 	default:
954 		return (B_FALSE);
955 	}
956 	return (B_TRUE);
957 }
958 
959 int
960 igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
961     uint_t pr_valsize, const void *pr_val)
962 {
963 	igb_t *igb = (igb_t *)arg;
964 	struct e1000_hw *hw = &igb->hw;
965 	int err = 0;
966 	uint32_t flow_control;
967 	uint32_t cur_mtu, new_mtu;
968 	uint32_t rx_size;
969 	uint32_t tx_size;
970 
971 	mutex_enter(&igb->gen_lock);
972 	if (igb->igb_state & IGB_SUSPENDED) {
973 		mutex_exit(&igb->gen_lock);
974 		return (ECANCELED);
975 	}
976 
977 	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
978 		/*
979 		 * All en_* parameters are locked (read-only)
980 		 * while the device is in any sort of loopback mode.
981 		 */
982 		mutex_exit(&igb->gen_lock);
983 		return (EBUSY);
984 	}
985 
986 	switch (pr_num) {
987 	case MAC_PROP_EN_1000FDX_CAP:
988 		/* read/write on copper, read-only on serdes */
989 		if (hw->phy.media_type != e1000_media_type_copper) {
990 			err = ENOTSUP;
991 			break;
992 		}
993 		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
994 		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
995 		goto setup_link;
996 	case MAC_PROP_EN_100FDX_CAP:
997 		if (hw->phy.media_type != e1000_media_type_copper) {
998 			err = ENOTSUP;
999 			break;
1000 		}
1001 		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
1002 		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
1003 		goto setup_link;
1004 	case MAC_PROP_EN_100HDX_CAP:
1005 		if (hw->phy.media_type != e1000_media_type_copper) {
1006 			err = ENOTSUP;
1007 			break;
1008 		}
1009 		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
1010 		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
1011 		goto setup_link;
1012 	case MAC_PROP_EN_10FDX_CAP:
1013 		if (hw->phy.media_type != e1000_media_type_copper) {
1014 			err = ENOTSUP;
1015 			break;
1016 		}
1017 		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
1018 		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
1019 		goto setup_link;
1020 	case MAC_PROP_EN_10HDX_CAP:
1021 		if (hw->phy.media_type != e1000_media_type_copper) {
1022 			err = ENOTSUP;
1023 			break;
1024 		}
1025 		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
1026 		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
1027 		goto setup_link;
1028 	case MAC_PROP_AUTONEG:
1029 		if (hw->phy.media_type != e1000_media_type_copper) {
1030 			err = ENOTSUP;
1031 			break;
1032 		}
1033 		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
1034 		goto setup_link;
1035 	case MAC_PROP_FLOWCTRL:
1036 		bcopy(pr_val, &flow_control, sizeof (flow_control));
1037 
1038 		switch (flow_control) {
1039 		default:
1040 			err = EINVAL;
1041 			break;
1042 		case LINK_FLOWCTRL_NONE:
1043 			hw->fc.requested_mode = e1000_fc_none;
1044 			break;
1045 		case LINK_FLOWCTRL_RX:
1046 			hw->fc.requested_mode = e1000_fc_rx_pause;
1047 			break;
1048 		case LINK_FLOWCTRL_TX:
1049 			hw->fc.requested_mode = e1000_fc_tx_pause;
1050 			break;
1051 		case LINK_FLOWCTRL_BI:
1052 			hw->fc.requested_mode = e1000_fc_full;
1053 			break;
1054 		}
1055 setup_link:
1056 		if (err == 0) {
1057 			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
1058 				err = EINVAL;
1059 		}
1060 		break;
1061 	case MAC_PROP_ADV_1000FDX_CAP:
1062 	case MAC_PROP_ADV_1000HDX_CAP:
1063 	case MAC_PROP_ADV_100T4_CAP:
1064 	case MAC_PROP_ADV_100FDX_CAP:
1065 	case MAC_PROP_ADV_100HDX_CAP:
1066 	case MAC_PROP_ADV_10FDX_CAP:
1067 	case MAC_PROP_ADV_10HDX_CAP:
1068 	case MAC_PROP_EN_1000HDX_CAP:
1069 	case MAC_PROP_EN_100T4_CAP:
1070 	case MAC_PROP_STATUS:
1071 	case MAC_PROP_SPEED:
1072 	case MAC_PROP_DUPLEX:
1073 		err = ENOTSUP; /* read-only prop. Can't set this. */
1074 		break;
1075 	case MAC_PROP_MTU:
1076 		/* adapter must be stopped for an MTU change */
1077 		if (igb->igb_state & IGB_STARTED) {
1078 			err = EBUSY;
1079 			break;
1080 		}
1081 
1082 		cur_mtu = igb->default_mtu;
1083 		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
1084 		if (new_mtu == cur_mtu) {
1085 			err = 0;
1086 			break;
1087 		}
1088 
1089 		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
1090 			err = EINVAL;
1091 			break;
1092 		}
1093 
1094 		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
1095 		if (err == 0) {
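			/*
			 * The maximum frame size is the MTU plus the
			 * VLAN-tagged Ethernet header and the FCS.
			 */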
1096 			igb->default_mtu = new_mtu;
1097 			igb->max_frame_size = igb->default_mtu +
1098 			    sizeof (struct ether_vlan_header) + ETHERFCSL;
1099 
1100 			/*
1101 			 * Set rx buffer size
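			 * (max_frame_size plus IPHDR_ALIGN_ROOM, rounded up
			 * to the next 1 KB boundary)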
1102 			 */
1103 			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
1104 			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
1105 			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1106 
1107 			/*
1108 			 * Set tx buffer size
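			 * (max_frame_size rounded up to the next 1 KB
			 * boundary)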
1109 			 */
1110 			tx_size = igb->max_frame_size;
1111 			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
1112 			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1113 		}
1114 		break;
1115 	case MAC_PROP_PRIVATE:
1116 		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
1117 		break;
1118 	default:
1119 		err = EINVAL;
1120 		break;
1121 	}
1122 
1123 	mutex_exit(&igb->gen_lock);
1124 
1125 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
1126 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
1127 		return (EIO);
1128 	}
1129 
1130 	return (err);
1131 }
1132 
1133 int
1134 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1135     uint_t pr_valsize, void *pr_val)
1136 {
1137 	igb_t *igb = (igb_t *)arg;
1138 	struct e1000_hw *hw = &igb->hw;
1139 	int err = 0;
1140 	uint32_t flow_control;
1141 	uint64_t tmp = 0;
1142 
1143 	switch (pr_num) {
1144 	case MAC_PROP_DUPLEX:
1145 		ASSERT(pr_valsize >= sizeof (link_duplex_t));
1146 		bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
1147 		break;
1148 	case MAC_PROP_SPEED:
1149 		ASSERT(pr_valsize >= sizeof (uint64_t));
1150 		tmp = igb->link_speed * 1000000ull;
1151 		bcopy(&tmp, pr_val, sizeof (tmp));
1152 		break;
1153 	case MAC_PROP_AUTONEG:
1154 		ASSERT(pr_valsize >= sizeof (uint8_t));
1155 		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1156 		break;
1157 	case MAC_PROP_FLOWCTRL:
1158 		ASSERT(pr_valsize >= sizeof (uint32_t));
1159 		switch (hw->fc.requested_mode) {
1160 			case e1000_fc_none:
1161 				flow_control = LINK_FLOWCTRL_NONE;
1162 				break;
1163 			case e1000_fc_rx_pause:
1164 				flow_control = LINK_FLOWCTRL_RX;
1165 				break;
1166 			case e1000_fc_tx_pause:
1167 				flow_control = LINK_FLOWCTRL_TX;
1168 				break;
1169 			case e1000_fc_full:
1170 				flow_control = LINK_FLOWCTRL_BI;
1171 				break;
			default:
				/* Unexpected mode; report no flow control */
				flow_control = LINK_FLOWCTRL_NONE;
				break;
1172 		}
1173 		bcopy(&flow_control, pr_val, sizeof (flow_control));
1174 		break;
1175 	case MAC_PROP_ADV_1000FDX_CAP:
1176 		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1177 		break;
1178 	case MAC_PROP_EN_1000FDX_CAP:
1179 		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1180 		break;
1181 	case MAC_PROP_ADV_1000HDX_CAP:
1182 		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1183 		break;
1184 	case MAC_PROP_EN_1000HDX_CAP:
1185 		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1186 		break;
1187 	case MAC_PROP_ADV_100T4_CAP:
1188 		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1189 		break;
1190 	case MAC_PROP_EN_100T4_CAP:
1191 		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
1192 		break;
1193 	case MAC_PROP_ADV_100FDX_CAP:
1194 		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1195 		break;
1196 	case MAC_PROP_EN_100FDX_CAP:
1197 		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1198 		break;
1199 	case MAC_PROP_ADV_100HDX_CAP:
1200 		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1201 		break;
1202 	case MAC_PROP_EN_100HDX_CAP:
1203 		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1204 		break;
1205 	case MAC_PROP_ADV_10FDX_CAP:
1206 		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1207 		break;
1208 	case MAC_PROP_EN_10FDX_CAP:
1209 		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1210 		break;
1211 	case MAC_PROP_ADV_10HDX_CAP:
1212 		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1213 		break;
1214 	case MAC_PROP_EN_10HDX_CAP:
1215 		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1216 		break;
1217 	case MAC_PROP_PRIVATE:
1218 		err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
1219 		break;
1220 	default:
1221 		err = EINVAL;
1222 		break;
1223 	}
1224 	return (err);
1225 }
1226 
1227 void
1228 igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1229     mac_prop_info_handle_t prh)
1230 {
1231 	igb_t *igb = (igb_t *)arg;
1232 	struct e1000_hw *hw = &igb->hw;
1233 	uint16_t phy_status, phy_ext_status;
1234 
1235 	switch (pr_num) {
1236 	case MAC_PROP_DUPLEX:
1237 	case MAC_PROP_SPEED:
1238 	case MAC_PROP_ADV_1000FDX_CAP:
1239 	case MAC_PROP_ADV_1000HDX_CAP:
1240 	case MAC_PROP_EN_1000HDX_CAP:
1241 	case MAC_PROP_ADV_100T4_CAP:
1242 	case MAC_PROP_EN_100T4_CAP:
1243 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1244 		break;
1245 
1246 	case MAC_PROP_EN_1000FDX_CAP:
1247 		if (hw->phy.media_type != e1000_media_type_copper) {
1248 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1249 		} else {
1250 			(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
1251 			    &phy_ext_status);
1252 			mac_prop_info_set_default_uint8(prh,
1253 			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1254 			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
1255 		}
1256 		break;
1257 
1258 	case MAC_PROP_ADV_100FDX_CAP:
1259 	case MAC_PROP_EN_100FDX_CAP:
1260 		if (hw->phy.media_type != e1000_media_type_copper) {
1261 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1262 		} else {
1263 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1264 			mac_prop_info_set_default_uint8(prh,
1265 			    ((phy_status & MII_SR_100X_FD_CAPS) ||
1266 			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
1267 		}
1268 		break;
1269 
1270 	case MAC_PROP_ADV_100HDX_CAP:
1271 	case MAC_PROP_EN_100HDX_CAP:
1272 		if (hw->phy.media_type != e1000_media_type_copper) {
1273 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1274 		} else {
1275 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1276 			mac_prop_info_set_default_uint8(prh,
1277 			    ((phy_status & MII_SR_100X_HD_CAPS) ||
1278 			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
1279 		}
1280 		break;
1281 
1282 	case MAC_PROP_ADV_10FDX_CAP:
1283 	case MAC_PROP_EN_10FDX_CAP:
1284 		if (hw->phy.media_type != e1000_media_type_copper) {
1285 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1286 		} else {
1287 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1288 			mac_prop_info_set_default_uint8(prh,
1289 			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
1290 		}
1291 		break;
1292 
1293 	case MAC_PROP_ADV_10HDX_CAP:
1294 	case MAC_PROP_EN_10HDX_CAP:
1295 		if (hw->phy.media_type != e1000_media_type_copper) {
1296 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1297 		} else {
1298 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1299 			mac_prop_info_set_default_uint8(prh,
1300 			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
1301 		}
1302 		break;
1303 
1304 	case MAC_PROP_AUTONEG:
1305 		if (hw->phy.media_type != e1000_media_type_copper) {
1306 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1307 		} else {
1308 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1309 			mac_prop_info_set_default_uint8(prh,
1310 			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
1311 		}
1312 		break;
1313 
1314 	case MAC_PROP_FLOWCTRL:
1315 		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1316 		break;
1317 
1318 	case MAC_PROP_MTU:
1319 		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
1320 		break;
1321 
1322 	case MAC_PROP_PRIVATE:
1323 		igb_priv_prop_info(igb, pr_name, prh);
1324 		break;
1325 	}
1326 
1327 }
1328 
1329 boolean_t
1330 igb_param_locked(mac_prop_id_t pr_num)
1331 {
1332 	/*
1333 	 * All en_* parameters are locked (read-only) while
1334 	 * the device is in any sort of loopback mode ...
1335 	 */
1336 	switch (pr_num) {
1337 		case MAC_PROP_EN_1000FDX_CAP:
1338 		case MAC_PROP_EN_1000HDX_CAP:
1339 		case MAC_PROP_EN_100T4_CAP:
1340 		case MAC_PROP_EN_100FDX_CAP:
1341 		case MAC_PROP_EN_100HDX_CAP:
1342 		case MAC_PROP_EN_10FDX_CAP:
1343 		case MAC_PROP_EN_10HDX_CAP:
1344 		case MAC_PROP_AUTONEG:
1345 		case MAC_PROP_FLOWCTRL:
1346 			return (B_TRUE);
1347 	}
1348 	return (B_FALSE);
1349 }
1350 
1351 /* ARGSUSED */
1352 int
1353 igb_set_priv_prop(igb_t *igb, const char *pr_name,
1354     uint_t pr_valsize, const void *pr_val)
1355 {
1356 	int err = 0;
1357 	long result;
1358 	struct e1000_hw *hw = &igb->hw;
1359 	int i;
1360 
1361 	if (strcmp(pr_name, "_eee_support") == 0) {
1362 		if (pr_val == NULL)
1363 			return (EINVAL);
1364 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1365 		switch (result) {
1366 		case 0:
1367 		case 1:
1368 			/*
1369 			 * For now, only supported on I350/I354.
1370 			 * Add new mac.type values (or use < instead)
1371 			 * as new cards offer up EEE.
1372 			 */
1373 			switch (hw->mac.type) {
1374 			case e1000_i350:
1375 				/* Must set this prior to the set call. */
1376 				hw->dev_spec._82575.eee_disable = !result;
1377 				if (e1000_set_eee_i350(hw) != E1000_SUCCESS)
1378 					err = EIO;
1379 				break;
1380 			case e1000_i354:
1381 				/* Must set this prior to the set call. */
1382 				hw->dev_spec._82575.eee_disable = !result;
1383 				if (e1000_set_eee_i354(hw) != E1000_SUCCESS)
1384 					err = EIO;
1385 				break;
1386 			default:
1387 				return (ENXIO);
1388 			}
1389 			break;
1390 		default:
1391 			err = EINVAL;
1392 			/* FALLTHRU */
1393 		}
1394 		return (err);
1395 	}
1396 	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1397 		if (pr_val == NULL) {
1398 			err = EINVAL;
1399 			return (err);
1400 		}
1401 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1402 		if (result < MIN_TX_COPY_THRESHOLD ||
1403 		    result > MAX_TX_COPY_THRESHOLD)
1404 			err = EINVAL;
1405 		else {
1406 			igb->tx_copy_thresh = (uint32_t)result;
1407 		}
1408 		return (err);
1409 	}
1410 	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1411 		if (pr_val == NULL) {
1412 			err = EINVAL;
1413 			return (err);
1414 		}
1415 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1416 		if (result < MIN_TX_RECYCLE_THRESHOLD ||
1417 		    result > MAX_TX_RECYCLE_THRESHOLD)
1418 			err = EINVAL;
1419 		else {
1420 			igb->tx_recycle_thresh = (uint32_t)result;
1421 		}
1422 		return (err);
1423 	}
1424 	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1425 		if (pr_val == NULL) {
1426 			err = EINVAL;
1427 			return (err);
1428 		}
1429 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1430 		if (result < MIN_TX_OVERLOAD_THRESHOLD ||
1431 		    result > MAX_TX_OVERLOAD_THRESHOLD)
1432 			err = EINVAL;
1433 		else {
1434 			igb->tx_overload_thresh = (uint32_t)result;
1435 		}
1436 		return (err);
1437 	}
1438 	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1439 		if (pr_val == NULL) {
1440 			err = EINVAL;
1441 			return (err);
1442 		}
1443 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1444 		if (result < MIN_TX_RESCHED_THRESHOLD ||
1445 		    result > MAX_TX_RESCHED_THRESHOLD ||
1446 		    result > igb->tx_ring_size)
1447 			err = EINVAL;
1448 		else {
1449 			igb->tx_resched_thresh = (uint32_t)result;
1450 		}
1451 		return (err);
1452 	}
1453 	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1454 		if (pr_val == NULL) {
1455 			err = EINVAL;
1456 			return (err);
1457 		}
1458 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1459 		if (result < MIN_RX_COPY_THRESHOLD ||
1460 		    result > MAX_RX_COPY_THRESHOLD)
1461 			err = EINVAL;
1462 		else {
1463 			igb->rx_copy_thresh = (uint32_t)result;
1464 		}
1465 		return (err);
1466 	}
1467 	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1468 		if (pr_val == NULL) {
1469 			err = EINVAL;
1470 			return (err);
1471 		}
1472 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1473 		if (result < MIN_RX_LIMIT_PER_INTR ||
1474 		    result > MAX_RX_LIMIT_PER_INTR)
1475 			err = EINVAL;
1476 		else {
1477 			igb->rx_limit_per_intr = (uint32_t)result;
1478 		}
1479 		return (err);
1480 	}
1481 	if (strcmp(pr_name, "_intr_throttling") == 0) {
1482 		if (pr_val == NULL) {
1483 			err = EINVAL;
1484 			return (err);
1485 		}
1486 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1487 
1488 		if (result < igb->capab->min_intr_throttle ||
1489 		    result > igb->capab->max_intr_throttle)
1490 			err = EINVAL;
1491 		else {
1492 			igb->intr_throttling[0] = (uint32_t)result;
1493 
1494 			for (i = 0; i < MAX_NUM_EITR; i++)
1495 				igb->intr_throttling[i] =
1496 				    igb->intr_throttling[0];
1497 
1498 			/* Set interrupt throttling rate */
1499 			for (i = 0; i < igb->intr_cnt; i++)
1500 				E1000_WRITE_REG(hw, E1000_EITR(i),
1501 				    igb->intr_throttling[i]);
1502 		}
1503 		return (err);
1504 	}
1505 	return (ENOTSUP);
1506 }
1507 
1508 int
1509 igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
1510     void *pr_val)
1511 {
1512 	int value;
1513 
1514 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1515 		value = igb->param_adv_pause_cap;
1516 	} else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1517 		value = igb->param_adv_asym_pause_cap;
1518 	} else if (strcmp(pr_name, "_eee_support") == 0) {
1519 		/*
1520 		 * For now, only supported on I350/I354.  Add new mac.type values
1521 		 * (or use < instead) as new cards offer up EEE.
1522 		 */
1523 		switch (igb->hw.mac.type) {
1524 		case e1000_i350:
1525 		case e1000_i354:
1526 			value = !(igb->hw.dev_spec._82575.eee_disable);
1527 			break;
1528 		default:
1529 			value = 0;
1530 		}
1531 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1532 		value = igb->tx_copy_thresh;
1533 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1534 		value = igb->tx_recycle_thresh;
1535 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1536 		value = igb->tx_overload_thresh;
1537 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1538 		value = igb->tx_resched_thresh;
1539 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1540 		value = igb->rx_copy_thresh;
1541 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1542 		value = igb->rx_limit_per_intr;
1543 	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
1544 		value = igb->intr_throttling[0];
1545 	} else {
1546 		return (ENOTSUP);
1547 	}
1548 
1549 	(void) snprintf(pr_val, pr_valsize, "%d", value);
1550 	return (0);
1551 }
1552 
1553 void
1554 igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
1555 {
1556 	char valstr[64];
1557 	int value;
1558 
1559 	if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
1560 	    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1561 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1562 		return;
1563 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1564 		value = DEFAULT_TX_COPY_THRESHOLD;
1565 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1566 		value = DEFAULT_TX_RECYCLE_THRESHOLD;
1567 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1568 		value = DEFAULT_TX_OVERLOAD_THRESHOLD;
1569 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1570 		value = DEFAULT_TX_RESCHED_THRESHOLD;
1571 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1572 		value = DEFAULT_RX_COPY_THRESHOLD;
1573 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1574 		value = DEFAULT_RX_LIMIT_PER_INTR;
1575 	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
1576 		value = igb->capab->def_intr_throttle;
1577 	} else {
1578 		return;
1579 	}
1580 
1581 	(void) snprintf(valstr, sizeof (valstr), "%d", value);
1582 	mac_prop_info_set_default_str(prh, valstr);
1583 }
1584